GFS2: Eliminate sd_rindex_mutex
Over time, we've slowly eliminated the use of sd_rindex_mutex. Up to this point, it was only used in two places: function gfs2_ri_total (which totals the file system size by reading and parsing the rindex file) and function gfs2_rindex_update (which updates the rgrps in memory). Both of these functions have the rindex glock to protect them, so the mutex is unnecessary. Since gfs2_grow writes to the rindex via the meta_fs, the mutex is in the wrong order according to the normal rules. This patch eliminates the mutex entirely to avoid the problem.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
parent a08fd280b5
commit 6aad1c3d3e
3 changed files with 10 additions and 14 deletions
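For illustration only (not part of the patch): a minimal, self-contained userspace C sketch of the pattern the rgd_insert()/read_rindex_entry() hunks below rely on once the mutex is gone. The rgrp descriptor is built outside any lock, the insert is attempted under the spinlock, and if another reader already inserted the same entry the local copy is simply freed. All names here (struct node, insert(), read_entry(), the sorted list standing in for the rb-tree, the pthread spinlock standing in for sd_rindex_spin) are illustrative assumptions, not GFS2 code.

/*
 * Minimal userspace sketch (not GFS2 code) of the insert-or-discard pattern:
 * build the object outside the lock, try to insert it under a spinlock, and
 * free the local copy if another thread already inserted the same key.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        unsigned long key;
        struct node *next;
};

static struct node *head;            /* sorted list stands in for the rb-tree */
static pthread_spinlock_t list_lock; /* stands in for sd_rindex_spin */

/* Insert 'new' in key order; return -1 if the key already exists, 0 on success. */
static int insert(struct node *new)
{
        struct node **pp = &head;

        while (*pp && (*pp)->key < new->key)
                pp = &(*pp)->next;
        if (*pp && (*pp)->key == new->key)
                return -1;           /* duplicate: caller keeps ownership */
        new->next = *pp;
        *pp = new;
        return 0;
}

/* Allocate outside the lock, insert under it, drop our copy on a duplicate. */
static int read_entry(unsigned long key)
{
        struct node *n = malloc(sizeof(*n));
        int error;

        if (!n)
                return -1;
        n->key = key;
        n->next = NULL;

        pthread_spin_lock(&list_lock);
        error = insert(n);
        pthread_spin_unlock(&list_lock);

        if (error)
                free(n);             /* someone else read it in first; ignore ours */
        return 0;
}

int main(void)
{
        pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
        read_entry(42);
        read_entry(42);              /* duplicate is detected and discarded */
        for (struct node *n = head; n; n = n->next)
                printf("%lu\n", n->key);
        return 0;
}

The same shape appears in the diff: rgd_insert() now reports -EEXIST under sd_rindex_spin and read_rindex_entry() frees its copy, so no outer mutex is needed to serialize concurrent readers of the rindex.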
fs/gfs2/incore.h
@@ -644,7 +644,6 @@ struct gfs2_sbd {
 
        int sd_rindex_uptodate;
        spinlock_t sd_rindex_spin;
-       struct mutex sd_rindex_mutex;
        struct rb_root sd_rindex_tree;
        unsigned int sd_rgrps;
        unsigned int sd_max_rg_data;
fs/gfs2/ops_fstype.c
@@ -83,7 +83,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
        spin_lock_init(&sdp->sd_statfs_spin);
 
        spin_lock_init(&sdp->sd_rindex_spin);
-       mutex_init(&sdp->sd_rindex_mutex);
        sdp->sd_rindex_tree.rb_node = NULL;
 
        INIT_LIST_HEAD(&sdp->sd_jindex_list);
fs/gfs2/rgrp.c
@@ -540,7 +540,6 @@ u64 gfs2_ri_total(struct gfs2_sbd *sdp)
        struct file_ra_state ra_state;
        int error, rgrps;
 
-       mutex_lock(&sdp->sd_rindex_mutex);
        file_ra_state_init(&ra_state, inode->i_mapping);
        for (rgrps = 0;; rgrps++) {
                loff_t pos = rgrps * sizeof(struct gfs2_rindex);
@@ -553,11 +552,10 @@ u64 gfs2_ri_total(struct gfs2_sbd *sdp)
                        break;
                total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
        }
-       mutex_unlock(&sdp->sd_rindex_mutex);
        return total_data;
 }
 
-static void rgd_insert(struct gfs2_rgrpd *rgd)
+static int rgd_insert(struct gfs2_rgrpd *rgd)
 {
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
@@ -573,11 +571,13 @@ static void rgd_insert(struct gfs2_rgrpd *rgd)
                else if (rgd->rd_addr > cur->rd_addr)
                        newn = &((*newn)->rb_right);
                else
-                       return;
+                       return -EEXIST;
        }
 
        rb_link_node(&rgd->rd_node, parent, newn);
        rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
+       sdp->sd_rgrps++;
+       return 0;
 }
 
 /**
@@ -631,10 +631,12 @@ static int read_rindex_entry(struct gfs2_inode *ip,
        if (rgd->rd_data > sdp->sd_max_rg_data)
                sdp->sd_max_rg_data = rgd->rd_data;
        spin_lock(&sdp->sd_rindex_spin);
-       rgd_insert(rgd);
-       sdp->sd_rgrps++;
+       error = rgd_insert(rgd);
        spin_unlock(&sdp->sd_rindex_spin);
-       return error;
+       if (!error)
+               return 0;
+
+       error = 0; /* someone else read in the rgrp; free it and ignore it */
 
 fail:
        kfree(rgd->rd_bits);
@@ -695,22 +697,18 @@ int gfs2_rindex_update(struct gfs2_sbd *sdp)
 
        /* Read new copy from disk if we don't have the latest */
        if (!sdp->sd_rindex_uptodate) {
-               mutex_lock(&sdp->sd_rindex_mutex);
                if (!gfs2_glock_is_locked_by_me(gl)) {
                        error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
                        if (error)
-                               goto out_unlock;
+                               return error;
                        unlock_required = 1;
                }
                if (!sdp->sd_rindex_uptodate)
                        error = gfs2_ri_update(ip);
                if (unlock_required)
                        gfs2_glock_dq_uninit(&ri_gh);
-out_unlock:
-               mutex_unlock(&sdp->sd_rindex_mutex);
        }
 
-
        return error;
 }
 