btrfs: compress: put variables defined per compress type in struct to make cache friendly
Below variables are defined per compress type. - struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES] - spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES] - int comp_num_workspace[BTRFS_COMPRESS_TYPES] - atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES] - wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES] Because each of these is a separate array indexed by compress type, the memory adjacent to one variable for a given compress type holds the same variable for the other compress types — not the related variables for the same type. This patch therefore groups the per-type variables into a single struct, so that all the data for one compress type is contiguous in memory and cache friendly. Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: Byongho Lee <bhlee.kernel@gmail.com> Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent
619ed39242
commit
d91876496b
1 changed file with 47 additions and 45 deletions
|
@ -745,11 +745,13 @@ out:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
|
static struct {
|
||||||
static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES];
|
struct list_head idle_ws;
|
||||||
static int comp_num_workspace[BTRFS_COMPRESS_TYPES];
|
spinlock_t ws_lock;
|
||||||
static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
|
int num_ws;
|
||||||
static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];
|
atomic_t alloc_ws;
|
||||||
|
wait_queue_head_t ws_wait;
|
||||||
|
} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
|
||||||
|
|
||||||
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
|
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
|
||||||
&btrfs_zlib_compress,
|
&btrfs_zlib_compress,
|
||||||
|
@ -761,10 +763,10 @@ void __init btrfs_init_compress(void)
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
|
for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
|
||||||
INIT_LIST_HEAD(&comp_idle_workspace[i]);
|
INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
|
||||||
spin_lock_init(&comp_workspace_lock[i]);
|
spin_lock_init(&btrfs_comp_ws[i].ws_lock);
|
||||||
atomic_set(&comp_alloc_workspace[i], 0);
|
atomic_set(&btrfs_comp_ws[i].alloc_ws, 0);
|
||||||
init_waitqueue_head(&comp_workspace_wait[i]);
|
init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -778,38 +780,38 @@ static struct list_head *find_workspace(int type)
|
||||||
int cpus = num_online_cpus();
|
int cpus = num_online_cpus();
|
||||||
int idx = type - 1;
|
int idx = type - 1;
|
||||||
|
|
||||||
struct list_head *idle_workspace = &comp_idle_workspace[idx];
|
struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
|
||||||
spinlock_t *workspace_lock = &comp_workspace_lock[idx];
|
spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
|
||||||
atomic_t *alloc_workspace = &comp_alloc_workspace[idx];
|
atomic_t *alloc_ws = &btrfs_comp_ws[idx].alloc_ws;
|
||||||
wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx];
|
wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
|
||||||
int *num_workspace = &comp_num_workspace[idx];
|
int *num_ws = &btrfs_comp_ws[idx].num_ws;
|
||||||
again:
|
again:
|
||||||
spin_lock(workspace_lock);
|
spin_lock(ws_lock);
|
||||||
if (!list_empty(idle_workspace)) {
|
if (!list_empty(idle_ws)) {
|
||||||
workspace = idle_workspace->next;
|
workspace = idle_ws->next;
|
||||||
list_del(workspace);
|
list_del(workspace);
|
||||||
(*num_workspace)--;
|
(*num_ws)--;
|
||||||
spin_unlock(workspace_lock);
|
spin_unlock(ws_lock);
|
||||||
return workspace;
|
return workspace;
|
||||||
|
|
||||||
}
|
}
|
||||||
if (atomic_read(alloc_workspace) > cpus) {
|
if (atomic_read(alloc_ws) > cpus) {
|
||||||
DEFINE_WAIT(wait);
|
DEFINE_WAIT(wait);
|
||||||
|
|
||||||
spin_unlock(workspace_lock);
|
spin_unlock(ws_lock);
|
||||||
prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
|
prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
|
||||||
if (atomic_read(alloc_workspace) > cpus && !*num_workspace)
|
if (atomic_read(alloc_ws) > cpus && !*num_ws)
|
||||||
schedule();
|
schedule();
|
||||||
finish_wait(workspace_wait, &wait);
|
finish_wait(ws_wait, &wait);
|
||||||
goto again;
|
goto again;
|
||||||
}
|
}
|
||||||
atomic_inc(alloc_workspace);
|
atomic_inc(alloc_ws);
|
||||||
spin_unlock(workspace_lock);
|
spin_unlock(ws_lock);
|
||||||
|
|
||||||
workspace = btrfs_compress_op[idx]->alloc_workspace();
|
workspace = btrfs_compress_op[idx]->alloc_workspace();
|
||||||
if (IS_ERR(workspace)) {
|
if (IS_ERR(workspace)) {
|
||||||
atomic_dec(alloc_workspace);
|
atomic_dec(alloc_ws);
|
||||||
wake_up(workspace_wait);
|
wake_up(ws_wait);
|
||||||
}
|
}
|
||||||
return workspace;
|
return workspace;
|
||||||
}
|
}
|
||||||
|
@ -821,27 +823,27 @@ again:
|
||||||
static void free_workspace(int type, struct list_head *workspace)
|
static void free_workspace(int type, struct list_head *workspace)
|
||||||
{
|
{
|
||||||
int idx = type - 1;
|
int idx = type - 1;
|
||||||
struct list_head *idle_workspace = &comp_idle_workspace[idx];
|
struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
|
||||||
spinlock_t *workspace_lock = &comp_workspace_lock[idx];
|
spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
|
||||||
atomic_t *alloc_workspace = &comp_alloc_workspace[idx];
|
atomic_t *alloc_ws = &btrfs_comp_ws[idx].alloc_ws;
|
||||||
wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx];
|
wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
|
||||||
int *num_workspace = &comp_num_workspace[idx];
|
int *num_ws = &btrfs_comp_ws[idx].num_ws;
|
||||||
|
|
||||||
spin_lock(workspace_lock);
|
spin_lock(ws_lock);
|
||||||
if (*num_workspace < num_online_cpus()) {
|
if (*num_ws < num_online_cpus()) {
|
||||||
list_add(workspace, idle_workspace);
|
list_add(workspace, idle_ws);
|
||||||
(*num_workspace)++;
|
(*num_ws)++;
|
||||||
spin_unlock(workspace_lock);
|
spin_unlock(ws_lock);
|
||||||
goto wake;
|
goto wake;
|
||||||
}
|
}
|
||||||
spin_unlock(workspace_lock);
|
spin_unlock(ws_lock);
|
||||||
|
|
||||||
btrfs_compress_op[idx]->free_workspace(workspace);
|
btrfs_compress_op[idx]->free_workspace(workspace);
|
||||||
atomic_dec(alloc_workspace);
|
atomic_dec(alloc_ws);
|
||||||
wake:
|
wake:
|
||||||
smp_mb();
|
smp_mb();
|
||||||
if (waitqueue_active(workspace_wait))
|
if (waitqueue_active(ws_wait))
|
||||||
wake_up(workspace_wait);
|
wake_up(ws_wait);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -853,11 +855,11 @@ static void free_workspaces(void)
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
|
for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
|
||||||
while (!list_empty(&comp_idle_workspace[i])) {
|
while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
|
||||||
workspace = comp_idle_workspace[i].next;
|
workspace = btrfs_comp_ws[i].idle_ws.next;
|
||||||
list_del(workspace);
|
list_del(workspace);
|
||||||
btrfs_compress_op[i]->free_workspace(workspace);
|
btrfs_compress_op[i]->free_workspace(workspace);
|
||||||
atomic_dec(&comp_alloc_workspace[i]);
|
atomic_dec(&btrfs_comp_ws[i].alloc_ws);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue