f2fs: keep migration IO order in LFS mode
For non-migration IO, data/node blocks are submitted in allocation order by sorting IOs in the per-log io_list, but migration IO can be submitted out of order. In LFS mode, all IOs, including migration IO, must stay ordered, so this patch adds an additional lock to preserve the submission order.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Yunlong Song <yunlong.song@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent c4408c2387
commit 1f62e4702a

4 changed files with 14 additions and 0 deletions
fs/f2fs/f2fs.h
@@ -1178,6 +1178,8 @@ struct f2fs_sb_info {
 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
 	struct mutex wio_mutex[NR_PAGE_TYPE - 1][NR_TEMP_TYPE];
 				/* bio ordering for NODE/DATA */
+	/* keep migration IO order for LFS mode */
+	struct rw_semaphore io_order_lock;
 	mempool_t *write_io_dummy;		/* Dummy pages */
 
 	/* for checkpoint */
fs/f2fs/gc.c
@@ -614,6 +614,7 @@ static void move_data_block(struct inode *inode, block_t bidx,
 	struct page *page;
 	block_t newaddr;
 	int err;
+	bool lfs_mode = test_opt(fio.sbi, LFS);
 
 	/* do not read out */
 	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
@@ -654,6 +655,9 @@ static void move_data_block(struct inode *inode, block_t bidx,
 	fio.page = page;
 	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
 
+	if (lfs_mode)
+		down_write(&fio.sbi->io_order_lock);
+
 	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
 					&sum, CURSEG_COLD_DATA, NULL, false);
 
@@ -710,6 +714,8 @@ static void move_data_block(struct inode *inode, block_t bidx,
 put_page_out:
 	f2fs_put_page(fio.encrypted_page, 1);
 recover_block:
+	if (lfs_mode)
+		up_write(&fio.sbi->io_order_lock);
 	if (err)
 		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
 								true, true);
fs/f2fs/segment.c
@@ -2831,7 +2831,10 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 {
 	int type = __get_segment_type(fio);
 	int err;
+	bool keep_order = (test_opt(fio->sbi, LFS) && type == CURSEG_COLD_DATA);
 
+	if (keep_order)
+		down_read(&fio->sbi->io_order_lock);
 reallocate:
 	allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
 			&fio->new_blkaddr, sum, type, fio, true);
@@ -2844,6 +2847,8 @@ reallocate:
 	} else if (!err) {
 		update_device_state(fio);
 	}
+	if (keep_order)
+		up_read(&fio->sbi->io_order_lock);
 }
 
 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
fs/f2fs/super.c
@@ -2367,6 +2367,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
 	for (i = 0; i < NR_PAGE_TYPE - 1; i++)
 		for (j = HOT; j < NR_TEMP_TYPE; j++)
 			mutex_init(&sbi->wio_mutex[i][j]);
+	init_rwsem(&sbi->io_order_lock);
 	spin_lock_init(&sbi->cp_lock);
 
 	sbi->dirty_device = 0;
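As a side note, the locking above is the standard reader/writer pattern: regular writers in do_write_page() take io_order_lock shared, so they can still allocate and submit concurrently with each other, while move_data_block() takes it exclusively around its allocate-and-submit sequence so migration IO cannot interleave with them. Below is a minimal userspace sketch of that pattern, assuming hypothetical stand-ins write_page() and migrate_block() for the kernel paths; it is an illustration of the idea only, not kernel code.

/* Userspace sketch of the io_order_lock pattern (illustrative only).
 * write_page()/migrate_block() are hypothetical stand-ins for
 * do_write_page()/move_data_block(); pthread_rwlock_t plays the role
 * of the kernel rw_semaphore. Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t io_order_lock = PTHREAD_RWLOCK_INITIALIZER;
static int next_blkaddr;	/* simulated allocation cursor */

static void write_page(int page)
{
	/* shared: regular writers may run concurrently with each other */
	pthread_rwlock_rdlock(&io_order_lock);
	int blk = __sync_fetch_and_add(&next_blkaddr, 1);	/* allocate */
	printf("write   page %d -> blk %d\n", page, blk);	/* submit */
	pthread_rwlock_unlock(&io_order_lock);
}

static void migrate_block(int page)
{
	/* exclusive: allocate+submit cannot interleave with write_page() */
	pthread_rwlock_wrlock(&io_order_lock);
	int blk = __sync_fetch_and_add(&next_blkaddr, 1);	/* allocate */
	printf("migrate page %d -> blk %d\n", page, blk);	/* submit */
	pthread_rwlock_unlock(&io_order_lock);
}

int main(void)
{
	write_page(1);
	migrate_block(2);
	write_page(3);
	return 0;
}

Using a rw_semaphore rather than a plain mutex presumably keeps the common write path cheap: regular writers only serialize against migration, not against one another.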