raid5-cache: use bio chaining
Simplify the bio completion handler by using bio chaining and submitting
bios as soon as they are full.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Shaohua Li <shli@fb.com>
Signed-off-by: NeilBrown <neilb@suse.com>
parent 2b8ef16ec4
commit 6143e2cecb

1 changed file with 16 additions and 22 deletions
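For readers new to bio chaining, here is a minimal sketch of the pattern the patch adopts, assuming the bio_chain()/submit_bio() interfaces of this kernel series; r5l_chain_and_submit() is a hypothetical helper for illustration, not code from the commit:

/*
 * When the current log bio cannot take more data (here: the log has
 * wrapped), allocate a fresh bio, chain it behind the old one, and
 * submit the old one right away. bio_chain(next, prev) drops next's
 * own ->bi_end_io and defers prev's until next has completed too, so
 * the r5l_log_endio() installed on the io_unit's first bio runs
 * exactly once, after every bio in the chain has finished. That is
 * what lets the patch delete the bios list and the pending_io counter.
 */
static struct bio *r5l_chain_and_submit(struct r5l_log *log, struct bio *prev)
{
	struct bio *next = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);

	next->bi_rw = WRITE;
	next->bi_bdev = log->rdev->bdev;
	next->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	bio_chain(next, prev);		/* prev's endio now also waits on next */
	submit_bio(WRITE, prev);	/* safe to start prev immediately */
	return next;
}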
drivers/md/raid5-cache.c
@@ -100,8 +100,6 @@ struct r5l_io_unit {
 	struct page *meta_page;	/* store meta block */
 	int meta_offset;	/* current offset in meta_page */
 
-	struct bio_list bios;
-	atomic_t pending_io;	/* pending bios not written to log yet */
 	struct bio *current_bio;/* current_bio accepting new data */
 
 	atomic_t pending_stripe;/* how many stripes not flushed to raid */
@@ -112,6 +110,7 @@ struct r5l_io_unit {
 	struct list_head stripe_list; /* stripes added to the io_unit */
 
 	int state;
+	bool need_split_bio;
 };
 
 /* r5l_io_unit state */
@@ -215,9 +214,6 @@ static void r5l_log_endio(struct bio *bio)
 
 	bio_put(bio);
 
-	if (!atomic_dec_and_test(&io->pending_io))
-		return;
-
 	spin_lock_irqsave(&log->io_list_lock, flags);
 	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
 	if (log->need_cache_flush)
@@ -235,7 +231,6 @@ static void r5l_submit_current_io(struct r5l_log *log)
 {
 	struct r5l_io_unit *io = log->current_io;
 	struct r5l_meta_block *block;
-	struct bio *bio;
 	unsigned long flags;
 	u32 crc;
 
@@ -252,22 +247,17 @@ static void r5l_submit_current_io(struct r5l_log *log)
 	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
 	spin_unlock_irqrestore(&log->io_list_lock, flags);
 
-	while ((bio = bio_list_pop(&io->bios)))
-		submit_bio(WRITE, bio);
+	submit_bio(WRITE, io->current_bio);
 }
 
-static struct bio *r5l_bio_alloc(struct r5l_log *log, struct r5l_io_unit *io)
+static struct bio *r5l_bio_alloc(struct r5l_log *log)
 {
 	struct bio *bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
 
 	bio->bi_rw = WRITE;
 	bio->bi_bdev = log->rdev->bdev;
 	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
-	bio->bi_end_io = r5l_log_endio;
-	bio->bi_private = io;
 
-	bio_list_add(&io->bios, bio);
-	atomic_inc(&io->pending_io);
 	return bio;
 }
 
@@ -283,7 +273,7 @@ static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
 	 * of BLOCK_SECTORS.
 	 */
 	if (log->log_start == 0)
-		io->current_bio = NULL;
+		io->need_split_bio = true;
 
 	io->log_end = log->log_start;
 }
@@ -296,7 +286,6 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
 	/* We can't handle memory allocate failure so far */
 	io = kmem_cache_zalloc(log->io_kc, GFP_NOIO | __GFP_NOFAIL);
 	io->log = log;
-	bio_list_init(&io->bios);
 	INIT_LIST_HEAD(&io->log_sibling);
 	INIT_LIST_HEAD(&io->stripe_list);
 	io->state = IO_UNIT_RUNNING;
@@ -312,7 +301,9 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
 	io->meta_offset = sizeof(struct r5l_meta_block);
 	io->seq = log->seq++;
 
-	io->current_bio = r5l_bio_alloc(log, io);
+	io->current_bio = r5l_bio_alloc(log);
+	io->current_bio->bi_end_io = r5l_log_endio;
+	io->current_bio->bi_private = io;
 	bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);
 
 	r5_reserve_log_entry(log, io);
@@ -361,15 +352,18 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
 {
 	struct r5l_io_unit *io = log->current_io;
 
-alloc_bio:
-	if (!io->current_bio)
-		io->current_bio = r5l_bio_alloc(log, io);
+	if (io->need_split_bio) {
+		struct bio *prev = io->current_bio;
 
-	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) {
-		io->current_bio = NULL;
-		goto alloc_bio;
+		io->current_bio = r5l_bio_alloc(log);
+		bio_chain(io->current_bio, prev);
+
+		submit_bio(WRITE, prev);
 	}
 
+	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
+		BUG();
+
 	r5_reserve_log_entry(log, io);
 }
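With chaining in place, the completion side needs no "last bio out" accounting. A sketch of r5l_log_endio()'s shape after the patch, assembled from the context lines above (error handling and the cache-flush hand-off elided), not the commit's verbatim code:

/*
 * The handler now runs a single time per io_unit, on the first bio of
 * the chain (the only one whose ->bi_end_io/->bi_private were set in
 * r5l_new_meta()).
 */
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_log *log = io->log;
	unsigned long flags;

	bio_put(bio);

	/*
	 * No atomic_dec_and_test(&io->pending_io) any more: bio_chain()
	 * guarantees every bio of the io_unit has already completed.
	 */
	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
	/* ... queue a cache flush or complete the io_unit, as before ... */
	spin_unlock_irqrestore(&log->io_list_lock, flags);
}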