/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h>

/* Rate limiting */
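
/*
 * Rough sketch of the controller below: the writeback rate is adjusted by a
 * proportional-derivative (PD) controller.  The proportional term is the
 * amount of dirty data above the target (dirty - target, in sectors); the
 * derivative term is the change in dirty data since the last update,
 * smoothed with an EWMA.  Both are scaled by the tunables set in
 * bch_cached_dev_writeback_init(), summed, and added to the current rate,
 * which is clamped to the range [1, NSEC_PER_MSEC].
 *
 * Worked example with the defaults from this file (update period 5 s,
 * p_term_inverse 6000): if the cache holds 1 GiB (2097152 sectors) more
 * dirty data than the target, the proportional term alone raises the rate
 * by 2097152 * 5 / 6000 ~= 1747 sectors per second at each update.
 */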
static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
				bcache_flash_devs_sectors_dirty(c);
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */

	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;
	int64_t proportional = dirty - target;
	int64_t change;

	dc->disk.sectors_dirty_last = dirty;

	/* Scale to sectors per second */

	proportional *= dc->writeback_rate_update_seconds;
	proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);

	derivative = div_s64(derivative, dc->writeback_rate_update_seconds);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      (dc->writeback_rate_d_term /
			       dc->writeback_rate_update_seconds) ?: 1, 0);

	derivative *= dc->writeback_rate_d_term;
	derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);

	change = proportional + derivative;

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);

	dc->writeback_rate_proportional = proportional;
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;
}

static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}

static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}
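
/*
 * How the rate is applied, roughly: read_dirty() below calls
 * writeback_delay() after issuing each key and sleeps for the returned
 * number of jiffies, so bch_next_delay() ends up spacing writeback IOs to
 * match dc->writeback_rate.  A return of 0 (device detaching, or
 * writeback_percent set to 0) means "write back at full speed"; in the
 * writeback_percent == 0 case dirty_init() also marks the bios with idle
 * I/O priority.
 */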

struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	struct bio		bio;
};
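
/*
 * Sketch of the writeback pipeline built from the functions below: for each
 * dirty key, read_dirty() allocates a struct dirty_io and reads the extent
 * from the cache device (read_dirty_submit/read_dirty_endio), write_dirty()
 * writes it to the backing device, and write_dirty_finish() reinserts the
 * key with the dirty bit cleared so the btree records that the backing
 * device is up to date.
 */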

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio);
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
	bio->bi_private		= w;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, &io->bio, i)
		__free_page(bv->bv_page);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_error)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	dirty_init(w);
	io->bio.bi_rw		= WRITE;
	io->bio.bi_iter.bi_sector = KEY_START(&w->key);
	io->bio.bi_bdev		= io->dc->bdev;
	io->bio.bi_end_io	= dirty_endio;

	closure_bio_submit(&io->bio, cl);

	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}

static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_error, "reading dirty data from cache");

	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl);

	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *w;
	struct dirty_io *io;
	struct closure cl;

	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	while (!kthread_should_stop()) {
		try_to_freeze();

		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

		if (KEY_START(&w->key) != dc->last_read ||
		    jiffies_to_msecs(delay) > 50)
			while (!kthread_should_stop() && delay)
				delay = schedule_timeout_interruptible(delay);

		dc->last_read	= KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->dc		= dc;

		dirty_init(w);
		io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
		io->bio.bi_bdev		= PTR_CACHE(dc->disk.c,
						    &w->key, 0)->bdev;
		io->bio.bi_rw		= READ;
		io->bio.bi_end_io	= read_dirty_endio;

		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		down(&dc->in_flight);
		closure_call(&io->cl, read_dirty_submit, NULL, &cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}
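
/*
 * Writeback concurrency is bounded by dc->in_flight: read_dirty() above
 * takes the semaphore once per key and write_dirty_finish() releases it.
 * The semaphore is initialized to 64 in bch_cached_dev_writeback_init(),
 * so at most 64 dirty extents are in flight at any time.
 */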

/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}
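
/*
 * The per-stripe counters maintained above also drive d->full_dirty_stripes:
 * a stripe whose dirty count reaches stripe_size is marked full, which lets
 * refill_full_stripes() below prefer completely dirty stripes when partial
 * stripe writes are expensive (dc->partial_stripes_expensive).
 */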

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}

static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * make sure keybuf pos is inside the range for this disk - at bringup
	 * we might not be attached yet so this disk's inode nr isn't
	 * initialized then
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}
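
/*
 * The return value of refill_dirty() matters in bch_writeback_thread()
 * below: only after a scan that covered the whole index and left the keybuf
 * empty does the thread clear dc->has_dirty and mark the backing device
 * clean in its superblock.
 */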

static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * If the bcache device is detaching, skip here and continue
		 * to perform writeback. Otherwise, if no dirty data on cache,
		 * or there is dirty data on cache but writeback is disabled,
		 * the writeback thread should sleep here and wait for others
		 * to wake it up.
		 */
		if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
			up_write(&dc->writeback_lock);

			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				return 0;
			}

			try_to_freeze();
			schedule();
			continue;
		}
		set_current_state(TASK_RUNNING);

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
			/*
			 * If the bcache device is detaching via the sysfs
			 * interface, the writeback thread should stop once
			 * there is no dirty data left on the cache.
			 * BCACHE_DEV_DETACHING is set in
			 * bch_cached_dev_detach().
			 */
			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				break;
		}

		up_write(&dc->writeback_lock);

		bch_ratelimit_reset(&dc->writeback_rate);
		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);
		}
	}

	return 0;
}

/* Init */

struct sectors_dirty_init {
	struct btree_op	op;
	unsigned	inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
						struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct bcache_device *d)
{
	struct sectors_dirty_init op;

	bch_btree_op_init(&op.op, -1);
	op.inode = d->id;

	bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);

	d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;

	dc->writeback_rate_update_seconds = 5;
	dc->writeback_rate_d_term	= 30;
	dc->writeback_rate_p_term_inverse = 6000;

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}
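
/*
 * With the defaults above, the controller aims to keep dirty data at 10%
 * of the cache (writeback_percent), re-evaluates the rate every 5 seconds
 * (writeback_rate_update_seconds), and bch_writeback_thread() waits 30
 * seconds after a full scan before rescanning (writeback_delay).  These are
 * only initial values; they are expected to be tunable at runtime (e.g. via
 * sysfs), though that code lives outside this file.
 */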

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
						WQ_MEM_RECLAIM, 0);
	if (!dc->writeback_write_wq)
		return -ENOMEM;

	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}