Merge "block: Initialize bd_bdi on inode initialization"

commit 22b27b16d8
Linux Build Service Account, 2017-10-18 11:08:22 -07:00, committed by Gerrit (the friendly Code Review server)
36 changed files with 158 additions and 90 deletions

View file

@@ -184,7 +184,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 goto err_free_blkg;
 }
-wb_congested = wb_congested_get_create(&q->backing_dev_info,
+wb_congested = wb_congested_get_create(q->backing_dev_info,
 blkcg->css.id, GFP_NOWAIT);
 if (!wb_congested) {
 ret = -ENOMEM;
@@ -468,8 +468,8 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 const char *blkg_dev_name(struct blkcg_gq *blkg)
 {
 /* some drivers (floppy) instantiate a queue w/o disk registered */
-if (blkg->q->backing_dev_info.dev)
-return dev_name(blkg->q->backing_dev_info.dev);
+if (blkg->q->backing_dev_info->dev)
+return dev_name(blkg->q->backing_dev_info->dev);
 return NULL;
 }
 EXPORT_SYMBOL_GPL(blkg_dev_name);

View file

@@ -87,7 +87,7 @@ static void blk_clear_congested(struct request_list *rl, int sync)
 * flip its congestion state for events on other blkcgs.
 */
 if (rl == &rl->q->root_rl)
-clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
@@ -98,7 +98,7 @@ static void blk_set_congested(struct request_list *rl, int sync)
 #else
 /* see blk_clear_congested() */
 if (rl == &rl->q->root_rl)
-set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
@@ -122,14 +122,12 @@ void blk_queue_congestion_threshold(struct request_queue *q)
 * @bdev: device
 *
 * Locates the passed device's request queue and returns the address of its
-* backing_dev_info. This function can only be called if @bdev is opened
-* and the return value is never NULL.
+* backing_dev_info. The return value is never NULL however we may return
+* &noop_backing_dev_info if the bdev is not currently open.
 */
 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 {
-struct request_queue *q = bdev_get_queue(bdev);
-return &q->backing_dev_info;
+return bdev->bd_bdi;
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
@@ -597,7 +595,7 @@ void blk_cleanup_queue(struct request_queue *q)
 blk_flush_integrity();
 /* @q won't process any more request, flush async actions */
-del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
 blk_sync_queue(q);
 if (q->mq_ops)
@@ -697,7 +695,6 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 struct request_queue *q;
-int err;
 q = kmem_cache_alloc_node(blk_requestq_cachep,
 gfp_mask | __GFP_ZERO, node_id);
@@ -712,17 +709,17 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 if (!q->bio_split)
 goto fail_id;
-q->backing_dev_info.ra_pages =
-(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
-q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
-q->backing_dev_info.name = "block";
-q->node = node_id;
-err = bdi_init(&q->backing_dev_info);
-if (err)
-goto fail_split;
-setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
+if (!q->backing_dev_info)
+goto fail_split;
+q->backing_dev_info->ra_pages =
+(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
+q->backing_dev_info->name = "block";
+q->node = node_id;
+setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
 laptop_mode_timer_fn, (unsigned long) q);
 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 INIT_LIST_HEAD(&q->queue_head);
@@ -772,7 +769,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 fail_ref:
 percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
-bdi_destroy(&q->backing_dev_info);
+bdi_put(q->backing_dev_info);
 fail_split:
 bioset_free(q->bio_split);
 fail_id:
@@ -1195,7 +1192,7 @@ fail_elvpriv:
 * disturb iosched and blkcg but weird is bettern than dead.
 */
 printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
-__func__, dev_name(q->backing_dev_info.dev));
+__func__, dev_name(q->backing_dev_info->dev));
 rq->cmd_flags &= ~REQ_ELVPRIV;
 rq->elv.icq = NULL;
@@ -3251,7 +3248,7 @@ void blk_finish_request(struct request *req, int error)
 BUG_ON(blk_queued_rq(req));
 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
-laptop_io_completion(&req->q->backing_dev_info);
+laptop_io_completion(req->q->backing_dev_info);
 blk_delete_timer(req);
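
The reworded comment on blk_get_backing_dev_info() above is the heart of this file's change: the helper now returns bdev->bd_bdi, so it is safe to call even when the block device is not open. Below is a minimal caller-side sketch of what that guarantee buys; it is hypothetical code written for illustration only (example_bdev_write_congested is an invented name, not part of this commit).

#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* Query write congestion without requiring the bdev to be open. */
static bool example_bdev_write_congested(struct block_device *bdev)
{
	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);

	/* Never NULL: an unopened bdev yields &noop_backing_dev_info. */
	return bdi_write_congested(bdi);
}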

View file

@@ -418,7 +418,7 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
 bi->tuple_size = template->tuple_size;
 bi->tag_size = template->tag_size;
-disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
 }
 EXPORT_SYMBOL(blk_integrity_register);
@@ -431,7 +431,7 @@ EXPORT_SYMBOL(blk_integrity_register);
 */
 void blk_integrity_unregister(struct gendisk *disk)
 {
-disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;
+disk->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
 memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
 }
 EXPORT_SYMBOL(blk_integrity_unregister);

View file

@@ -75,7 +75,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
-unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+unsigned long ra_kb = q->backing_dev_info->ra_pages <<
 (PAGE_CACHE_SHIFT - 10);
 return queue_var_show(ra_kb, (page));
@@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 if (ret < 0)
 return ret;
-q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+q->backing_dev_info->ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
 return ret;
 }
@@ -578,7 +578,7 @@ static void blk_release_queue(struct kobject *kobj)
 struct request_queue *q =
 container_of(kobj, struct request_queue, kobj);
-bdi_exit(&q->backing_dev_info);
+bdi_put(q->backing_dev_info);
 blkcg_exit_queue(q);
 if (q->elevator) {

View file

@@ -611,7 +611,7 @@ void add_disk(struct gendisk *disk)
 disk_alloc_events(disk);
 /* Register BDI before referencing it from bdev */
-bdi = &disk->queue->backing_dev_info;
+bdi = disk->queue->backing_dev_info;
 bdi_register_owner(bdi, disk_to_dev(disk));
 blk_register_region(disk_devt(disk), disk->minors, NULL,
@@ -646,6 +646,8 @@ void del_gendisk(struct gendisk *disk)
 disk_part_iter_init(&piter, disk,
 DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
 while ((part = disk_part_iter_next(&piter))) {
+bdev_unhash_inode(MKDEV(disk->major,
+disk->first_minor + part->partno));
 invalidate_partition(disk, part->partno);
 delete_partition(disk, part->partno);
 }
@@ -661,7 +663,7 @@ void del_gendisk(struct gendisk *disk)
 * Unregister bdi before releasing device numbers (as they can
 * get reused and we'd get clashes in sysfs).
 */
-bdi_unregister(&disk->queue->backing_dev_info);
+bdi_unregister(disk->queue->backing_dev_info);
 blk_unregister_queue(disk);
 } else {
 WARN_ON(1);

View file

@@ -396,8 +396,8 @@ aoeblk_gdalloc(void *vp)
 WARN_ON(d->gd);
 WARN_ON(d->flags & DEVFL_UP);
 blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
-q->backing_dev_info.name = "aoe";
-q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
+q->backing_dev_info->name = "aoe";
+q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
 d->bufpool = mp;
 d->blkq = gd->queue = q;
 q->queuedata = d;

View file

@@ -2393,7 +2393,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
 if (get_ldev(device)) {
 q = bdev_get_queue(device->ldev->backing_bdev);
-r = bdi_congested(&q->backing_dev_info, bdi_bits);
+r = bdi_congested(q->backing_dev_info, bdi_bits);
 put_ldev(device);
 if (r)
 reason = 'b';
@@ -2765,8 +2765,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 /* we have no partitions. we contain only ourselves. */
 device->this_bdev->bd_contains = device->this_bdev;
-q->backing_dev_info.congested_fn = drbd_congested;
-q->backing_dev_info.congested_data = device;
+q->backing_dev_info->congested_fn = drbd_congested;
+q->backing_dev_info->congested_data = device;
 blk_queue_make_request(q, drbd_make_request);
 blk_queue_flush(q, REQ_FLUSH | REQ_FUA);

View file

@@ -1170,11 +1170,11 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
 blk_queue_stack_limits(q, b);
-if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
+if (q->backing_dev_info->ra_pages != b->backing_dev_info->ra_pages) {
 drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
-q->backing_dev_info.ra_pages,
-b->backing_dev_info.ra_pages);
-q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
+q->backing_dev_info->ra_pages,
+b->backing_dev_info->ra_pages);
+q->backing_dev_info->ra_pages = b->backing_dev_info->ra_pages;
 }
 }
 }

View file

@@ -288,7 +288,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 seq_printf(seq, "%2d: cs:Unconfigured\n", i);
 } else {
 /* reset device->congestion_reason */
-bdi_rw_congested(&device->rq_queue->backing_dev_info);
+bdi_rw_congested(device->rq_queue->backing_dev_info);
 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
 wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';

View file

@@ -937,7 +937,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se
 switch (rbm) {
 case RB_CONGESTED_REMOTE:
-bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
 return bdi_read_congested(bdi);
 case RB_LEAST_PENDING:
 return atomic_read(&device->local_cnt) >

View file

@@ -1276,7 +1276,7 @@ try_next_bio:
 && pd->bio_queue_size <= pd->write_congestion_off);
 spin_unlock(&pd->lock);
 if (wakeup) {
-clear_bdi_congested(&pd->disk->queue->backing_dev_info,
+clear_bdi_congested(pd->disk->queue->backing_dev_info,
 BLK_RW_ASYNC);
 }
@@ -2405,7 +2405,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
 spin_lock(&pd->lock);
 if (pd->write_congestion_on > 0
 && pd->bio_queue_size >= pd->write_congestion_on) {
-set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
+set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
 do {
 spin_unlock(&pd->lock);
 congestion_wait(BLK_RW_ASYNC, HZ);

View file

@@ -3780,7 +3780,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 q->limits.discard_zeroes_data = 1;
 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
-q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
 disk->queue = q;

View file

@@ -1014,7 +1014,7 @@ static int cached_dev_congested(void *data, int bits)
 struct request_queue *q = bdev_get_queue(dc->bdev);
 int ret = 0;
-if (bdi_congested(&q->backing_dev_info, bits))
+if (bdi_congested(q->backing_dev_info, bits))
 return 1;
 if (cached_dev_get(dc)) {
@@ -1023,7 +1023,7 @@ static int cached_dev_congested(void *data, int bits)
 for_each_cache(ca, d->c, i) {
 q = bdev_get_queue(ca->bdev);
-ret |= bdi_congested(&q->backing_dev_info, bits);
+ret |= bdi_congested(q->backing_dev_info, bits);
 }
 cached_dev_put(dc);
@@ -1037,7 +1037,7 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 struct gendisk *g = dc->disk.disk;
 g->queue->make_request_fn = cached_dev_make_request;
-g->queue->backing_dev_info.congested_fn = cached_dev_congested;
+g->queue->backing_dev_info->congested_fn = cached_dev_congested;
 dc->disk.cache_miss = cached_dev_cache_miss;
 dc->disk.ioctl = cached_dev_ioctl;
 }
@@ -1130,7 +1130,7 @@ static int flash_dev_congested(void *data, int bits)
 for_each_cache(ca, d->c, i) {
 q = bdev_get_queue(ca->bdev);
-ret |= bdi_congested(&q->backing_dev_info, bits);
+ret |= bdi_congested(q->backing_dev_info, bits);
 }
 return ret;
@@ -1141,7 +1141,7 @@ void bch_flash_dev_request_init(struct bcache_device *d)
 struct gendisk *g = d->disk;
 g->queue->make_request_fn = flash_dev_make_request;
-g->queue->backing_dev_info.congested_fn = flash_dev_congested;
+g->queue->backing_dev_info->congested_fn = flash_dev_congested;
 d->cache_miss = flash_dev_cache_miss;
 d->ioctl = flash_dev_ioctl;
 }

View file

@@ -802,7 +802,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 blk_queue_make_request(q, NULL);
 d->disk->queue = q;
 q->queuedata = d;
-q->backing_dev_info.congested_data = d;
+q->backing_dev_info->congested_data = d;
 q->limits.max_hw_sectors = UINT_MAX;
 q->limits.max_sectors = UINT_MAX;
 q->limits.max_segment_size = UINT_MAX;
@@ -1129,9 +1129,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
 set_capacity(dc->disk.disk,
 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
-dc->disk.disk->queue->backing_dev_info.ra_pages =
-max(dc->disk.disk->queue->backing_dev_info.ra_pages,
-q->backing_dev_info.ra_pages);
+dc->disk.disk->queue->backing_dev_info->ra_pages =
+max(dc->disk.disk->queue->backing_dev_info->ra_pages,
+q->backing_dev_info->ra_pages);
 bch_cached_dev_request_init(dc);
 bch_cached_dev_writeback_init(dc);

View file

@@ -2288,7 +2288,7 @@ static void do_waker(struct work_struct *ws)
 static int is_congested(struct dm_dev *dev, int bdi_bits)
 {
 struct request_queue *q = bdev_get_queue(dev->bdev);
-return bdi_congested(&q->backing_dev_info, bdi_bits);
+return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)

View file

@@ -1379,7 +1379,7 @@ static void stop_worker(struct era *era)
 static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
 {
 struct request_queue *q = bdev_get_queue(dev->bdev);
-return bdi_congested(&q->backing_dev_info, bdi_bits);
+return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)

View file

@@ -1660,7 +1660,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 char b[BDEVNAME_SIZE];
 if (likely(q))
-r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+r |= bdi_congested(q->backing_dev_info, bdi_bits);
 else
 DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
 dm_device_name(t->md),

View file

@@ -2634,7 +2634,7 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
 return 1;
 q = bdev_get_queue(pt->data_dev->bdev);
-return bdi_congested(&q->backing_dev_info, bdi_bits);
+return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 static void requeue_bios(struct pool *pool)

View file

@@ -2220,7 +2220,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
 * the query about congestion status of request_queue
 */
 if (dm_request_based(md))
-r = md->queue->backing_dev_info.wb.state &
+r = md->queue->backing_dev_info->wb.state &
 bdi_bits;
 else
 r = dm_table_any_congested(map, bdi_bits);
@@ -2302,7 +2302,7 @@ static void dm_init_md_queue(struct mapped_device *md)
 * - must do so here (in alloc_dev callchain) before queue is used
 */
 md->queue->queuedata = md;
-md->queue->backing_dev_info.congested_data = md;
+md->queue->backing_dev_info->congested_data = md;
 }
 static void dm_init_old_md_queue(struct mapped_device *md)
@@ -2313,7 +2313,7 @@ static void dm_init_old_md_queue(struct mapped_device *md)
 /*
 * Initialize aspects of queue that aren't relevant for blk-mq
 */
-md->queue->backing_dev_info.congested_fn = dm_any_congested;
+md->queue->backing_dev_info->congested_fn = dm_any_congested;
 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 }

View file

@@ -68,7 +68,7 @@ static int linear_congested(struct mddev *mddev, int bits)
 for (i = 0; i < conf->raid_disks && !ret ; i++) {
 struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
-ret |= bdi_congested(&q->backing_dev_info, bits);
+ret |= bdi_congested(q->backing_dev_info, bits);
 }
 rcu_read_unlock();

View file

@@ -5284,8 +5284,8 @@ int md_run(struct mddev *mddev)
 return err;
 }
 if (mddev->queue) {
-mddev->queue->backing_dev_info.congested_data = mddev;
-mddev->queue->backing_dev_info.congested_fn = md_congested;
+mddev->queue->backing_dev_info->congested_data = mddev;
+mddev->queue->backing_dev_info->congested_fn = md_congested;
 }
 if (pers->sync_request) {
 if (mddev->kobj.sd &&
@@ -5642,7 +5642,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
 __md_stop_writes(mddev);
 __md_stop(mddev);
-mddev->queue->backing_dev_info.congested_fn = NULL;
+mddev->queue->backing_dev_info->congested_fn = NULL;
 /* tell userspace to handle 'inactive' */
 sysfs_notify_dirent_safe(mddev->sysfs_state);

View file

@@ -166,7 +166,7 @@ static int multipath_congested(struct mddev *mddev, int bits)
 if (rdev && !test_bit(Faulty, &rdev->flags)) {
 struct request_queue *q = bdev_get_queue(rdev->bdev);
-ret |= bdi_congested(&q->backing_dev_info, bits);
+ret |= bdi_congested(q->backing_dev_info, bits);
 /* Just like multipath_map, we just check the
 * first available device
 */

View file

@@ -35,7 +35,7 @@ static int raid0_congested(struct mddev *mddev, int bits)
 for (i = 0; i < raid_disks && !ret ; i++) {
 struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
-ret |= bdi_congested(&q->backing_dev_info, bits);
+ret |= bdi_congested(q->backing_dev_info, bits);
 }
 return ret;
 }
@@ -415,8 +415,8 @@ static int raid0_run(struct mddev *mddev)
 */
 int stripe = mddev->raid_disks *
 (mddev->chunk_sectors << 9) / PAGE_SIZE;
-if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
-mddev->queue->backing_dev_info.ra_pages = 2* stripe;
+if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
+mddev->queue->backing_dev_info->ra_pages = 2* stripe;
 }
 dump_zones(mddev);

View file

@@ -730,9 +730,9 @@ static int raid1_congested(struct mddev *mddev, int bits)
 * non-congested targets, it can be removed
 */
 if ((bits & (1 << WB_async_congested)) || 1)
-ret |= bdi_congested(&q->backing_dev_info, bits);
+ret |= bdi_congested(q->backing_dev_info, bits);
 else
-ret &= bdi_congested(&q->backing_dev_info, bits);
+ret &= bdi_congested(q->backing_dev_info, bits);
 }
 }
 rcu_read_unlock();

View file

@@ -838,7 +838,7 @@ static int raid10_congested(struct mddev *mddev, int bits)
 if (rdev && !test_bit(Faulty, &rdev->flags)) {
 struct request_queue *q = bdev_get_queue(rdev->bdev);
-ret |= bdi_congested(&q->backing_dev_info, bits);
+ret |= bdi_congested(q->backing_dev_info, bits);
 }
 }
 rcu_read_unlock();
@@ -3698,8 +3698,8 @@ static int run(struct mddev *mddev)
 * maybe...
 */
 stripe /= conf->geo.near_copies;
-if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 }
 if (md_integrity_register(mddev))
@@ -4493,8 +4493,8 @@ static void end_reshape(struct r10conf *conf)
 int stripe = conf->geo.raid_disks *
 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
 stripe /= conf->geo.near_copies;
-if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 }
 conf->fullsync = 0;
 }

View file

@@ -6123,10 +6123,10 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
 mddev_suspend(mddev);
 conf->skip_copy = new;
 if (new)
-mddev->queue->backing_dev_info.capabilities |=
+mddev->queue->backing_dev_info->capabilities |=
 BDI_CAP_STABLE_WRITES;
 else
-mddev->queue->backing_dev_info.capabilities &=
+mddev->queue->backing_dev_info->capabilities &=
 ~BDI_CAP_STABLE_WRITES;
 mddev_resume(mddev);
 }
@@ -6970,8 +6970,8 @@ static int run(struct mddev *mddev)
 int data_disks = conf->previous_raid_disks - conf->max_degraded;
 int stripe = data_disks *
 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
-if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 chunk_size = mddev->chunk_sectors << 9;
 blk_queue_io_min(mddev->queue, chunk_size);
@@ -7552,8 +7552,8 @@ static void end_reshape(struct r5conf *conf)
 int data_disks = conf->raid_disks - conf->max_degraded;
 int stripe = data_disks * ((conf->chunk_sectors << 9)
 / PAGE_SIZE);
-if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 }
 }
 }

View file

@@ -532,6 +532,7 @@ static void init_once(void *foo)
 #ifdef CONFIG_SYSFS
 INIT_LIST_HEAD(&bdev->bd_holder_disks);
 #endif
+bdev->bd_bdi = &noop_backing_dev_info;
 inode_init_once(&ei->vfs_inode);
 /* Initialize mutex for freeze. */
 mutex_init(&bdev->bd_fsfreeze_mutex);
@@ -557,6 +558,10 @@ static void bdev_evict_inode(struct inode *inode)
 }
 list_del_init(&bdev->bd_list);
 spin_unlock(&bdev_lock);
+if (bdev->bd_bdi != &noop_backing_dev_info) {
+bdi_put(bdev->bd_bdi);
+bdev->bd_bdi = &noop_backing_dev_info;
+}
 }
 static const struct super_operations bdev_sops = {
@@ -623,6 +628,21 @@ static int bdev_set(struct inode *inode, void *data)
 static LIST_HEAD(all_bdevs);
+/*
+* If there is a bdev inode for this device, unhash it so that it gets evicted
+* as soon as last inode reference is dropped.
+*/
+void bdev_unhash_inode(dev_t dev)
+{
+struct inode *inode;
+inode = ilookup5(blockdev_superblock, hash(dev), bdev_test, &dev);
+if (inode) {
+remove_inode_hash(inode);
+iput(inode);
+}
+}
 struct block_device *bdget(dev_t dev)
 {
 struct block_device *bdev;
@@ -1201,6 +1221,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 bdev->bd_disk = disk;
 bdev->bd_queue = disk->queue;
 bdev->bd_contains = bdev;
+if (bdev->bd_bdi == &noop_backing_dev_info)
+bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
 bdev->bd_inode->i_flags = disk->fops->direct_access ? S_DAX : 0;
 if (!partno) {
 ret = -ENXIO;
@@ -1302,6 +1325,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 bdev->bd_disk = NULL;
 bdev->bd_part = NULL;
 bdev->bd_queue = NULL;
+bdi_put(bdev->bd_bdi);
+bdev->bd_bdi = &noop_backing_dev_info;
 if (bdev != bdev->bd_contains)
 __blkdev_put(bdev->bd_contains, mode, 1);
 bdev->bd_contains = NULL;
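
Taken together, these fs/block_dev.c hunks pin the queue's backing_dev_info in bdev->bd_bdi from the first successful open until the bdev inode is evicted (or the failed open is unwound). A hedged illustration of what that enables for a caller is sketched below; the helper name is hypothetical and not part of this commit.

#include <linux/fs.h>
#include <linux/backing-dev.h>

/* Pin the bdev's bdi so it can be used after the current open ends. */
static struct backing_dev_info *example_pin_bdi(struct block_device *bdev)
{
	/* bd_bdi is never NULL; at worst it is &noop_backing_dev_info. */
	return bdi_get(bdev->bd_bdi);
}

/* The caller later balances this reference with bdi_put(). */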

View file

@@ -1222,7 +1222,7 @@ static int set_gfs2_super(struct super_block *s, void *data)
 * We set the bdi here to the queue backing, file systems can
 * overwrite this in ->fill_super()
 */
-s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
 return 0;
 }

View file

@@ -1079,7 +1079,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
 sb->s_time_gran = 1;
 sb->s_max_links = NILFS_LINK_MAX;
-sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
+sb->s_bdi = bdev_get_queue(sb->s_bdev)->backing_dev_info;
 err = load_nilfs(nilfs, sb);
 if (err)

View file

@@ -968,7 +968,7 @@ static int set_bdev_super(struct super_block *s, void *data)
 * We set the bdi here to the queue backing, file systems can
 * overwrite this in ->fill_super()
 */
-s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
 return 0;
 }

View file

@@ -10,6 +10,7 @@
 #include <linux/flex_proportions.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
+#include <linux/kref.h>
 struct page;
 struct device;
@@ -141,6 +142,7 @@ struct backing_dev_info {
 void *congested_data; /* Pointer to aux data for congested func */
 char *name;
+struct kref refcnt; /* Reference counter for the structure */
 unsigned int min_ratio;
 unsigned int max_ratio, max_prop_frac;

View file

@@ -18,7 +18,14 @@
 #include <linux/slab.h>
 int __must_check bdi_init(struct backing_dev_info *bdi);
-void bdi_exit(struct backing_dev_info *bdi);
+static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
+{
+kref_get(&bdi->refcnt);
+return bdi;
+}
+void bdi_put(struct backing_dev_info *bdi);
 __printf(3, 4)
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
@@ -29,6 +36,7 @@ void bdi_unregister(struct backing_dev_info *bdi);
 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
 void bdi_destroy(struct backing_dev_info *bdi);
+struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
 void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
 bool range_cyclic, enum wb_reason reason);
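
These declarations replace the exported bdi_exit() with a reference-counted lifetime. A minimal usage sketch of the new API follows; it is hypothetical driver code with invented names (the real conversions are in the blk-core.c and fs/block_dev.c hunks above).

#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/numa.h>

static struct backing_dev_info *example_bdi;

static int example_bdi_setup(void)
{
	/* Allocate and initialise a refcounted bdi (refcount starts at 1). */
	example_bdi = bdi_alloc_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!example_bdi)
		return -ENOMEM;
	example_bdi->name = "example";
	return 0;
}

static void example_bdi_teardown(void)
{
	/* Additional users pair bdi_get() with bdi_put(); the final
	 * bdi_put() frees the structure instead of bdi_exit()/kfree(). */
	bdi_put(example_bdi);
	example_bdi = NULL;
}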

View file

@@ -332,7 +332,7 @@ struct request_queue {
 */
 struct delayed_work delay_work;
-struct backing_dev_info backing_dev_info;
+struct backing_dev_info *backing_dev_info;
 /*
 * The queue owner gets to use this for whatever they like.

View file

@@ -481,6 +481,7 @@ struct block_device {
 int bd_invalidated;
 struct gendisk * bd_disk;
 struct request_queue * bd_queue;
+struct backing_dev_info *bd_bdi;
 struct list_head bd_list;
 /*
 * Private data. You must have bd_claim'ed the block_device
@@ -2331,6 +2332,7 @@ extern struct kmem_cache *names_cachep;
 #ifdef CONFIG_BLOCK
 extern int register_blkdev(unsigned int, const char *);
 extern void unregister_blkdev(unsigned int, const char *);
+extern void bdev_unhash_inode(dev_t dev);
 extern struct block_device *bdget(dev_t);
 extern struct block_device *bdgrab(struct block_device *bdev);
 extern void bd_set_size(struct block_device *, loff_t size);

View file

@@ -237,6 +237,7 @@ static __init int bdi_class_init(void)
 bdi_class->dev_groups = bdi_dev_groups;
 bdi_debug_init();
+
 return 0;
 }
 postcore_initcall(bdi_class_init);
@@ -780,6 +781,7 @@ int bdi_init(struct backing_dev_info *bdi)
 bdi->dev = NULL;
+kref_init(&bdi->refcnt);
 bdi->min_ratio = 0;
 bdi->max_ratio = 100;
 bdi->max_prop_frac = FPROP_FRAC_BASE;
@@ -795,6 +797,22 @@ int bdi_init(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(bdi_init);
+struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
+{
+struct backing_dev_info *bdi;
+bdi = kmalloc_node(sizeof(struct backing_dev_info),
+gfp_mask | __GFP_ZERO, node_id);
+if (!bdi)
+return NULL;
+if (bdi_init(bdi)) {
+kfree(bdi);
+return NULL;
+}
+return bdi;
+}
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 const char *fmt, ...)
 {
@@ -875,12 +893,26 @@ void bdi_unregister(struct backing_dev_info *bdi)
 }
 }
-void bdi_exit(struct backing_dev_info *bdi)
+static void bdi_exit(struct backing_dev_info *bdi)
 {
 WARN_ON_ONCE(bdi->dev);
 wb_exit(&bdi->wb);
 }
+static void release_bdi(struct kref *ref)
+{
+struct backing_dev_info *bdi =
+container_of(ref, struct backing_dev_info, refcnt);
+bdi_exit(bdi);
+kfree(bdi);
+}
+void bdi_put(struct backing_dev_info *bdi)
+{
+kref_put(&bdi->refcnt, release_bdi);
+}
 void bdi_destroy(struct backing_dev_info *bdi)
 {
 bdi_unregister(bdi);

View file

@@ -1993,11 +1993,11 @@ void laptop_mode_timer_fn(unsigned long data)
 * We want to write everything out, not just down to the dirty
 * threshold
 */
-if (!bdi_has_dirty_io(&q->backing_dev_info))
+if (!bdi_has_dirty_io(q->backing_dev_info))
 return;
 rcu_read_lock();
-list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node)
+list_for_each_entry_rcu(wb, &q->backing_dev_info->wb_list, bdi_node)
 if (wb_has_dirty_io(wb))
 wb_start_writeback(wb, nr_pages, true,
 WB_REASON_LAPTOP_TIMER);