block: Use pointer to backing_dev_info from request_queue
We will want to have struct backing_dev_info allocated separately from
struct request_queue. As the first step, add a pointer to
backing_dev_info to request_queue and convert all users that touch it.
No functional changes in this patch.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
Change-Id: I77fbb181de7e39c83fbfba8cfb128d6ace161f31
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
Git-commit: 97419acd22a0bacc52dbc34d5bbc96d315e48acb
[riteshh@codeaurora.org: resolved merge conflicts]
Signed-off-by: Ritesh Harjani <riteshh@codeaurora.org>
parent 393737a28b
commit 1c6dd64534

31 changed files with 81 additions and 79 deletions
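The conversion below is mechanical: every former `&q->backing_dev_info`
(taking the address of an embedded member) becomes `q->backing_dev_info`
(a plain pointer read), and `.` field accesses become `->`. A minimal
userspace sketch of the layout this patch introduces (the two-member
struct and the pointer wiring mirror the struct request_queue and
blk_alloc_queue_node() hunks below; the stub types and field values are
illustrative only):

    /* Sketch only: kernel structures reduced to userspace stubs. */
    #include <stdio.h>

    struct backing_dev_info {
            unsigned long ra_pages;     /* read-ahead window, in pages */
            const char *name;
    };

    struct request_queue {
            /* After this patch, all users go through the pointer... */
            struct backing_dev_info *backing_dev_info;
            /* ...which, for now, still points at an embedded instance. */
            struct backing_dev_info _backing_dev_info;
    };

    int main(void)
    {
            struct request_queue q;

            /* blk_alloc_queue_node() wires the pointer up first... */
            q.backing_dev_info = &q._backing_dev_info;

            /* ...so former "&q->backing_dev_info" users drop the "&",
             * and "." accesses become "->". */
            q.backing_dev_info->name = "block";
            q.backing_dev_info->ra_pages = 32;  /* illustrative value */

            printf("%s: ra_pages=%lu\n", q.backing_dev_info->name,
                   q.backing_dev_info->ra_pages);
            return 0;
    }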
@@ -184,7 +184,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 		goto err_free_blkg;
 	}
 
-	wb_congested = wb_congested_get_create(&q->backing_dev_info,
+	wb_congested = wb_congested_get_create(q->backing_dev_info,
 					       blkcg->css.id, GFP_NOWAIT);
 	if (!wb_congested) {
 		ret = -ENOMEM;

@@ -468,8 +468,8 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 const char *blkg_dev_name(struct blkcg_gq *blkg)
 {
 	/* some drivers (floppy) instantiate a queue w/o disk registered */
-	if (blkg->q->backing_dev_info.dev)
-		return dev_name(blkg->q->backing_dev_info.dev);
+	if (blkg->q->backing_dev_info->dev)
+		return dev_name(blkg->q->backing_dev_info->dev);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(blkg_dev_name);

@@ -87,7 +87,7 @@ static void blk_clear_congested(struct request_list *rl, int sync)
 	 * flip its congestion state for events on other blkcgs.
 	 */
 	if (rl == &rl->q->root_rl)
-		clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
 
@@ -98,7 +98,7 @@ static void blk_set_congested(struct request_list *rl, int sync)
 #else
 	/* see blk_clear_congested() */
 	if (rl == &rl->q->root_rl)
-		set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
 
@@ -129,7 +129,7 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 
-	return &q->backing_dev_info;
+	return q->backing_dev_info;
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
@@ -597,7 +597,7 @@ void blk_cleanup_queue(struct request_queue *q)
 	blk_flush_integrity();
 
 	/* @q won't process any more request, flush async actions */
-	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
 	blk_sync_queue(q);
 
 	if (q->mq_ops)

@@ -712,17 +712,18 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q->bio_split)
 		goto fail_id;
 
-	q->backing_dev_info.ra_pages =
+	q->backing_dev_info = &q->_backing_dev_info;
+	q->backing_dev_info->ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
-	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
-	q->backing_dev_info.name = "block";
+	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
+	q->backing_dev_info->name = "block";
 	q->node = node_id;
 
-	err = bdi_init(&q->backing_dev_info);
+	err = bdi_init(q->backing_dev_info);
 	if (err)
 		goto fail_split;
 
-	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+	setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->queue_head);

@@ -772,7 +773,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 fail_ref:
 	percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
-	bdi_destroy(&q->backing_dev_info);
+	bdi_destroy(q->backing_dev_info);
 fail_split:
 	bioset_free(q->bio_split);
 fail_id:

@@ -1195,7 +1196,7 @@ fail_elvpriv:
 	 * disturb iosched and blkcg but weird is bettern than dead.
 	 */
 	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
-			   __func__, dev_name(q->backing_dev_info.dev));
+			   __func__, dev_name(q->backing_dev_info->dev));
 
 	rq->cmd_flags &= ~REQ_ELVPRIV;
 	rq->elv.icq = NULL;

@@ -3251,7 +3252,7 @@ void blk_finish_request(struct request *req, int error)
 	BUG_ON(blk_queued_rq(req));
 
 	if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
-		laptop_io_completion(&req->q->backing_dev_info);
+		laptop_io_completion(req->q->backing_dev_info);
 
 	blk_delete_timer(req);
 

@@ -418,7 +418,7 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
 	bi->tuple_size = template->tuple_size;
 	bi->tag_size = template->tag_size;
 
-	disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+	disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
 }
 EXPORT_SYMBOL(blk_integrity_register);
 
@@ -431,7 +431,7 @@ EXPORT_SYMBOL(blk_integrity_register);
  */
 void blk_integrity_unregister(struct gendisk *disk)
 {
-	disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;
+	disk->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
 	memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
 }
 EXPORT_SYMBOL(blk_integrity_unregister);

@@ -75,7 +75,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
-	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
 					(PAGE_CACHE_SHIFT - 10);
 
 	return queue_var_show(ra_kb, (page));

@@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
 
-	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
 
 	return ret;
 }

@@ -578,7 +578,7 @@ static void blk_release_queue(struct kobject *kobj)
 	struct request_queue *q =
 		container_of(kobj, struct request_queue, kobj);
 
-	bdi_exit(&q->backing_dev_info);
+	bdi_exit(q->backing_dev_info);
 	blkcg_exit_queue(q);
 
 	if (q->elevator) {

@@ -611,7 +611,7 @@ void add_disk(struct gendisk *disk)
 	disk_alloc_events(disk);
 
 	/* Register BDI before referencing it from bdev */
-	bdi = &disk->queue->backing_dev_info;
+	bdi = disk->queue->backing_dev_info;
 	bdi_register_owner(bdi, disk_to_dev(disk));
 
 	blk_register_region(disk_devt(disk), disk->minors, NULL,

@@ -663,7 +663,7 @@ void del_gendisk(struct gendisk *disk)
 		 * Unregister bdi before releasing device numbers (as they can
 		 * get reused and we'd get clashes in sysfs).
 		 */
-		bdi_unregister(&disk->queue->backing_dev_info);
+		bdi_unregister(disk->queue->backing_dev_info);
 		blk_unregister_queue(disk);
 	} else {
 		WARN_ON(1);

@@ -396,8 +396,8 @@ aoeblk_gdalloc(void *vp)
 	WARN_ON(d->gd);
 	WARN_ON(d->flags & DEVFL_UP);
 	blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
-	q->backing_dev_info.name = "aoe";
-	q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
+	q->backing_dev_info->name = "aoe";
+	q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
 	d->bufpool = mp;
 	d->blkq = gd->queue = q;
 	q->queuedata = d;

@@ -2393,7 +2393,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
 
 	if (get_ldev(device)) {
 		q = bdev_get_queue(device->ldev->backing_bdev);
-		r = bdi_congested(&q->backing_dev_info, bdi_bits);
+		r = bdi_congested(q->backing_dev_info, bdi_bits);
 		put_ldev(device);
 		if (r)
 			reason = 'b';

@@ -2765,8 +2765,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 	/* we have no partitions. we contain only ourselves. */
 	device->this_bdev->bd_contains = device->this_bdev;
 
-	q->backing_dev_info.congested_fn = drbd_congested;
-	q->backing_dev_info.congested_data = device;
+	q->backing_dev_info->congested_fn = drbd_congested;
+	q->backing_dev_info->congested_data = device;
 
 	blk_queue_make_request(q, drbd_make_request);
 	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);

@@ -1170,11 +1170,11 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
 
 		blk_queue_stack_limits(q, b);
 
-		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
+		if (q->backing_dev_info->ra_pages != b->backing_dev_info->ra_pages) {
 			drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
-				  q->backing_dev_info.ra_pages,
-				  b->backing_dev_info.ra_pages);
-			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
+				  q->backing_dev_info->ra_pages,
+				  b->backing_dev_info->ra_pages);
+			q->backing_dev_info->ra_pages = b->backing_dev_info->ra_pages;
 		}
 	}
 }

@@ -288,7 +288,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 			seq_printf(seq, "%2d: cs:Unconfigured\n", i);
 		} else {
 			/* reset device->congestion_reason */
-			bdi_rw_congested(&device->rq_queue->backing_dev_info);
+			bdi_rw_congested(device->rq_queue->backing_dev_info);
 
 			nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
 			wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';

@@ -937,7 +937,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se
 
 	switch (rbm) {
 	case RB_CONGESTED_REMOTE:
-		bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+		bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
 		return bdi_read_congested(bdi);
 	case RB_LEAST_PENDING:
 		return atomic_read(&device->local_cnt) >

@@ -1276,7 +1276,7 @@ try_next_bio:
 			&& pd->bio_queue_size <= pd->write_congestion_off);
 	spin_unlock(&pd->lock);
 	if (wakeup) {
-		clear_bdi_congested(&pd->disk->queue->backing_dev_info,
+		clear_bdi_congested(pd->disk->queue->backing_dev_info,
 					BLK_RW_ASYNC);
 	}
 
@@ -2405,7 +2405,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
 	spin_lock(&pd->lock);
 	if (pd->write_congestion_on > 0
 	    && pd->bio_queue_size >= pd->write_congestion_on) {
-		set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
+		set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
 		do {
 			spin_unlock(&pd->lock);
 			congestion_wait(BLK_RW_ASYNC, HZ);

@@ -3780,7 +3780,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	q->limits.discard_zeroes_data = 1;
 
 	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
-		q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
 
 	disk->queue = q;
 

@@ -1014,7 +1014,7 @@ static int cached_dev_congested(void *data, int bits)
 	struct request_queue *q = bdev_get_queue(dc->bdev);
 	int ret = 0;
 
-	if (bdi_congested(&q->backing_dev_info, bits))
+	if (bdi_congested(q->backing_dev_info, bits))
 		return 1;
 
 	if (cached_dev_get(dc)) {

@@ -1023,7 +1023,7 @@ static int cached_dev_congested(void *data, int bits)
 
 		for_each_cache(ca, d->c, i) {
 			q = bdev_get_queue(ca->bdev);
-			ret |= bdi_congested(&q->backing_dev_info, bits);
+			ret |= bdi_congested(q->backing_dev_info, bits);
 		}
 
 		cached_dev_put(dc);

@@ -1037,7 +1037,7 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 	struct gendisk *g = dc->disk.disk;
 
 	g->queue->make_request_fn = cached_dev_make_request;
-	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
+	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
 	dc->disk.cache_miss = cached_dev_cache_miss;
 	dc->disk.ioctl = cached_dev_ioctl;
 }

@@ -1130,7 +1130,7 @@ static int flash_dev_congested(void *data, int bits)
 
 	for_each_cache(ca, d->c, i) {
 		q = bdev_get_queue(ca->bdev);
-		ret |= bdi_congested(&q->backing_dev_info, bits);
+		ret |= bdi_congested(q->backing_dev_info, bits);
 	}
 
 	return ret;

@@ -1141,7 +1141,7 @@ void bch_flash_dev_request_init(struct bcache_device *d)
 	struct gendisk *g = d->disk;
 
 	g->queue->make_request_fn = flash_dev_make_request;
-	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
+	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
 	d->cache_miss = flash_dev_cache_miss;
 	d->ioctl = flash_dev_ioctl;
 }

@@ -802,7 +802,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 	blk_queue_make_request(q, NULL);
 	d->disk->queue = q;
 	q->queuedata = d;
-	q->backing_dev_info.congested_data = d;
+	q->backing_dev_info->congested_data = d;
 	q->limits.max_hw_sectors = UINT_MAX;
 	q->limits.max_sectors = UINT_MAX;
 	q->limits.max_segment_size = UINT_MAX;

@@ -1129,9 +1129,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
 	set_capacity(dc->disk.disk,
 		     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
 
-	dc->disk.disk->queue->backing_dev_info.ra_pages =
-		max(dc->disk.disk->queue->backing_dev_info.ra_pages,
-		    q->backing_dev_info.ra_pages);
+	dc->disk.disk->queue->backing_dev_info->ra_pages =
+		max(dc->disk.disk->queue->backing_dev_info->ra_pages,
+		    q->backing_dev_info->ra_pages);
 
 	bch_cached_dev_request_init(dc);
 	bch_cached_dev_writeback_init(dc);

@@ -2288,7 +2288,7 @@ static void do_waker(struct work_struct *ws)
 static int is_congested(struct dm_dev *dev, int bdi_bits)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
-	return bdi_congested(&q->backing_dev_info, bdi_bits);
+	return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 
 static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)

@@ -1379,7 +1379,7 @@ static void stop_worker(struct era *era)
 static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
-	return bdi_congested(&q->backing_dev_info, bdi_bits);
+	return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 
 static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)

@@ -1660,7 +1660,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 			char b[BDEVNAME_SIZE];
 
 			if (likely(q))
-				r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+				r |= bdi_congested(q->backing_dev_info, bdi_bits);
 			else
 				DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
 					     dm_device_name(t->md),

@@ -2634,7 +2634,7 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
 		return 1;
 
 	q = bdev_get_queue(pt->data_dev->bdev);
-	return bdi_congested(&q->backing_dev_info, bdi_bits);
+	return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 
 static void requeue_bios(struct pool *pool)

@@ -2220,7 +2220,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
 		 * the query about congestion status of request_queue
 		 */
 		if (dm_request_based(md))
-			r = md->queue->backing_dev_info.wb.state &
+			r = md->queue->backing_dev_info->wb.state &
 					bdi_bits;
 		else
 			r = dm_table_any_congested(map, bdi_bits);

@@ -2302,7 +2302,7 @@ static void dm_init_md_queue(struct mapped_device *md)
 	 * - must do so here (in alloc_dev callchain) before queue is used
 	 */
 	md->queue->queuedata = md;
-	md->queue->backing_dev_info.congested_data = md;
+	md->queue->backing_dev_info->congested_data = md;
 }
 
 static void dm_init_old_md_queue(struct mapped_device *md)

@@ -2313,7 +2313,7 @@ static void dm_init_old_md_queue(struct mapped_device *md)
 	/*
 	 * Initialize aspects of queue that aren't relevant for blk-mq
 	 */
-	md->queue->backing_dev_info.congested_fn = dm_any_congested;
+	md->queue->backing_dev_info->congested_fn = dm_any_congested;
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 }
 

@@ -68,7 +68,7 @@ static int linear_congested(struct mddev *mddev, int bits)
 
 	for (i = 0; i < conf->raid_disks && !ret ; i++) {
 		struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
-		ret |= bdi_congested(&q->backing_dev_info, bits);
+		ret |= bdi_congested(q->backing_dev_info, bits);
 	}
 
 	rcu_read_unlock();

@@ -5284,8 +5284,8 @@ int md_run(struct mddev *mddev)
 		return err;
 	}
 	if (mddev->queue) {
-		mddev->queue->backing_dev_info.congested_data = mddev;
-		mddev->queue->backing_dev_info.congested_fn = md_congested;
+		mddev->queue->backing_dev_info->congested_data = mddev;
+		mddev->queue->backing_dev_info->congested_fn = md_congested;
 	}
 	if (pers->sync_request) {
 		if (mddev->kobj.sd &&

@@ -5642,7 +5642,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
 
 		__md_stop_writes(mddev);
 		__md_stop(mddev);
-		mddev->queue->backing_dev_info.congested_fn = NULL;
+		mddev->queue->backing_dev_info->congested_fn = NULL;
 
 		/* tell userspace to handle 'inactive' */
 		sysfs_notify_dirent_safe(mddev->sysfs_state);

@@ -166,7 +166,7 @@ static int multipath_congested(struct mddev *mddev, int bits)
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct request_queue *q = bdev_get_queue(rdev->bdev);
 
-			ret |= bdi_congested(&q->backing_dev_info, bits);
+			ret |= bdi_congested(q->backing_dev_info, bits);
 			/* Just like multipath_map, we just check the
 			 * first available device
 			 */

@@ -35,7 +35,7 @@ static int raid0_congested(struct mddev *mddev, int bits)
 	for (i = 0; i < raid_disks && !ret ; i++) {
 		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
 
-		ret |= bdi_congested(&q->backing_dev_info, bits);
+		ret |= bdi_congested(q->backing_dev_info, bits);
 	}
 	return ret;
 }

@@ -415,8 +415,8 @@ static int raid0_run(struct mddev *mddev)
 		 */
 		int stripe = mddev->raid_disks *
 			(mddev->chunk_sectors << 9) / PAGE_SIZE;
-		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
-			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
+		if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
+			mddev->queue->backing_dev_info->ra_pages = 2* stripe;
 	}
 
 	dump_zones(mddev);

@@ -730,9 +730,9 @@ static int raid1_congested(struct mddev *mddev, int bits)
 			 * non-congested targets, it can be removed
 			 */
 			if ((bits & (1 << WB_async_congested)) || 1)
-				ret |= bdi_congested(&q->backing_dev_info, bits);
+				ret |= bdi_congested(q->backing_dev_info, bits);
 			else
-				ret &= bdi_congested(&q->backing_dev_info, bits);
+				ret &= bdi_congested(q->backing_dev_info, bits);
 		}
 	}
 	rcu_read_unlock();

@@ -838,7 +838,7 @@ static int raid10_congested(struct mddev *mddev, int bits)
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct request_queue *q = bdev_get_queue(rdev->bdev);
 
-			ret |= bdi_congested(&q->backing_dev_info, bits);
+			ret |= bdi_congested(q->backing_dev_info, bits);
 		}
 	}
 	rcu_read_unlock();

@@ -3698,8 +3698,8 @@ static int run(struct mddev *mddev)
 		 * maybe...
 		 */
 		stripe /= conf->geo.near_copies;
-		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 	}
 
 	if (md_integrity_register(mddev))

@@ -4493,8 +4493,8 @@ static void end_reshape(struct r10conf *conf)
 		int stripe = conf->geo.raid_disks *
 			((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
 		stripe /= conf->geo.near_copies;
-		if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-			conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+		if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+			conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 	}
 	conf->fullsync = 0;
 }

@@ -6123,10 +6123,10 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
 		mddev_suspend(mddev);
 		conf->skip_copy = new;
 		if (new)
-			mddev->queue->backing_dev_info.capabilities |=
+			mddev->queue->backing_dev_info->capabilities |=
 				BDI_CAP_STABLE_WRITES;
 		else
-			mddev->queue->backing_dev_info.capabilities &=
+			mddev->queue->backing_dev_info->capabilities &=
 				~BDI_CAP_STABLE_WRITES;
 		mddev_resume(mddev);
 	}

@@ -6970,8 +6970,8 @@ static int run(struct mddev *mddev)
 		int data_disks = conf->previous_raid_disks - conf->max_degraded;
 		int stripe = data_disks *
 			((mddev->chunk_sectors << 9) / PAGE_SIZE);
-		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 
 		chunk_size = mddev->chunk_sectors << 9;
 		blk_queue_io_min(mddev->queue, chunk_size);

@@ -7552,8 +7552,8 @@ static void end_reshape(struct r5conf *conf)
 			int data_disks = conf->raid_disks - conf->max_degraded;
 			int stripe = data_disks * ((conf->chunk_sectors << 9)
 						   / PAGE_SIZE);
-			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+			if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+				conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 		}
 	}
 }

@@ -1222,7 +1222,7 @@ static int set_gfs2_super(struct super_block *s, void *data)
 	 * We set the bdi here to the queue backing, file systems can
 	 * overwrite this in ->fill_super()
 	 */
-	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+	s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
 	return 0;
 }
 

@@ -1079,7 +1079,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_time_gran = 1;
 	sb->s_max_links = NILFS_LINK_MAX;
 
-	sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
+	sb->s_bdi = bdev_get_queue(sb->s_bdev)->backing_dev_info;
 
 	err = load_nilfs(nilfs, sb);
 	if (err)

@@ -968,7 +968,7 @@ static int set_bdev_super(struct super_block *s, void *data)
 	 * We set the bdi here to the queue backing, file systems can
 	 * overwrite this in ->fill_super()
 	 */
-	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+	s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
 	return 0;
 }
 

@@ -332,7 +332,8 @@ struct request_queue {
 	 */
 	struct delayed_work	delay_work;
 
-	struct backing_dev_info	backing_dev_info;
+	struct backing_dev_info	*backing_dev_info;
+	struct backing_dev_info	_backing_dev_info;
 
 	/*
 	 * The queue owner gets to use this for whatever they like.
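The struct request_queue hunk above is the heart of the patch: the queue
temporarily carries both the pointer that all converted users dereference
and the embedded _backing_dev_info it points at, so allocation and
lifetime do not change yet. A hypothetical sketch of the separate
allocation the commit message names as the eventual goal; nothing below
is part of this patch, and queue_init() is an invented stand-in:

    #include <stdlib.h>
    #include <stdio.h>

    struct backing_dev_info { unsigned long ra_pages; };

    struct request_queue {
            struct backing_dev_info *backing_dev_info;
    };

    /* Hypothetical follow-up step, NOT made by this patch: once every
     * call site reads the pointer, the object behind it can be allocated
     * separately without touching those call sites again. */
    static int queue_init(struct request_queue *q)
    {
            q->backing_dev_info = malloc(sizeof(*q->backing_dev_info));
            if (!q->backing_dev_info)
                    return -1;
            q->backing_dev_info->ra_pages = 32;     /* illustrative */
            return 0;
    }

    int main(void)
    {
            struct request_queue q;

            if (queue_init(&q))
                    return 1;
            printf("ra_pages=%lu\n", q.backing_dev_info->ra_pages);
            free(q.backing_dev_info);
            return 0;
    }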
@@ -1993,11 +1993,11 @@ void laptop_mode_timer_fn(unsigned long data)
 	 * We want to write everything out, not just down to the dirty
 	 * threshold
 	 */
-	if (!bdi_has_dirty_io(&q->backing_dev_info))
+	if (!bdi_has_dirty_io(q->backing_dev_info))
 		return;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node)
+	list_for_each_entry_rcu(wb, &q->backing_dev_info->wb_list, bdi_node)
 		if (wb_has_dirty_io(wb))
 			wb_start_writeback(wb, nr_pages, true,
 						WB_REASON_LAPTOP_TIMER);