Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  cfq-iosched: Do not idle on async queues
  blk-cgroup: Fix potential deadlock in blk-cgroup
  block: fix bugs in bio-integrity mempool usage
  block: fix bio_add_page for non trivial merge_bvec_fn case
  drbd: null dereference bug
  drbd: fix max_segment_size initialization
commit 1a45dcfe25
6 changed files with 13 additions and 7 deletions
block/blk-cgroup.c
@@ -147,16 +147,16 @@ blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 		return -EINVAL;
 
 	blkcg = cgroup_to_blkio_cgroup(cgroup);
+	spin_lock(&blkio_list_lock);
 	spin_lock_irq(&blkcg->lock);
 	blkcg->weight = (unsigned int)val;
 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
-		spin_lock(&blkio_list_lock);
 		list_for_each_entry(blkiop, &blkio_list, list)
 			blkiop->ops.blkio_update_group_weight_fn(blkg,
 							blkcg->weight);
-		spin_unlock(&blkio_list_lock);
 	}
 	spin_unlock_irq(&blkcg->lock);
+	spin_unlock(&blkio_list_lock);
 	return 0;
 }
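
The blk-cgroup change is a classic ABBA deadlock fix: blkio_list_lock is now taken before blkcg->lock, matching the order used elsewhere, instead of being acquired and released inside a region that already holds blkcg->lock. Below is a minimal userspace sketch of the rule, with illustrative pthread mutexes standing in for the kernel spinlocks (the names are stand-ins, not kernel APIs):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned int weight;

/* Every code path acquires list_lock first and group_lock second.
 * With one global order, two threads can never each hold the lock
 * the other is waiting for, so the ABBA deadlock cannot occur. */
static void set_weight(unsigned int val)
{
	pthread_mutex_lock(&list_lock);
	pthread_mutex_lock(&group_lock);
	weight = val;
	pthread_mutex_unlock(&group_lock);
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	set_weight(500);
	printf("weight=%u\n", weight);
	return 0;
}
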
block/cfq-iosched.c
@@ -1803,7 +1803,7 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * Otherwise, we do only if they are the last ones
 	 * in their service tree.
 	 */
-	return service_tree->count == 1;
+	return service_tree->count == 1 && cfq_cfqq_sync(cfqq);
 }
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
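
The one-line cfq-iosched change stops the idle timer from arming for async queues: idling trades disk time for the chance that the same queue soon submits more I/O, which pays off for sync (dependent-read) workloads but not for async writeback. A toy predicate showing the shape of the check, using a stand-in struct rather than the kernel's cfq_queue:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the relevant cfq_queue state; not the kernel layout. */
struct toy_queue {
	int  service_tree_count;  /* queues remaining in this service tree */
	bool sync;                /* queue carries synchronous I/O */
};

/* Idle only for the last queue in its tree AND only if it is sync;
 * before the fix, the sync test was missing. */
static bool should_idle(const struct toy_queue *q)
{
	return q->service_tree_count == 1 && q->sync;
}

int main(void)
{
	struct toy_queue async_q = { .service_tree_count = 1, .sync = false };
	struct toy_queue sync_q  = { .service_tree_count = 1, .sync = true  };

	printf("async: %d, sync: %d\n", should_idle(&async_q), should_idle(&sync_q));
	return 0;
}
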
drivers/block/drbd/drbd_main.c
@@ -2973,7 +2973,6 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 		goto out_no_q;
 	mdev->rq_queue = q;
 	q->queuedata   = mdev;
-	blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
 
 	disk = alloc_disk(1);
 	if (!disk)
@@ -2997,6 +2996,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 	q->backing_dev_info.congested_data = mdev;
 
 	blk_queue_make_request(q, drbd_make_request_26);
+	blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	blk_queue_merge_bvec(q, drbd_merge_bvec);
 	q->queue_lock = &mdev->req_lock; /* needed since we use */
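
The two drbd_main.c hunks only move a line: blk_queue_max_segment_size() is now called after blk_queue_make_request(), because the latter resets the queue limits to defaults and would otherwise silently overwrite the DRBD_MAX_SEGMENT_SIZE just configured. A userspace model of why the ordering matters; all names here are illustrative, not the kernel API:

#include <stdio.h>

struct toy_queue { unsigned int max_segment_size; };

/* Like blk_queue_make_request(), this initializes limits to defaults,
 * clobbering anything configured earlier. */
static void make_request_init(struct toy_queue *q)
{
	q->max_segment_size = 65536;  /* stand-in default */
}

int main(void)
{
	struct toy_queue q;

	q.max_segment_size = 32768;   /* set the limit first ...            */
	make_request_init(&q);        /* ... then lose it to the defaults   */
	printf("wrong order: %u\n", q.max_segment_size);

	make_request_init(&q);        /* fixed: apply defaults first ...    */
	q.max_segment_size = 32768;   /* ... then the real limit sticks     */
	printf("fixed order: %u\n", q.max_segment_size);
	return 0;
}
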
drivers/block/drbd/drbd_receiver.c
@@ -1224,7 +1224,7 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
 	epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
 	if (!epoch) {
 		dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
-		issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
+		issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
 		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
 		if (issue_flush) {
 			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
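
The drbd_receiver.c hunk is the null-dereference fix: inside the !epoch branch, epoch is by definition NULL, so reading epoch->flags dereferenced a null pointer. The flag actually belongs to the still-valid mdev->current_epoch. A compact sketch of the pattern, with illustrative types rather than the DRBD structures:

#include <stdio.h>
#include <stdlib.h>

struct toy_epoch  { unsigned long flags; };
struct toy_device { struct toy_epoch *current_epoch; };  /* valid once up */

static void handle_barrier(struct toy_device *dev)
{
	struct toy_epoch *epoch = malloc(sizeof(*epoch));

	if (!epoch) {
		/* The old code read epoch->flags here -- a guaranteed NULL
		 * dereference. The fix falls back to the object known to
		 * exist: dev->current_epoch. */
		dev->current_epoch->flags |= 1UL;
		return;
	}
	epoch->flags = 0;
	free(epoch);
}

int main(void)
{
	struct toy_epoch cur = { .flags = 0 };
	struct toy_device dev = { .current_epoch = &cur };

	handle_barrier(&dev);
	printf("current_epoch flags: %lu\n", cur.flags);
	return 0;
}
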
fs/bio-integrity.c
@@ -61,7 +61,7 @@ static inline unsigned int vecs_to_idx(unsigned int nr)
 
 static inline int use_bip_pool(unsigned int idx)
 {
-	if (idx == BIOVEC_NR_POOLS)
+	if (idx == BIOVEC_MAX_IDX)
 		return 1;
 
 	return 0;
@@ -95,6 +95,7 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
 
 	/* Use mempool if lower order alloc failed or max vecs were requested */
 	if (bip == NULL) {
+		idx = BIOVEC_MAX_IDX; /* so we free the payload properly later */
 		bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
 
 		if (unlikely(bip == NULL)) {
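
Both bio-integrity hunks enforce the same rule: the index recorded at allocation time must describe where the memory really came from, because the free path uses it to pick the pool to return to. When the slab allocation fails and the payload is taken from the mempool, idx is forced to BIOVEC_MAX_IDX so the later free releases it to the mempool rather than to a slab of the wrong size. A small sketch of that alloc/free symmetry, with made-up names:

#include <stdio.h>
#include <stdlib.h>

#define FROM_MEMPOOL 0xff  /* stand-in for BIOVEC_MAX_IDX */

struct toy_payload { unsigned int idx; };

static struct toy_payload *toy_alloc(int slab_fails)
{
	struct toy_payload *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	/* Tag the payload with its true origin; if the tag lied, the
	 * free side would hand the memory back to the wrong allocator. */
	p->idx = slab_fails ? FROM_MEMPOOL : 2;
	return p;
}

static void toy_free(struct toy_payload *p)
{
	if (!p)
		return;
	if (p->idx == FROM_MEMPOOL)
		printf("returned to mempool\n");
	else
		printf("returned to slab idx %u\n", p->idx);
	free(p);
}

int main(void)
{
	toy_free(toy_alloc(0));
	toy_free(toy_alloc(1));
	return 0;
}
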
fs/bio.c
@@ -542,13 +542,18 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 
 		if (page == prev->bv_page &&
 		    offset == prev->bv_offset + prev->bv_len) {
+			unsigned int prev_bv_len = prev->bv_len;
 			prev->bv_len += len;
 
 			if (q->merge_bvec_fn) {
 				struct bvec_merge_data bvm = {
+					/* prev_bvec is already charged in
+					   bi_size, discharge it in order to
+					   simulate merging updated prev_bvec
+					   as new bvec. */
 					.bi_bdev = bio->bi_bdev,
 					.bi_sector = bio->bi_sector,
-					.bi_size = bio->bi_size,
+					.bi_size = bio->bi_size - prev_bv_len,
 					.bi_rw = bio->bi_rw,
 				};
 
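
The fs/bio.c hunk fixes the size passed to merge_bvec_fn when the new page extends the bio's last bvec: prev->bv_len has already been grown and bio->bi_size already counts the old prev segment, so querying with bi_size unchanged would double-count those bytes. Subtracting the saved prev_bv_len discharges the old segment, presenting the grown bvec to the driver as if it were brand new. Worked arithmetic with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int bi_size     = 8192;  /* bytes already in the bio    */
	unsigned int prev_bv_len = 4096;  /* old length of the last bvec */
	unsigned int len         = 512;   /* bytes being merged into it  */

	/* The driver's merge_bvec_fn is asked: does bvm.bi_size plus the
	 * offered bvec still fit? The offered bvec is the grown segment. */
	unsigned int grown_bvec = prev_bv_len + len;

	unsigned int buggy_total = bi_size + grown_bvec;                 /* prev counted twice */
	unsigned int fixed_total = (bi_size - prev_bv_len) + grown_bvec; /* correct            */

	printf("buggy=%u fixed=%u (expected %u)\n",
	       buggy_total, fixed_total, bi_size + len);
	return 0;
}
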