Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client
Pull Ceph fixes from Sage Weil:
 "Just two small items from Ilya: the first patch fixes the RBD readahead
  to grab full objects; the second fixes the write ops to prevent undue
  promotion when a cache tier is configured on the server side."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client:
  rbd: use writefull op for object size writes
  rbd: set max_sectors explicitly
This commit is contained in:
commit
59bcce1216
2 changed files with 17 additions and 6 deletions
|
@ -1863,9 +1863,11 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
|
||||||
rbd_osd_read_callback(obj_request);
|
rbd_osd_read_callback(obj_request);
|
||||||
break;
|
break;
|
||||||
case CEPH_OSD_OP_SETALLOCHINT:
|
case CEPH_OSD_OP_SETALLOCHINT:
|
||||||
rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
|
rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
|
||||||
|
osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
|
||||||
/* fall through */
|
/* fall through */
|
||||||
case CEPH_OSD_OP_WRITE:
|
case CEPH_OSD_OP_WRITE:
|
||||||
|
case CEPH_OSD_OP_WRITEFULL:
|
||||||
rbd_osd_write_callback(obj_request);
|
rbd_osd_write_callback(obj_request);
|
||||||
break;
|
break;
|
||||||
case CEPH_OSD_OP_STAT:
|
case CEPH_OSD_OP_STAT:
|
||||||
|
@ -2401,7 +2403,10 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
|
||||||
opcode = CEPH_OSD_OP_ZERO;
|
opcode = CEPH_OSD_OP_ZERO;
|
||||||
}
|
}
|
||||||
} else if (op_type == OBJ_OP_WRITE) {
|
} else if (op_type == OBJ_OP_WRITE) {
|
||||||
opcode = CEPH_OSD_OP_WRITE;
|
if (!offset && length == object_size)
|
||||||
|
opcode = CEPH_OSD_OP_WRITEFULL;
|
||||||
|
else
|
||||||
|
opcode = CEPH_OSD_OP_WRITE;
|
||||||
osd_req_op_alloc_hint_init(osd_request, num_ops,
|
osd_req_op_alloc_hint_init(osd_request, num_ops,
|
||||||
object_size, object_size);
|
object_size, object_size);
|
||||||
num_ops++;
|
num_ops++;
|
||||||
|
@ -3760,6 +3765,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
|
||||||
/* set io sizes to object size */
|
/* set io sizes to object size */
|
||||||
segment_size = rbd_obj_bytes(&rbd_dev->header);
|
segment_size = rbd_obj_bytes(&rbd_dev->header);
|
||||||
blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
|
blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
|
||||||
|
q->limits.max_sectors = queue_max_hw_sectors(q);
|
||||||
blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
|
blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
|
||||||
blk_queue_max_segment_size(q, segment_size);
|
blk_queue_max_segment_size(q, segment_size);
|
||||||
blk_queue_io_min(q, segment_size);
|
blk_queue_io_min(q, segment_size);
|
||||||
|
|
|
@ -285,6 +285,7 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
|
||||||
switch (op->op) {
|
switch (op->op) {
|
||||||
case CEPH_OSD_OP_READ:
|
case CEPH_OSD_OP_READ:
|
||||||
case CEPH_OSD_OP_WRITE:
|
case CEPH_OSD_OP_WRITE:
|
||||||
|
case CEPH_OSD_OP_WRITEFULL:
|
||||||
ceph_osd_data_release(&op->extent.osd_data);
|
ceph_osd_data_release(&op->extent.osd_data);
|
||||||
break;
|
break;
|
||||||
case CEPH_OSD_OP_CALL:
|
case CEPH_OSD_OP_CALL:
|
||||||
|
@ -485,13 +486,14 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
|
||||||
size_t payload_len = 0;
|
size_t payload_len = 0;
|
||||||
|
|
||||||
BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
|
BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
|
||||||
opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE);
|
opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
|
||||||
|
opcode != CEPH_OSD_OP_TRUNCATE);
|
||||||
|
|
||||||
op->extent.offset = offset;
|
op->extent.offset = offset;
|
||||||
op->extent.length = length;
|
op->extent.length = length;
|
||||||
op->extent.truncate_size = truncate_size;
|
op->extent.truncate_size = truncate_size;
|
||||||
op->extent.truncate_seq = truncate_seq;
|
op->extent.truncate_seq = truncate_seq;
|
||||||
if (opcode == CEPH_OSD_OP_WRITE)
|
if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
|
||||||
payload_len += length;
|
payload_len += length;
|
||||||
|
|
||||||
op->payload_len = payload_len;
|
op->payload_len = payload_len;
|
||||||
|
@ -670,9 +672,11 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
|
||||||
break;
|
break;
|
||||||
case CEPH_OSD_OP_READ:
|
case CEPH_OSD_OP_READ:
|
||||||
case CEPH_OSD_OP_WRITE:
|
case CEPH_OSD_OP_WRITE:
|
||||||
|
case CEPH_OSD_OP_WRITEFULL:
|
||||||
case CEPH_OSD_OP_ZERO:
|
case CEPH_OSD_OP_ZERO:
|
||||||
case CEPH_OSD_OP_TRUNCATE:
|
case CEPH_OSD_OP_TRUNCATE:
|
||||||
if (src->op == CEPH_OSD_OP_WRITE)
|
if (src->op == CEPH_OSD_OP_WRITE ||
|
||||||
|
src->op == CEPH_OSD_OP_WRITEFULL)
|
||||||
request_data_len = src->extent.length;
|
request_data_len = src->extent.length;
|
||||||
dst->extent.offset = cpu_to_le64(src->extent.offset);
|
dst->extent.offset = cpu_to_le64(src->extent.offset);
|
||||||
dst->extent.length = cpu_to_le64(src->extent.length);
|
dst->extent.length = cpu_to_le64(src->extent.length);
|
||||||
|
@ -681,7 +685,8 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
|
||||||
dst->extent.truncate_seq =
|
dst->extent.truncate_seq =
|
||||||
cpu_to_le32(src->extent.truncate_seq);
|
cpu_to_le32(src->extent.truncate_seq);
|
||||||
osd_data = &src->extent.osd_data;
|
osd_data = &src->extent.osd_data;
|
||||||
if (src->op == CEPH_OSD_OP_WRITE)
|
if (src->op == CEPH_OSD_OP_WRITE ||
|
||||||
|
src->op == CEPH_OSD_OP_WRITEFULL)
|
||||||
ceph_osdc_msg_data_add(req->r_request, osd_data);
|
ceph_osdc_msg_data_add(req->r_request, osd_data);
|
||||||
else
|
else
|
||||||
ceph_osdc_msg_data_add(req->r_reply, osd_data);
|
ceph_osdc_msg_data_add(req->r_reply, osd_data);
|
||||||
|
|
Loading…
Add table
Reference in a new issue