scsi: storvsc: Don't assume that the scatterlist is not chained
The current code assumes that the scatterlists presented are not chained. Fix the code to not make this assumption. Signed-off-by: K. Y. Srinivasan <kys@microsoft.com> Reviewed-by: Long Li <longli@microsoft.com> Signed-off-by: James Bottomley <JBottomley@Odin.com>
This commit is contained in:
parent
8de580742f
commit
aaced9934d
1 changed file with 57 additions and 42 deletions
|
@@ -626,19 +626,6 @@ cleanup:
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Disgusting wrapper functions */
|
|
||||||
static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
|
|
||||||
{
|
|
||||||
void *addr = kmap_atomic(sg_page(sgl + idx));
|
|
||||||
return (unsigned long)addr;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void sg_kunmap_atomic(unsigned long addr)
|
|
||||||
{
|
|
||||||
kunmap_atomic((void *)addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/* Assume the original sgl has enough room */
|
/* Assume the original sgl has enough room */
|
||||||
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
|
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
|
||||||
struct scatterlist *bounce_sgl,
|
struct scatterlist *bounce_sgl,
|
||||||
|
@@ -653,32 +640,38 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
|
||||||
unsigned long bounce_addr = 0;
|
unsigned long bounce_addr = 0;
|
||||||
unsigned long dest_addr = 0;
|
unsigned long dest_addr = 0;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
struct scatterlist *cur_dest_sgl;
|
||||||
|
struct scatterlist *cur_src_sgl;
|
||||||
|
|
||||||
local_irq_save(flags);
|
local_irq_save(flags);
|
||||||
|
cur_dest_sgl = orig_sgl;
|
||||||
|
cur_src_sgl = bounce_sgl;
|
||||||
for (i = 0; i < orig_sgl_count; i++) {
|
for (i = 0; i < orig_sgl_count; i++) {
|
||||||
dest_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
|
dest_addr = (unsigned long)
|
||||||
|
kmap_atomic(sg_page(cur_dest_sgl)) +
|
||||||
|
cur_dest_sgl->offset;
|
||||||
dest = dest_addr;
|
dest = dest_addr;
|
||||||
destlen = orig_sgl[i].length;
|
destlen = cur_dest_sgl->length;
|
||||||
|
|
||||||
if (bounce_addr == 0)
|
if (bounce_addr == 0)
|
||||||
bounce_addr = sg_kmap_atomic(bounce_sgl,j);
|
bounce_addr = (unsigned long)kmap_atomic(
|
||||||
|
sg_page(cur_src_sgl));
|
||||||
|
|
||||||
while (destlen) {
|
while (destlen) {
|
||||||
src = bounce_addr + bounce_sgl[j].offset;
|
src = bounce_addr + cur_src_sgl->offset;
|
||||||
srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
|
srclen = cur_src_sgl->length - cur_src_sgl->offset;
|
||||||
|
|
||||||
copylen = min(srclen, destlen);
|
copylen = min(srclen, destlen);
|
||||||
memcpy((void *)dest, (void *)src, copylen);
|
memcpy((void *)dest, (void *)src, copylen);
|
||||||
|
|
||||||
total_copied += copylen;
|
total_copied += copylen;
|
||||||
bounce_sgl[j].offset += copylen;
|
cur_src_sgl->offset += copylen;
|
||||||
destlen -= copylen;
|
destlen -= copylen;
|
||||||
dest += copylen;
|
dest += copylen;
|
||||||
|
|
||||||
if (bounce_sgl[j].offset == bounce_sgl[j].length) {
|
if (cur_src_sgl->offset == cur_src_sgl->length) {
|
||||||
/* full */
|
/* full */
|
||||||
sg_kunmap_atomic(bounce_addr);
|
kunmap_atomic((void *)bounce_addr);
|
||||||
j++;
|
j++;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -692,21 +685,27 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
|
||||||
/*
|
/*
|
||||||
* We are done; cleanup and return.
|
* We are done; cleanup and return.
|
||||||
*/
|
*/
|
||||||
sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
|
kunmap_atomic((void *)(dest_addr -
|
||||||
|
cur_dest_sgl->offset));
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
return total_copied;
|
return total_copied;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* if we need to use another bounce buffer */
|
/* if we need to use another bounce buffer */
|
||||||
if (destlen || i != orig_sgl_count - 1)
|
if (destlen || i != orig_sgl_count - 1) {
|
||||||
bounce_addr = sg_kmap_atomic(bounce_sgl,j);
|
cur_src_sgl = sg_next(cur_src_sgl);
|
||||||
|
bounce_addr = (unsigned long)
|
||||||
|
kmap_atomic(
|
||||||
|
sg_page(cur_src_sgl));
|
||||||
|
}
|
||||||
} else if (destlen == 0 && i == orig_sgl_count - 1) {
|
} else if (destlen == 0 && i == orig_sgl_count - 1) {
|
||||||
/* unmap the last bounce that is < PAGE_SIZE */
|
/* unmap the last bounce that is < PAGE_SIZE */
|
||||||
sg_kunmap_atomic(bounce_addr);
|
kunmap_atomic((void *)bounce_addr);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
|
kunmap_atomic((void *)(dest_addr - cur_dest_sgl->offset));
|
||||||
|
cur_dest_sgl = sg_next(cur_dest_sgl);
|
||||||
}
|
}
|
||||||
|
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
|
@@ -727,48 +726,61 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
|
||||||
unsigned long bounce_addr = 0;
|
unsigned long bounce_addr = 0;
|
||||||
unsigned long src_addr = 0;
|
unsigned long src_addr = 0;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
struct scatterlist *cur_src_sgl;
|
||||||
|
struct scatterlist *cur_dest_sgl;
|
||||||
|
|
||||||
local_irq_save(flags);
|
local_irq_save(flags);
|
||||||
|
|
||||||
|
cur_src_sgl = orig_sgl;
|
||||||
|
cur_dest_sgl = bounce_sgl;
|
||||||
|
|
||||||
for (i = 0; i < orig_sgl_count; i++) {
|
for (i = 0; i < orig_sgl_count; i++) {
|
||||||
src_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
|
src_addr = (unsigned long)
|
||||||
|
kmap_atomic(sg_page(cur_src_sgl)) +
|
||||||
|
cur_src_sgl->offset;
|
||||||
src = src_addr;
|
src = src_addr;
|
||||||
srclen = orig_sgl[i].length;
|
srclen = cur_src_sgl->length;
|
||||||
|
|
||||||
if (bounce_addr == 0)
|
if (bounce_addr == 0)
|
||||||
bounce_addr = sg_kmap_atomic(bounce_sgl,j);
|
bounce_addr = (unsigned long)
|
||||||
|
kmap_atomic(sg_page(cur_dest_sgl));
|
||||||
|
|
||||||
while (srclen) {
|
while (srclen) {
|
||||||
/* assume bounce offset always == 0 */
|
/* assume bounce offset always == 0 */
|
||||||
dest = bounce_addr + bounce_sgl[j].length;
|
dest = bounce_addr + cur_dest_sgl->length;
|
||||||
destlen = PAGE_SIZE - bounce_sgl[j].length;
|
destlen = PAGE_SIZE - cur_dest_sgl->length;
|
||||||
|
|
||||||
copylen = min(srclen, destlen);
|
copylen = min(srclen, destlen);
|
||||||
memcpy((void *)dest, (void *)src, copylen);
|
memcpy((void *)dest, (void *)src, copylen);
|
||||||
|
|
||||||
total_copied += copylen;
|
total_copied += copylen;
|
||||||
bounce_sgl[j].length += copylen;
|
cur_dest_sgl->length += copylen;
|
||||||
srclen -= copylen;
|
srclen -= copylen;
|
||||||
src += copylen;
|
src += copylen;
|
||||||
|
|
||||||
if (bounce_sgl[j].length == PAGE_SIZE) {
|
if (cur_dest_sgl->length == PAGE_SIZE) {
|
||||||
/* full..move to next entry */
|
/* full..move to next entry */
|
||||||
sg_kunmap_atomic(bounce_addr);
|
kunmap_atomic((void *)bounce_addr);
|
||||||
bounce_addr = 0;
|
bounce_addr = 0;
|
||||||
j++;
|
j++;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* if we need to use another bounce buffer */
|
/* if we need to use another bounce buffer */
|
||||||
if (srclen && bounce_addr == 0)
|
if (srclen && bounce_addr == 0) {
|
||||||
bounce_addr = sg_kmap_atomic(bounce_sgl, j);
|
cur_dest_sgl = sg_next(cur_dest_sgl);
|
||||||
|
bounce_addr = (unsigned long)
|
||||||
|
kmap_atomic(
|
||||||
|
sg_page(cur_dest_sgl));
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
|
kunmap_atomic((void *)(src_addr - cur_src_sgl->offset));
|
||||||
|
cur_src_sgl = sg_next(cur_src_sgl);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (bounce_addr)
|
if (bounce_addr)
|
||||||
sg_kunmap_atomic(bounce_addr);
|
kunmap_atomic((void *)bounce_addr);
|
||||||
|
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
|
|
||||||
|
@@ -1536,6 +1548,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
|
||||||
struct scatterlist *sgl;
|
struct scatterlist *sgl;
|
||||||
unsigned int sg_count = 0;
|
unsigned int sg_count = 0;
|
||||||
struct vmscsi_request *vm_srb;
|
struct vmscsi_request *vm_srb;
|
||||||
|
struct scatterlist *cur_sgl;
|
||||||
|
|
||||||
if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
|
if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
|
||||||
/*
|
/*
|
||||||
|
@@ -1617,10 +1630,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd_request->data_buffer.offset = sgl[0].offset;
|
cmd_request->data_buffer.offset = sgl[0].offset;
|
||||||
|
cur_sgl = sgl;
|
||||||
for (i = 0; i < sg_count; i++)
|
for (i = 0; i < sg_count; i++) {
|
||||||
cmd_request->data_buffer.pfn_array[i] =
|
cmd_request->data_buffer.pfn_array[i] =
|
||||||
page_to_pfn(sg_page((&sgl[i])));
|
page_to_pfn(sg_page((cur_sgl)));
|
||||||
|
cur_sgl = sg_next(cur_sgl);
|
||||||
|
}
|
||||||
|
|
||||||
} else if (scsi_sglist(scmnd)) {
|
} else if (scsi_sglist(scmnd)) {
|
||||||
cmd_request->data_buffer.offset =
|
cmd_request->data_buffer.offset =
|
||||||
|
|
Loading…
Add table
Reference in a new issue