ceph: fix reading inline data when i_size > PAGE_SIZE
When an inode has inline data but its size is greater than PAGE_SIZE (it was truncated to a larger size), the previous direct read code returned -EIO. This patch adds code to return zeros for data whose offset is beyond PAGE_SIZE.

Signed-off-by: Yan, Zheng <zyan@redhat.com>
parent 86d8f67b26
commit fcc02d2a03

2 changed files with 26 additions and 15 deletions
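The semantics described in the commit message can be modeled with a small userspace sketch (illustrative only: the function name model_inline_read, the fixed 4096-byte PAGE_SIZE and the example sizes are assumptions, not part of the patch). At most one page of a file is stored inline, so once the file has been truncated to a larger size, a read sees real data below PAGE_SIZE and zeros from there up to i_size; the old code instead failed any read of such a file with -EIO.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL	/* model value; the kernel code uses PAGE_CACHE_SIZE */

/*
 * Model of a read from a file whose data is stored inline: report how
 * many bytes are served from the inline page and how many are zero fill
 * (the region created by truncating the file up past PAGE_SIZE).
 */
static void model_inline_read(uint64_t i_size, uint64_t pos, uint64_t len)
{
	uint64_t end = pos + len;
	uint64_t from_page = 0, zeros = 0;

	if (end > i_size)
		end = i_size;			/* never read past EOF */
	if (pos < end && pos < PAGE_SIZE)
		from_page = (end < PAGE_SIZE ? end : PAGE_SIZE) - pos;
	if (pos + from_page < end)
		zeros = end - (pos + from_page);

	printf("read(pos=%llu, len=%llu) -> %llu data bytes + %llu zero bytes\n",
	       (unsigned long long)pos, (unsigned long long)len,
	       (unsigned long long)from_page, (unsigned long long)zeros);
}

int main(void)
{
	uint64_t i_size = 3 * PAGE_SIZE;	/* inline file truncated up to 12 KiB */

	model_inline_read(i_size, 0, 100);	/* entirely from the inline page */
	model_inline_read(i_size, 4000, 200);	/* straddles the PAGE_SIZE boundary */
	model_inline_read(i_size, 8192, 1000);	/* beyond the inline page: all zeros */
	return 0;
}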
fs/ceph/addr.c
@@ -196,17 +196,22 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 	u64 len = PAGE_CACHE_SIZE;
 
 	if (off >= i_size_read(inode)) {
-		zero_user_segment(page, err, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
 		SetPageUptodate(page);
 		return 0;
 	}
 
-	/*
-	 * Uptodate inline data should have been added into page cache
-	 * while getting Fcr caps.
-	 */
-	if (ci->i_inline_version != CEPH_INLINE_NONE)
-		return -EINVAL;
+	if (ci->i_inline_version != CEPH_INLINE_NONE) {
+		/*
+		 * Uptodate inline data should have been added
+		 * into page cache while getting Fcr caps.
+		 */
+		if (off == 0)
+			return -EINVAL;
+		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		SetPageUptodate(page);
+		return 0;
+	}
 
 	err = ceph_readpage_from_fscache(inode, page);
 	if (err == 0)
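A minimal sketch of the readpage decision the hunk above introduces, written as plain userspace C with hypothetical names and an assumed 4096-byte page: for an inode that still carries inline data, the first page should already have been filled into the page cache while the Fcr caps were obtained, so reaching readpage for offset 0 is still -EINVAL; any later page lies entirely beyond the inline blob, so it can simply be zero-filled and marked up to date.

#include <string.h>
#include <errno.h>
#include <stdint.h>

#define PAGE_SIZE 4096u		/* model value; the kernel code uses PAGE_CACHE_SIZE */

/*
 * Model of readpage for an inode with inline data: 'off' is the byte
 * offset of the page being read and 'page' its buffer.
 */
static int readpage_inline_model(uint64_t off, unsigned char *page)
{
	if (off == 0) {
		/*
		 * Page 0 should already be up to date: the inline blob is
		 * copied into the page cache while the Fcr caps are obtained.
		 */
		return -EINVAL;
	}
	/* Every later page holds no inline data at all: zero fill it. */
	memset(page, 0, PAGE_SIZE);
	return 0;			/* page can be marked up to date */
}

int main(void)
{
	unsigned char page[PAGE_SIZE];

	/* Second page of an inline file truncated past PAGE_SIZE: zeros. */
	return readpage_inline_model(PAGE_SIZE, page) ? 1 : 0;
}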
fs/ceph/file.c
@@ -879,28 +879,34 @@ again:
 
 	i_size = i_size_read(inode);
 	if (retry_op == READ_INLINE) {
-		/* does not support inline data > PAGE_SIZE */
-		if (i_size > PAGE_CACHE_SIZE) {
-			ret = -EIO;
-		} else if (iocb->ki_pos < i_size) {
+		BUG_ON(ret > 0 || read > 0);
+		if (iocb->ki_pos < i_size &&
+		    iocb->ki_pos < PAGE_CACHE_SIZE) {
 			loff_t end = min_t(loff_t, i_size,
 					   iocb->ki_pos + len);
+			end = min_t(loff_t, end, PAGE_CACHE_SIZE);
 			if (statret < end)
 				zero_user_segment(page, statret, end);
 			ret = copy_page_to_iter(page,
 				iocb->ki_pos & ~PAGE_MASK,
 				end - iocb->ki_pos, to);
 			iocb->ki_pos += ret;
-		} else {
-			ret = 0;
+			read += ret;
+		}
+		if (iocb->ki_pos < i_size && read < len) {
+			size_t zlen = min_t(size_t, len - read,
+					    i_size - iocb->ki_pos);
+			ret = iov_iter_zero(zlen, to);
+			iocb->ki_pos += ret;
+			read += ret;
 		}
 		__free_pages(page, 0);
-		return ret;
+		return read;
 	}
 
 	/* hit EOF or hole? */
 	if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
 	    ret < len) {
 		dout("sync_read hit hole, ppos %lld < size %lld"
 		     ", reading more\n", iocb->ki_pos,
 		     inode->i_size);
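The arithmetic of the rewritten READ_INLINE branch can be followed in a short model (plain userspace C, hypothetical names; the real code also zero-pads the inline page between statret and end before copying): at most the first PAGE_SIZE bytes of the request are served from the inline page, whatever remains of the request up to i_size is zero fill, and the total is accumulated in read, which is what the function now returns instead of ret (or the old -EIO).

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL	/* model value; the kernel code uses PAGE_CACHE_SIZE */

static uint64_t min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

/*
 * Model of the READ_INLINE completion path: how far the position
 * advances and how many bytes the read returns.
 */
static uint64_t read_inline_model(uint64_t i_size, uint64_t pos, uint64_t len)
{
	uint64_t read = 0;

	if (pos < i_size && pos < PAGE_SIZE) {
		/* Portion served from the (possibly zero-padded) inline page. */
		uint64_t end = min_u64(min_u64(i_size, pos + len), PAGE_SIZE);

		read += end - pos;
		pos = end;
	}
	if (pos < i_size && read < len) {
		/* The rest of the request, up to EOF, is pure zero fill. */
		uint64_t zlen = min_u64(len - read, i_size - pos);

		pos += zlen;
		read += zlen;
	}
	return read;		/* the patch returns 'read', never -EIO */
}

int main(void)
{
	/* 12 KiB file with inline data: an 8 KiB read at offset 0 now succeeds. */
	printf("%llu bytes\n",
	       (unsigned long long)read_inline_model(3 * PAGE_SIZE, 0, 8192));
	return 0;
}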