Merge git://git.jan-o-sch.net/btrfs-unstable into integration

Conflicts:
	fs/btrfs/Makefile
	fs/btrfs/extent_io.c
	fs/btrfs/extent_io.h
	fs/btrfs/scrub.c

Signed-off-by: Chris Mason <chris.mason@oracle.com>

commit 806468f8bf
14 changed files with 1930 additions and 280 deletions
fs/btrfs/Makefile
@@ -8,6 +8,6 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
 	   export.o tree-log.o free-space-cache.o zlib.o lzo.o \
 	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
-	   reada.o
+	   reada.o backref.o

 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
fs/btrfs/backref.c (new file, 776 lines added)

/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include "ctree.h"
#include "disk-io.h"
#include "backref.h"

struct __data_ref {
	struct list_head list;
	u64 inum;
	u64 root;
	u64 extent_data_item_offset;
};

struct __shared_ref {
	struct list_head list;
	u64 disk_byte;
};

static int __inode_info(u64 inum, u64 ioff, u8 key_type,
			struct btrfs_root *fs_root, struct btrfs_path *path,
			struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = inum;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type || found_key->objectid != key.objectid)
		return 1;

	return 0;
}

/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			struct btrfs_path *path)
{
	struct btrfs_key key;
	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
				&key);
}

static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
				struct btrfs_path *path,
				struct btrfs_key *found_key)
{
	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
				found_key);
}

/*
 * this iterates to turn a btrfs_inode_ref into a full filesystem path. elements
 * of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible to provide
 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
 * the start point of the resulting string is returned. this pointer is within
 * dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
				struct btrfs_inode_ref *iref,
				struct extent_buffer *eb_in, u64 parent,
				char *dest, u32 size)
{
	u32 len;
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = size - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	while (1) {
		len = btrfs_inode_ref_name_len(eb, iref);
		bytes_left -= len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
						(unsigned long)(iref + 1), len);
		if (eb != eb_in)
			free_extent_buffer(eb);
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret)
			break;
		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in)
			atomic_inc(&eb->refs);
		btrfs_release_path(path);

		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key)
{
	int ret;
	u64 flags;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ret = btrfs_previous_item(fs_info->extent_root, path,
					0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
	    found_key->objectid > logical ||
	    found_key->objectid + found_key->offset <= logical)
		return -ENOENT;

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return BTRFS_EXTENT_FLAG_TREE_BLOCK;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		return BTRFS_EXTENT_FLAG_DATA;

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				struct btrfs_extent_item *ei, u32 item_size,
				struct btrfs_extent_inline_ref **out_eiref,
				int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
				struct btrfs_extent_item *ei, u32 item_size,
				u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
						&eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}

static int __data_list_add(struct list_head *head, u64 inum,
				u64 extent_data_item_offset, u64 root)
{
	struct __data_ref *ref;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	ref->inum = inum;
	ref->extent_data_item_offset = extent_data_item_offset;
	ref->root = root;
	list_add_tail(&ref->list, head);

	return 0;
}

static int __data_list_add_eb(struct list_head *head, struct extent_buffer *eb,
				struct btrfs_extent_data_ref *dref)
{
	return __data_list_add(head, btrfs_extent_data_ref_objectid(eb, dref),
				btrfs_extent_data_ref_offset(eb, dref),
				btrfs_extent_data_ref_root(eb, dref));
}

static int __shared_list_add(struct list_head *head, u64 disk_byte)
{
	struct __shared_ref *ref;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	ref->disk_byte = disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}

static int __iter_shared_inline_ref_inodes(struct btrfs_fs_info *fs_info,
						u64 logical, u64 inum,
						u64 extent_data_item_offset,
						u64 extent_offset,
						struct btrfs_path *path,
						struct list_head *data_refs,
						iterate_extent_inodes_t *iterate,
						void *ctx)
{
	u64 ref_root;
	u32 item_size;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *eiref;
	struct __data_ref *ref;
	int ret;
	int type;
	int last;
	unsigned long ptr = 0;

	WARN_ON(!list_empty(data_refs));
	ret = extent_from_logical(fs_info, logical, path, &key);
	if (ret & BTRFS_EXTENT_FLAG_DATA)
		ret = -EIO;
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	ret = 0;
	ref_root = 0;
	/*
	 * as done in iterate_extent_inodes, we first build a list of refs to
	 * iterate, then free the path and then iterate them to avoid deadlocks.
	 */
	do {
		last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
						&eiref, &type);
		if (last < 0) {
			ret = last;
			goto out;
		}
		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY) {
			ref_root = btrfs_extent_inline_ref_offset(eb, eiref);
			ret = __data_list_add(data_refs, inum,
						extent_data_item_offset,
						ref_root);
		}
	} while (!ret && !last);

	btrfs_release_path(path);

	if (ref_root == 0) {
		printk(KERN_ERR "btrfs: failed to find tree block ref "
			"for shared data backref %llu\n", logical);
		WARN_ON(1);
		ret = -EIO;
	}

out:
	while (!list_empty(data_refs)) {
		ref = list_first_entry(data_refs, struct __data_ref, list);
		list_del(&ref->list);
		if (!ret)
			ret = iterate(ref->inum, extent_offset +
					ref->extent_data_item_offset,
					ref->root, ctx);
		kfree(ref);
	}

	return ret;
}

static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
					u64 logical, u64 orig_extent_item_objectid,
					u64 extent_offset, struct btrfs_path *path,
					struct list_head *data_refs,
					iterate_extent_inodes_t *iterate,
					void *ctx)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb;
	int slot;
	int nritems;
	int ret;
	int found = 0;

	eb = read_tree_block(fs_info->tree_root, logical,
				fs_info->tree_root->leafsize, 0);
	if (!eb)
		return -EIO;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		if (!fi) {
			free_extent_buffer(eb);
			return -EIO;
		}
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != orig_extent_item_objectid) {
			if (found)
				break;
			else
				continue;
		}
		++found;
		ret = __iter_shared_inline_ref_inodes(fs_info, logical,
							key.objectid,
							key.offset,
							extent_offset, path,
							data_refs,
							iterate, ctx);
		if (ret)
			break;
	}

	if (!found) {
		printk(KERN_ERR "btrfs: failed to follow shared data backref "
			"to parent %llu\n", logical);
		WARN_ON(1);
		ret = -EIO;
	}

	free_extent_buffer(eb);
	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters. will use the path given as a parameter and return it
 * released.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				u64 extent_item_objectid,
				u64 extent_offset,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	unsigned long ptr = 0;
	int last;
	int ret;
	int type;
	u64 logical;
	u32 item_size;
	struct btrfs_extent_inline_ref *eiref;
	struct btrfs_extent_data_ref *dref;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
	struct __data_ref *ref_d;
	struct __shared_ref *ref_s;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	/* first we iterate the inline refs, ... */
	do {
		last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
						&eiref, &type);
		if (last == -ENOENT) {
			ret = 0;
			break;
		}
		if (last < 0) {
			ret = last;
			break;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = (struct btrfs_extent_data_ref *)(&eiref->offset);
			ret = __data_list_add_eb(&data_refs, eb, dref);
		} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
			logical = btrfs_extent_inline_ref_offset(eb, eiref);
			ret = __shared_list_add(&shared_refs, logical);
		}
	} while (!ret && !last);

	/* ... then we proceed to in-tree references and ... */
	while (!ret) {
		++path->slots[0];
		if (path->slots[0] > btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(fs_info->extent_root, path);
			if (ret) {
				if (ret == 1)
					ret = 0; /* we're done */
				break;
			}
			eb = path->nodes[0];
		}
		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
		if (key.objectid != extent_item_objectid)
			break;
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = btrfs_item_ptr(eb, path->slots[0],
						struct btrfs_extent_data_ref);
			ret = __data_list_add_eb(&data_refs, eb, dref);
		} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
			ret = __shared_list_add(&shared_refs, key.offset);
		}
	}

	btrfs_release_path(path);

	/*
	 * ... only at the very end we can process the refs we found. this is
	 * because the iterator function we call is allowed to make tree lookups
	 * and we have to avoid deadlocks. additionally, we need more tree
	 * lookups ourselves for shared data refs.
	 */
	while (!list_empty(&data_refs)) {
		ref_d = list_first_entry(&data_refs, struct __data_ref, list);
		list_del(&ref_d->list);
		if (!ret)
			ret = iterate(ref_d->inum, extent_offset +
					ref_d->extent_data_item_offset,
					ref_d->root, ctx);
		kfree(ref_d);
	}

	while (!list_empty(&shared_refs)) {
		ref_s = list_first_entry(&shared_refs, struct __shared_ref,
					list);
		list_del(&ref_s->list);
		if (!ret)
			ret = __iter_shared_inline_ref(fs_info,
							ref_s->disk_byte,
							extent_item_objectid,
							extent_offset, path,
							&data_refs,
							iterate, ctx);
		kfree(ref_s);
	}

	return ret;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 offset;
	struct btrfs_key found_key;

	ret = extent_from_logical(fs_info, logical, path,
					&found_key);
	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	offset = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, path, found_key.objectid,
					offset, iterate, ctx);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
				struct btrfs_path *path,
				iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (1) {
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
					&found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		atomic_inc(&eb->refs);
		btrfs_release_path(path);

		item = btrfs_item_nr(eb, slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			ret = iterate(parent, iref, eb, ctx);
			if (ret) {
				free_extent_buffer(eb);
				break;
			}
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
				struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
			ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->str + (i + 1) * s_ptr;
	fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
				inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->str[i] = fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->str[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->str[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
				inode_to_path, ipath);
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kmalloc(alloc_bytes, GFP_NOFS);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		kfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	kfree(ipath);
}
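
Note on using the new API: iterate_extent_inodes() and its wrapper
iterate_inodes_from_logical() deliberately collect all refs into lists and
release the btrfs_path before invoking the callback, so the callback may
safely do tree lookups of its own. A minimal sketch of a caller follows;
the callback name and the surrounding code are illustrative, not part of
this commit, and only the declarations from backref.h (below) are assumed:

/* callback matching iterate_extent_inodes_t; returning non-zero
 * stops the iteration early */
static int log_extent_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
	printk(KERN_DEBUG "btrfs: extent referenced by ino %llu "
	       "off %llu root %llu\n",
	       (unsigned long long)inum, (unsigned long long)offset,
	       (unsigned long long)root);
	return 0;
}

	/* caller side: the path comes back released in either case */
	struct btrfs_path *path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  log_extent_inode, NULL);
	btrfs_free_path(path);
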
fs/btrfs/backref.h (new file, 62 lines added)

/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#ifndef __BTRFS_BACKREF__
#define __BTRFS_BACKREF__

#include "ioctl.h"

struct inode_fs_paths {
	struct btrfs_path *btrfs_path;
	struct btrfs_root *fs_root;
	struct btrfs_data_container *fspath;
};

typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
				void *ctx);
typedef int (iterate_irefs_t)(u64 parent, struct btrfs_inode_ref *iref,
				struct extent_buffer *eb, void *ctx);

int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			struct btrfs_path *path);

int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key);

int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			struct btrfs_extent_item *ei, u32 item_size,
			u64 *out_root, u8 *out_level);

int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				u64 extent_item_objectid,
				u64 extent_offset,
				iterate_extent_inodes_t *iterate, void *ctx);

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx);

int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);

struct btrfs_data_container *init_data_container(u32 total_bytes);
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path);
void free_ipath(struct inode_fs_paths *ipath);

#endif
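
A sketch of the inode-to-paths side (illustrative only; the container size
and the error handling are assumptions): init_ipath() sizes one container
for both the pointer table and the string data, paths_from_inode() fills it
via inode_to_path(), and the elem_missed/bytes_missing fields tell the
caller whether and by how much the container was undersized:

	struct btrfs_path *path = btrfs_alloc_path();
	struct inode_fs_paths *ipath;
	u32 i;
	int ret;

	if (!path)
		return -ENOMEM;
	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}
	ret = paths_from_inode(inum, ipath);
	if (!ret) {
		/* each str[i] is a 0-terminated, filesystem-relative path */
		for (i = 0; i < ipath->fspath->elem_cnt; ++i)
			printk(KERN_DEBUG "path: %s\n", ipath->fspath->str[i]);
		if (ipath->fspath->elem_missed)
			/* container too small; bytes_missing says how much
			 * more space would have been needed */
			printk(KERN_DEBUG "missed %u paths\n",
			       ipath->fspath->elem_missed);
	}
	free_ipath(ipath);
	btrfs_free_path(path);
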
fs/btrfs/disk-io.c
@@ -620,7 +620,7 @@ out:

 static int btree_io_failed_hook(struct bio *failed_bio,
 				struct page *page, u64 start, u64 end,
-				struct extent_state *state)
+				u64 mirror_num, struct extent_state *state)
 {
 	struct extent_io_tree *tree;
 	unsigned long len;
@@ -944,7 +944,7 @@ static int btree_readpage(struct file *file, struct page *page)
 {
 	struct extent_io_tree *tree;
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
-	return extent_read_full_page(tree, page, btree_get_extent);
+	return extent_read_full_page(tree, page, btree_get_extent, 0);
 }

 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
fs/btrfs/extent-tree.c
@@ -1788,18 +1788,18 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 {
 	int ret;
 	u64 discarded_bytes = 0;
-	struct btrfs_multi_bio *multi = NULL;
+	struct btrfs_bio *bbio = NULL;


 	/* Tell the block device(s) that the sectors can be discarded */
 	ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
-			      bytenr, &num_bytes, &multi, 0);
+			      bytenr, &num_bytes, &bbio, 0);
 	if (!ret) {
-		struct btrfs_bio_stripe *stripe = multi->stripes;
+		struct btrfs_bio_stripe *stripe = bbio->stripes;
 		int i;


-		for (i = 0; i < multi->num_stripes; i++, stripe++) {
+		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
 			if (!stripe->dev->can_discard)
 				continue;
@@ -1818,7 +1818,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 		 */
 		ret = 0;
 	}
-	kfree(multi);
+	kfree(bbio);
 }

 	if (actual_bytes)
fs/btrfs/extent_io.c
@@ -17,6 +17,7 @@
 #include "compat.h"
 #include "ctree.h"
 #include "btrfs_inode.h"
+#include "volumes.h"

 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
@@ -1787,6 +1788,368 @@ static int check_page_writeback(struct extent_io_tree *tree,
 	return 0;
 }

+/*
+ * When IO fails, either with EIO or csum verification fails, we
+ * try other mirrors that might have a good copy of the data. This
+ * io_failure_record is used to record state as we go through all the
+ * mirrors. If another mirror has good data, the page is set up to date
+ * and things continue. If a good mirror can't be found, the original
+ * bio end_io callback is called to indicate things have failed.
+ */
+struct io_failure_record {
+	struct page *page;
+	u64 start;
+	u64 len;
+	u64 logical;
+	unsigned long bio_flags;
+	int this_mirror;
+	int failed_mirror;
+	int in_validation;
+};
+
+static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
+				int did_repair)
+{
+	int ret;
+	int err = 0;
+	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
+
+	set_state_private(failure_tree, rec->start, 0);
+	ret = clear_extent_bits(failure_tree, rec->start,
+				rec->start + rec->len - 1,
+				EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+	if (ret)
+		err = ret;
+
+	if (did_repair) {
+		ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
+					rec->start + rec->len - 1,
+					EXTENT_DAMAGED, GFP_NOFS);
+		if (ret && !err)
+			err = ret;
+	}
+
+	kfree(rec);
+	return err;
+}
+
+static void repair_io_failure_callback(struct bio *bio, int err)
+{
+	complete(bio->bi_private);
+}
+
+/*
+ * this bypasses the standard btrfs submit functions deliberately, as
+ * the standard behavior is to write all copies in a raid setup. here we only
+ * want to write the one bad copy. so we do the mapping for ourselves and issue
+ * submit_bio directly.
+ * to avoid any synchronization issues, wait for the data after writing, which
+ * actually prevents the read that triggered the error from finishing.
+ * currently, there can be no more than two copies of every data bit. thus,
+ * exactly one rewrite is required.
+ */
+int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
+			u64 length, u64 logical, struct page *page,
+			int mirror_num)
+{
+	struct bio *bio;
+	struct btrfs_device *dev;
+	DECLARE_COMPLETION_ONSTACK(compl);
+	u64 map_length = 0;
+	u64 sector;
+	struct btrfs_bio *bbio = NULL;
+	int ret;
+
+	BUG_ON(!mirror_num);
+
+	bio = bio_alloc(GFP_NOFS, 1);
+	if (!bio)
+		return -EIO;
+	bio->bi_private = &compl;
+	bio->bi_end_io = repair_io_failure_callback;
+	bio->bi_size = 0;
+	map_length = length;
+
+	ret = btrfs_map_block(map_tree, WRITE, logical,
+				&map_length, &bbio, mirror_num);
+	if (ret) {
+		bio_put(bio);
+		return -EIO;
+	}
+	BUG_ON(mirror_num != bbio->mirror_num);
+	sector = bbio->stripes[mirror_num-1].physical >> 9;
+	bio->bi_sector = sector;
+	dev = bbio->stripes[mirror_num-1].dev;
+	kfree(bbio);
+	if (!dev || !dev->bdev || !dev->writeable) {
+		bio_put(bio);
+		return -EIO;
+	}
+	bio->bi_bdev = dev->bdev;
+	bio_add_page(bio, page, length, start-page_offset(page));
+	submit_bio(WRITE_SYNC, bio);
+	wait_for_completion(&compl);
+
+	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+		/* try to remap that extent elsewhere? */
+		bio_put(bio);
+		return -EIO;
+	}
+
+	printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s "
+			"sector %llu)\n", page->mapping->host->i_ino, start,
+			dev->name, sector);
+
+	bio_put(bio);
+	return 0;
+}
+
+/*
+ * each time an IO finishes, we do a fast check in the IO failure tree
+ * to see if we need to process or clean up an io_failure_record
+ */
+static int clean_io_failure(u64 start, struct page *page)
+{
+	u64 private;
+	u64 private_failure;
+	struct io_failure_record *failrec;
+	struct btrfs_mapping_tree *map_tree;
+	struct extent_state *state;
+	int num_copies;
+	int did_repair = 0;
+	int ret;
+	struct inode *inode = page->mapping->host;
+
+	private = 0;
+	ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
+				(u64)-1, 1, EXTENT_DIRTY, 0);
+	if (!ret)
+		return 0;
+
+	ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
+				&private_failure);
+	if (ret)
+		return 0;
+
+	failrec = (struct io_failure_record *)(unsigned long)private_failure;
+	BUG_ON(!failrec->this_mirror);
+
+	if (failrec->in_validation) {
+		/* there was no real error, just free the record */
+		pr_debug("clean_io_failure: freeing dummy error at %llu\n",
+			 failrec->start);
+		did_repair = 1;
+		goto out;
+	}
+
+	spin_lock(&BTRFS_I(inode)->io_tree.lock);
+	state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
+					    failrec->start,
+					    EXTENT_LOCKED);
+	spin_unlock(&BTRFS_I(inode)->io_tree.lock);
+
+	if (state && state->start == failrec->start) {
+		map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
+		num_copies = btrfs_num_copies(map_tree, failrec->logical,
+						failrec->len);
+		if (num_copies > 1) {
+			ret = repair_io_failure(map_tree, start, failrec->len,
+						failrec->logical, page,
+						failrec->failed_mirror);
+			did_repair = !ret;
+		}
+	}
+
+out:
+	if (!ret)
+		ret = free_io_failure(inode, failrec, did_repair);
+
+	return ret;
+}
+
+/*
+ * this is a generic handler for readpage errors (default
+ * readpage_io_failed_hook). if other copies exist, read those and write back
+ * good data to the failed position. does not investigate in remapping the
+ * failed extent elsewhere, hoping the device will be smart enough to do this as
+ * needed
+ */
+
+static int bio_readpage_error(struct bio *failed_bio, struct page *page,
+				u64 start, u64 end, int failed_mirror,
+				struct extent_state *state)
+{
+	struct io_failure_record *failrec = NULL;
+	u64 private;
+	struct extent_map *em;
+	struct inode *inode = page->mapping->host;
+	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
+	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
+	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct bio *bio;
+	int num_copies;
+	int ret;
+	int read_mode;
+	u64 logical;
+
+	BUG_ON(failed_bio->bi_rw & REQ_WRITE);
+
+	ret = get_state_private(failure_tree, start, &private);
+	if (ret) {
+		failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
+		if (!failrec)
+			return -ENOMEM;
+		failrec->start = start;
+		failrec->len = end - start + 1;
+		failrec->this_mirror = 0;
+		failrec->bio_flags = 0;
+		failrec->in_validation = 0;
+
+		read_lock(&em_tree->lock);
+		em = lookup_extent_mapping(em_tree, start, failrec->len);
+		if (!em) {
+			read_unlock(&em_tree->lock);
+			kfree(failrec);
+			return -EIO;
+		}
+
+		if (em->start > start || em->start + em->len < start) {
+			free_extent_map(em);
+			em = NULL;
+		}
+		read_unlock(&em_tree->lock);
+
+		if (!em || IS_ERR(em)) {
+			kfree(failrec);
+			return -EIO;
+		}
+		logical = start - em->start;
+		logical = em->block_start + logical;
+		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
+			logical = em->block_start;
+			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
+			extent_set_compress_type(&failrec->bio_flags,
+						 em->compress_type);
+		}
+		pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
+			 "len=%llu\n", logical, start, failrec->len);
+		failrec->logical = logical;
+		free_extent_map(em);
+
+		/* set the bits in the private failure tree */
+		ret = set_extent_bits(failure_tree, start, end,
+					EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+		if (ret >= 0)
+			ret = set_state_private(failure_tree, start,
+						(u64)(unsigned long)failrec);
+		/* set the bits in the inode's tree */
+		if (ret >= 0)
+			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
+						GFP_NOFS);
+		if (ret < 0) {
+			kfree(failrec);
+			return ret;
+		}
+	} else {
+		failrec = (struct io_failure_record *)(unsigned long)private;
+		pr_debug("bio_readpage_error: (found) logical=%llu, "
+			 "start=%llu, len=%llu, validation=%d\n",
+			 failrec->logical, failrec->start, failrec->len,
+			 failrec->in_validation);
+		/*
+		 * when data can be on disk more than twice, add to failrec here
+		 * (e.g. with a list for failed_mirror) to make
+		 * clean_io_failure() clean all those errors at once.
+		 */
+	}
+	num_copies = btrfs_num_copies(
+			&BTRFS_I(inode)->root->fs_info->mapping_tree,
+			failrec->logical, failrec->len);
+	if (num_copies == 1) {
+		/*
+		 * we only have a single copy of the data, so don't bother with
+		 * all the retry and error correction code that follows. no
+		 * matter what the error is, it is very likely to persist.
+		 */
+		pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
+			 "state=%p, num_copies=%d, next_mirror %d, "
+			 "failed_mirror %d\n", state, num_copies,
+			 failrec->this_mirror, failed_mirror);
+		free_io_failure(inode, failrec, 0);
+		return -EIO;
+	}
+
+	if (!state) {
+		spin_lock(&tree->lock);
+		state = find_first_extent_bit_state(tree, failrec->start,
+						    EXTENT_LOCKED);
+		if (state && state->start != failrec->start)
+			state = NULL;
+		spin_unlock(&tree->lock);
+	}
+
+	/*
+	 * there are two premises:
+	 *	a) deliver good data to the caller
+	 *	b) correct the bad sectors on disk
+	 */
+	if (failed_bio->bi_vcnt > 1) {
+		/*
+		 * to fulfill b), we need to know the exact failing sectors, as
+		 * we don't want to rewrite any more than the failed ones. thus,
+		 * we need separate read requests for the failed bio
+		 *
+		 * if the following BUG_ON triggers, our validation request got
+		 * merged. we need separate requests for our algorithm to work.
+		 */
+		BUG_ON(failrec->in_validation);
+		failrec->in_validation = 1;
+		failrec->this_mirror = failed_mirror;
+		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
+	} else {
+		/*
+		 * we're ready to fulfill a) and b) alongside. get a good copy
+		 * of the failed sector and if we succeed, we have setup
+		 * everything for repair_io_failure to do the rest for us.
+		 */
+		if (failrec->in_validation) {
+			BUG_ON(failrec->this_mirror != failed_mirror);
+			failrec->in_validation = 0;
+			failrec->this_mirror = 0;
+		}
+		failrec->failed_mirror = failed_mirror;
+		failrec->this_mirror++;
+		if (failrec->this_mirror == failed_mirror)
+			failrec->this_mirror++;
+		read_mode = READ_SYNC;
+	}
+
+	if (!state || failrec->this_mirror > num_copies) {
+		pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
+			 "next_mirror %d, failed_mirror %d\n", state,
+			 num_copies, failrec->this_mirror, failed_mirror);
+		free_io_failure(inode, failrec, 0);
+		return -EIO;
+	}
+
+	bio = bio_alloc(GFP_NOFS, 1);
+	bio->bi_private = state;
+	bio->bi_end_io = failed_bio->bi_end_io;
+	bio->bi_sector = failrec->logical >> 9;
+	bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+	bio->bi_size = 0;
+
+	bio_add_page(bio, page, failrec->len, start - page_offset(page));
+
+	pr_debug("bio_readpage_error: submitting new read[%#x] to "
+		 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
+		 failrec->this_mirror, num_copies, failrec->in_validation);
+
+	tree->ops->submit_bio_hook(inode, read_mode, bio, failrec->this_mirror,
+					failrec->bio_flags, 0);
+	return 0;
+}
+
 /* lots and lots of room for performance fixes in the end_bio funcs */

 /*
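
The retry arithmetic in bio_readpage_error() deserves a worked example:
this_mirror starts at 0 on the first failure; each retry advances it by one
and skips over failed_mirror; once it exceeds num_copies, every mirror has
been tried and the error is final. Pulled out as a standalone helper purely
for illustration (this is not code from the commit):

/* with num_copies == 2 and failed_mirror == 1: the retry reads mirror 2,
 * and the following attempt yields 3 > num_copies, so we give up;
 * with failed_mirror == 2: mirror 1 is read, then 2 is skipped and
 * 3 > num_copies again ends the retries */
static int next_mirror(int this_mirror, int failed_mirror, int num_copies)
{
	this_mirror++;
	if (this_mirror == failed_mirror)
		this_mirror++;
	return this_mirror; /* caller fails with -EIO if > num_copies */
}
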
@@ -1885,6 +2248,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		struct extent_state *cached = NULL;
 		struct extent_state *state;

+		pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
+			 "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
+			 (long int)bio->bi_bdev);
 		tree = &BTRFS_I(page->mapping->host)->io_tree;

 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1915,11 +2281,19 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 					    state);
 			if (ret)
 				uptodate = 0;
+			else
+				clean_io_failure(start, page);
 		}
-		if (!uptodate && tree->ops &&
-		    tree->ops->readpage_io_failed_hook) {
-			ret = tree->ops->readpage_io_failed_hook(bio, page,
-					start, end, state);
+		if (!uptodate) {
+			u64 failed_mirror;
+			failed_mirror = (u64)bio->bi_bdev;
+			if (tree->ops && tree->ops->readpage_io_failed_hook)
+				ret = tree->ops->readpage_io_failed_hook(
+						bio, page, start, end,
+						failed_mirror, state);
+			else
+				ret = bio_readpage_error(bio, page, start, end,
+							 failed_mirror, NULL);
 			if (ret == 0) {
 				uptodate =
 					test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -1999,6 +2373,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
 					   mirror_num, bio_flags, start);
 	else
 		submit_bio(rw, bio);
+
 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
 		ret = -EOPNOTSUPP;
 	bio_put(bio);
@@ -2264,16 +2639,16 @@ out:
 }

 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
-			  get_extent_t *get_extent)
+			  get_extent_t *get_extent, int mirror_num)
 {
 	struct bio *bio = NULL;
 	unsigned long bio_flags = 0;
 	int ret;

-	ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
+	ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
 				      &bio_flags);
 	if (bio)
-		ret = submit_one_bio(READ, bio, 0, bio_flags);
+		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
 	return ret;
 }

@@ -3127,7 +3502,7 @@ out:
 	return ret;
 }

-static inline struct page *extent_buffer_page(struct extent_buffer *eb,
+inline struct page *extent_buffer_page(struct extent_buffer *eb,
 					      unsigned long i)
 {
 	struct page *p;
@@ -3152,7 +3527,7 @@ static inline struct page *extent_buffer_page(struct extent_buffer *eb,
 	return p;
 }

-static inline unsigned long num_extent_pages(u64 start, u64 len)
+inline unsigned long num_extent_pages(u64 start, u64 len)
 {
 	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
 		(start >> PAGE_CACHE_SHIFT);
fs/btrfs/extent_io.h
@@ -18,6 +18,7 @@
 #define EXTENT_DO_ACCOUNTING (1 << 11)
 #define EXTENT_FIRST_DELALLOC (1 << 12)
 #define EXTENT_NEED_WAIT (1 << 13)
+#define EXTENT_DAMAGED (1 << 14)
 #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
 #define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
@@ -69,7 +70,7 @@ struct extent_io_ops {
 				unsigned long bio_flags);
 	int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
 	int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
-				       u64 start, u64 end,
+				       u64 start, u64 end, u64 failed_mirror,
 				       struct extent_state *state);
 	int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
 					u64 start, u64 end,
@@ -188,7 +189,7 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
 		    gfp_t mask);
 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
-			  get_extent_t *get_extent);
+			  get_extent_t *get_extent, int mirror_num);
 int __init extent_io_init(void);
 void extent_io_exit(void);
@@ -259,6 +260,8 @@ void free_extent_buffer(struct extent_buffer *eb);
 int read_extent_buffer_pages(struct extent_io_tree *tree,
 			     struct extent_buffer *eb, u64 start, int wait,
 			     get_extent_t *get_extent, int mirror_num);
+unsigned long num_extent_pages(u64 start, u64 len);
+struct page *extent_buffer_page(struct extent_buffer *eb, unsigned long i);

 static inline void extent_buffer_get(struct extent_buffer *eb)
 {
@@ -308,4 +311,10 @@ int extent_clear_unlock_delalloc(struct inode *inode,
 struct bio *
 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
 		gfp_t gfp_flags);
+
+struct btrfs_mapping_tree;
+
+int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
+			u64 length, u64 logical, struct page *page,
+			int mirror_num);
 #endif
fs/btrfs/inode.c
@@ -45,10 +45,10 @@
 #include "btrfs_inode.h"
 #include "ioctl.h"
 #include "print-tree.h"
-#include "volumes.h"
 #include "ordered-data.h"
 #include "xattr.h"
 #include "tree-log.h"
+#include "volumes.h"
 #include "compression.h"
 #include "locking.h"
 #include "free-space-cache.h"
@@ -1822,154 +1822,10 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
 	return btrfs_finish_ordered_io(page->mapping->host, start, end);
 }

-/*
- * When IO fails, either with EIO or csum verification fails, we
- * try other mirrors that might have a good copy of the data. This
- * io_failure_record is used to record state as we go through all the
- * mirrors. If another mirror has good data, the page is set up to date
- * and things continue. If a good mirror can't be found, the original
- * bio end_io callback is called to indicate things have failed.
- */
-struct io_failure_record {
-	struct page *page;
-	u64 start;
-	u64 len;
-	u64 logical;
-	unsigned long bio_flags;
-	int last_mirror;
-};
-
-static int btrfs_io_failed_hook(struct bio *failed_bio,
-				struct page *page, u64 start, u64 end,
-				struct extent_state *state)
-{
-	struct io_failure_record *failrec = NULL;
-	u64 private;
-	struct extent_map *em;
-	struct inode *inode = page->mapping->host;
-	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
-	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
-	struct bio *bio;
-	int num_copies;
-	int ret;
-	int rw;
-	u64 logical;
-
-	ret = get_state_private(failure_tree, start, &private);
-	if (ret) {
-		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
-		if (!failrec)
-			return -ENOMEM;
-		failrec->start = start;
-		failrec->len = end - start + 1;
-		failrec->last_mirror = 0;
-		failrec->bio_flags = 0;
-
-		read_lock(&em_tree->lock);
-		em = lookup_extent_mapping(em_tree, start, failrec->len);
-		if (em->start > start || em->start + em->len < start) {
-			free_extent_map(em);
-			em = NULL;
-		}
-		read_unlock(&em_tree->lock);
-
-		if (IS_ERR_OR_NULL(em)) {
-			kfree(failrec);
-			return -EIO;
-		}
-		logical = start - em->start;
-		logical = em->block_start + logical;
-		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
-			logical = em->block_start;
-			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
-			extent_set_compress_type(&failrec->bio_flags,
-						 em->compress_type);
-		}
-		failrec->logical = logical;
-		free_extent_map(em);
-		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
-				EXTENT_DIRTY, GFP_NOFS);
-		set_state_private(failure_tree, start,
-				  (u64)(unsigned long)failrec);
-	} else {
-		failrec = (struct io_failure_record *)(unsigned long)private;
-	}
-	num_copies = btrfs_num_copies(
-			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
-			      failrec->logical, failrec->len);
-	failrec->last_mirror++;
-	if (!state) {
-		spin_lock(&BTRFS_I(inode)->io_tree.lock);
-		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
-						    failrec->start,
-						    EXTENT_LOCKED);
-		if (state && state->start != failrec->start)
-			state = NULL;
-		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
-	}
-	if (!state || failrec->last_mirror > num_copies) {
-		set_state_private(failure_tree, failrec->start, 0);
-		clear_extent_bits(failure_tree, failrec->start,
-				  failrec->start + failrec->len - 1,
-				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
-		kfree(failrec);
|
|
||||||
return -EIO;
|
|
||||||
}
|
|
||||||
bio = bio_alloc(GFP_NOFS, 1);
|
|
||||||
bio->bi_private = state;
|
|
||||||
bio->bi_end_io = failed_bio->bi_end_io;
|
|
||||||
bio->bi_sector = failrec->logical >> 9;
|
|
||||||
bio->bi_bdev = failed_bio->bi_bdev;
|
|
||||||
bio->bi_size = 0;
|
|
||||||
|
|
||||||
bio_add_page(bio, page, failrec->len, start - page_offset(page));
|
|
||||||
if (failed_bio->bi_rw & REQ_WRITE)
|
|
||||||
rw = WRITE;
|
|
||||||
else
|
|
||||||
rw = READ;
|
|
||||||
|
|
||||||
ret = BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
|
|
||||||
failrec->last_mirror,
|
|
||||||
failrec->bio_flags, 0);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* each time an IO finishes, we do a fast check in the IO failure tree
|
|
||||||
* to see if we need to process or clean up an io_failure_record
|
|
||||||
*/
|
|
||||||
static int btrfs_clean_io_failures(struct inode *inode, u64 start)
|
|
||||||
{
|
|
||||||
u64 private;
|
|
||||||
u64 private_failure;
|
|
||||||
struct io_failure_record *failure;
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
private = 0;
|
|
||||||
if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
|
|
||||||
(u64)-1, 1, EXTENT_DIRTY, 0)) {
|
|
||||||
ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
|
|
||||||
start, &private_failure);
|
|
||||||
if (ret == 0) {
|
|
||||||
failure = (struct io_failure_record *)(unsigned long)
|
|
||||||
private_failure;
|
|
||||||
set_state_private(&BTRFS_I(inode)->io_failure_tree,
|
|
||||||
failure->start, 0);
|
|
||||||
clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
|
|
||||||
failure->start,
|
|
||||||
failure->start + failure->len - 1,
|
|
||||||
EXTENT_DIRTY | EXTENT_LOCKED,
|
|
||||||
GFP_NOFS);
|
|
||||||
kfree(failure);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* when reads are done, we need to check csums to verify the data is correct
|
* when reads are done, we need to check csums to verify the data is correct
|
||||||
* if there's a match, we allow the bio to finish. If not, we go through
|
* if there's a match, we allow the bio to finish. If not, the code in
|
||||||
* the io_failure_record routines to find good copies
|
* extent_io.c will try to find good copies for us.
|
||||||
*/
|
*/
|
||||||
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
|
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
|
||||||
struct extent_state *state)
|
struct extent_state *state)
|
||||||
|
@ -2015,10 +1871,6 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
|
||||||
|
|
||||||
kunmap_atomic(kaddr, KM_USER0);
|
kunmap_atomic(kaddr, KM_USER0);
|
||||||
good:
|
good:
|
||||||
/* if the io failure tree for this inode is non-empty,
|
|
||||||
* check to see if we've recovered from a failed IO
|
|
||||||
*/
|
|
||||||
btrfs_clean_io_failures(inode, start);
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
zeroit:
|
zeroit:
|
||||||
|
@ -6273,7 +6125,7 @@ int btrfs_readpage(struct file *file, struct page *page)
|
||||||
{
|
{
|
||||||
struct extent_io_tree *tree;
|
struct extent_io_tree *tree;
|
||||||
tree = &BTRFS_I(page->mapping->host)->io_tree;
|
tree = &BTRFS_I(page->mapping->host)->io_tree;
|
||||||
return extent_read_full_page(tree, page, btrfs_get_extent);
|
return extent_read_full_page(tree, page, btrfs_get_extent, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
|
static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
|
||||||
|
@ -7406,7 +7258,6 @@ static struct extent_io_ops btrfs_extent_io_ops = {
|
||||||
.readpage_end_io_hook = btrfs_readpage_end_io_hook,
|
.readpage_end_io_hook = btrfs_readpage_end_io_hook,
|
||||||
.writepage_end_io_hook = btrfs_writepage_end_io_hook,
|
.writepage_end_io_hook = btrfs_writepage_end_io_hook,
|
||||||
.writepage_start_hook = btrfs_writepage_start_hook,
|
.writepage_start_hook = btrfs_writepage_start_hook,
|
||||||
.readpage_io_failed_hook = btrfs_io_failed_hook,
|
|
||||||
.set_bit_hook = btrfs_set_bit_hook,
|
.set_bit_hook = btrfs_set_bit_hook,
|
||||||
.clear_bit_hook = btrfs_clear_bit_hook,
|
.clear_bit_hook = btrfs_clear_bit_hook,
|
||||||
.merge_extent_hook = btrfs_merge_extent_hook,
|
.merge_extent_hook = btrfs_merge_extent_hook,
|
||||||
|
|
143 fs/btrfs/ioctl.c
@@ -51,6 +51,7 @@
 #include "volumes.h"
 #include "locking.h"
 #include "inode-map.h"
+#include "backref.h"
 
 /* Mask out flags that are inappropriate for the given type of inode. */
 static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -2890,6 +2891,144 @@ static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
 	return ret;
 }
 
+static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
+{
+	int ret = 0;
+	int i;
+	unsigned long rel_ptr;
+	int size;
+	struct btrfs_ioctl_ino_path_args *ipa = NULL;
+	struct inode_fs_paths *ipath = NULL;
+	struct btrfs_path *path;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ipa = memdup_user(arg, sizeof(*ipa));
+	if (IS_ERR(ipa)) {
+		ret = PTR_ERR(ipa);
+		ipa = NULL;
+		goto out;
+	}
+
+	size = min_t(u32, ipa->size, 4096);
+	ipath = init_ipath(size, root, path);
+	if (IS_ERR(ipath)) {
+		ret = PTR_ERR(ipath);
+		ipath = NULL;
+		goto out;
+	}
+
+	ret = paths_from_inode(ipa->inum, ipath);
+	if (ret < 0)
+		goto out;
+
+	for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
+		rel_ptr = ipath->fspath->str[i] - (char *)ipath->fspath->str;
+		ipath->fspath->str[i] = (void *)rel_ptr;
+	}
+
+	ret = copy_to_user(ipa->fspath, ipath->fspath, size);
+	if (ret) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+out:
+	btrfs_free_path(path);
+	free_ipath(ipath);
+	kfree(ipa);
+
+	return ret;
+}
+
+static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
+{
+	struct btrfs_data_container *inodes = ctx;
+	const size_t c = 3 * sizeof(u64);
+
+	if (inodes->bytes_left >= c) {
+		inodes->bytes_left -= c;
+		inodes->val[inodes->elem_cnt] = inum;
+		inodes->val[inodes->elem_cnt + 1] = offset;
+		inodes->val[inodes->elem_cnt + 2] = root;
+		inodes->elem_cnt += 3;
+	} else {
+		inodes->bytes_missing += c - inodes->bytes_left;
+		inodes->bytes_left = 0;
+		inodes->elem_missed += 3;
+	}
+
+	return 0;
+}
+
+static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
+					void __user *arg)
+{
+	int ret = 0;
+	int size;
+	u64 extent_offset;
+	struct btrfs_ioctl_logical_ino_args *loi;
+	struct btrfs_data_container *inodes = NULL;
+	struct btrfs_path *path = NULL;
+	struct btrfs_key key;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	loi = memdup_user(arg, sizeof(*loi));
+	if (IS_ERR(loi)) {
+		ret = PTR_ERR(loi);
+		loi = NULL;
+		goto out;
+	}
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	size = min_t(u32, loi->size, 4096);
+	inodes = init_data_container(size);
+	if (IS_ERR(inodes)) {
+		ret = PTR_ERR(inodes);
+		inodes = NULL;
+		goto out;
+	}
+
+	ret = extent_from_logical(root->fs_info, loi->logical, path, &key);
+
+	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+		ret = -ENOENT;
+	if (ret < 0)
+		goto out;
+
+	extent_offset = loi->logical - key.objectid;
+	ret = iterate_extent_inodes(root->fs_info, path, key.objectid,
+					extent_offset, build_ino_list, inodes);
+
+	if (ret < 0)
+		goto out;
+
+	ret = copy_to_user(loi->inodes, inodes, size);
+	if (ret)
+		ret = -EFAULT;
+
+out:
+	btrfs_free_path(path);
+	kfree(inodes);
+	kfree(loi);
+
+	return ret;
+}
+
 long btrfs_ioctl(struct file *file, unsigned int
 		cmd, unsigned long arg)
 {
@@ -2947,6 +3086,10 @@ long btrfs_ioctl(struct file *file, unsigned int
 		return btrfs_ioctl_tree_search(file, argp);
 	case BTRFS_IOC_INO_LOOKUP:
 		return btrfs_ioctl_ino_lookup(file, argp);
+	case BTRFS_IOC_INO_PATHS:
+		return btrfs_ioctl_ino_to_path(root, argp);
+	case BTRFS_IOC_LOGICAL_INO:
+		return btrfs_ioctl_logical_to_ino(root, argp);
 	case BTRFS_IOC_SPACE_INFO:
 		return btrfs_ioctl_space_info(root, argp);
 	case BTRFS_IOC_SYNC:
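Together with the struct definitions added to ioctl.h below, these two handlers give userspace inode-number-to-path and logical-address-to-inode resolution. A hedged userspace sketch of the INO_PATHS side (the helper name and the 4096-byte buffer size are illustrative, and the ioctl definitions are assumed to be available to userspace, e.g. via a copy of this header); note the handler above converts fspath->str[i] to offsets relative to fspath->str before copyout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "ioctl.h"	/* assumed: copy of the btrfs ioctl definitions */

static int print_ino_paths(int fd, __u64 inum)
{
	struct btrfs_ioctl_ino_path_args ipa;
	struct btrfs_data_container *fspath;
	__u32 i;

	fspath = calloc(1, 4096);
	if (!fspath)
		return -1;

	memset(&ipa, 0, sizeof(ipa));
	ipa.inum = inum;
	ipa.size = 4096;
	ipa.fspath = fspath;

	if (ioctl(fd, BTRFS_IOC_INO_PATHS, &ipa) < 0) {
		free(fspath);
		return -1;
	}

	for (i = 0; i < fspath->elem_cnt; i++)
		/* each str[i] now holds an offset from &fspath->str */
		printf("%s\n", (char *)fspath->str +
				(unsigned long)fspath->str[i]);

	free(fspath);
	return 0;
}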
fs/btrfs/ioctl.h
@@ -193,6 +193,31 @@ struct btrfs_ioctl_space_args {
 	struct btrfs_ioctl_space_info spaces[0];
 };
 
+struct btrfs_data_container {
+	__u32	bytes_left;	/* out -- bytes not needed to deliver output */
+	__u32	bytes_missing;	/* out -- additional bytes needed for result */
+	__u32	elem_cnt;	/* out */
+	__u32	elem_missed;	/* out */
+	union {
+		char	*str[0];	/* out */
+		__u64	val[0];		/* out */
+	};
+};
+
+struct btrfs_ioctl_ino_path_args {
+	__u64				inum;		/* in */
+	__u32				size;		/* in */
+	__u64				reserved[4];
+	struct btrfs_data_container	*fspath;	/* out */
+};
+
+struct btrfs_ioctl_logical_ino_args {
+	__u64				logical;	/* in */
+	__u32				size;		/* in */
+	__u64				reserved[4];
+	struct btrfs_data_container	*inodes;	/* out */
+};
+
 #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
 				   struct btrfs_ioctl_vol_args)
 #define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
@@ -248,4 +273,9 @@ struct btrfs_ioctl_space_args {
 				struct btrfs_ioctl_dev_info_args)
 #define BTRFS_IOC_FS_INFO _IOR(BTRFS_IOCTL_MAGIC, 31, \
 			       struct btrfs_ioctl_fs_info_args)
+#define BTRFS_IOC_INO_PATHS _IOWR(BTRFS_IOCTL_MAGIC, 35, \
+					struct btrfs_ioctl_ino_path_args)
+#define BTRFS_IOC_LOGICAL_INO _IOWR(BTRFS_IOCTL_MAGIC, 36, \
+					struct btrfs_ioctl_ino_path_args)
+
 #endif
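build_ino_list in ioctl.c packs results into the val[] flavor of this container as flat u64 triples of (inode number, file offset, root id), with elem_cnt counting u64 slots rather than records. A sketch of walking it in userspace (hypothetical helper, not part of this patch):

static void dump_logical_ino_results(const struct btrfs_data_container *inodes)
{
	__u32 i;

	/* elem_cnt counts u64 slots, three per result */
	for (i = 0; i < inodes->elem_cnt; i += 3)
		printf("inum %llu offset %llu root %llu\n",
		       (unsigned long long)inodes->val[i],
		       (unsigned long long)inodes->val[i + 1],
		       (unsigned long long)inodes->val[i + 2]);
	if (inodes->bytes_missing)
		printf("buffer too small: %u more bytes needed\n",
		       inodes->bytes_missing);
}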
fs/btrfs/reada.c
@@ -247,7 +247,7 @@ int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
 
 static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
 					  struct btrfs_device *dev, u64 logical,
-					  struct btrfs_multi_bio *multi)
+					  struct btrfs_bio *multi)
 {
 	int ret;
 	int looped = 0;
@@ -327,7 +327,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
 	struct reada_extent *re = NULL;
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
-	struct btrfs_multi_bio *multi = NULL;
+	struct btrfs_bio *multi = NULL;
 	struct btrfs_device *dev;
 	u32 blocksize;
 	u64 length;
474 fs/btrfs/scrub.c
@@ -17,10 +17,14 @@
  */
 
 #include <linux/blkdev.h>
+#include <linux/ratelimit.h>
 #include "ctree.h"
 #include "volumes.h"
 #include "disk-io.h"
 #include "ordered-data.h"
+#include "transaction.h"
+#include "backref.h"
+#include "extent_io.h"
 
 /*
  * This is only the first step towards a full-features scrub. It reads all
@@ -60,7 +64,7 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix);
 struct scrub_page {
 	u64			flags;  /* extent flags */
 	u64			generation;
-	u64			mirror_num;
+	int			mirror_num;
 	int			have_csum;
 	u8			csum[BTRFS_CSUM_SIZE];
 };
@@ -84,6 +88,7 @@ struct scrub_dev {
 	int			first_free;
 	int			curr;
 	atomic_t		in_flight;
+	atomic_t		fixup_cnt;
 	spinlock_t		list_lock;
 	wait_queue_head_t	list_wait;
 	u16			csum_size;
@@ -97,6 +102,27 @@ struct scrub_dev {
 	spinlock_t		stat_lock;
 };
 
+struct scrub_fixup_nodatasum {
+	struct scrub_dev	*sdev;
+	u64			logical;
+	struct btrfs_root	*root;
+	struct btrfs_work	work;
+	int			mirror_num;
+};
+
+struct scrub_warning {
+	struct btrfs_path	*path;
+	u64			extent_item_size;
+	char			*scratch_buf;
+	char			*msg_buf;
+	const char		*errstr;
+	sector_t		sector;
+	u64			logical;
+	struct btrfs_device	*dev;
+	int			msg_bufsize;
+	int			scratch_bufsize;
+};
+
 static void scrub_free_csums(struct scrub_dev *sdev)
 {
 	while (!list_empty(&sdev->csum_list)) {
@@ -178,6 +204,7 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
 	sdev->first_free = 0;
 	sdev->curr = -1;
 	atomic_set(&sdev->in_flight, 0);
+	atomic_set(&sdev->fixup_cnt, 0);
 	atomic_set(&sdev->cancel_req, 0);
 	sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
 	INIT_LIST_HEAD(&sdev->csum_list);
@@ -192,24 +219,361 @@ nomem:
 	return ERR_PTR(-ENOMEM);
 }
 
+static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
+{
+	u64 isize;
+	u32 nlink;
+	int ret;
+	int i;
+	struct extent_buffer *eb;
+	struct btrfs_inode_item *inode_item;
+	struct scrub_warning *swarn = ctx;
+	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
+	struct inode_fs_paths *ipath = NULL;
+	struct btrfs_root *local_root;
+	struct btrfs_key root_key;
+
+	root_key.objectid = root;
+	root_key.type = BTRFS_ROOT_ITEM_KEY;
+	root_key.offset = (u64)-1;
+	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
+	if (IS_ERR(local_root)) {
+		ret = PTR_ERR(local_root);
+		goto err;
+	}
+
+	ret = inode_item_info(inum, 0, local_root, swarn->path);
+	if (ret) {
+		btrfs_release_path(swarn->path);
+		goto err;
+	}
+
+	eb = swarn->path->nodes[0];
+	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
+					struct btrfs_inode_item);
+	isize = btrfs_inode_size(eb, inode_item);
+	nlink = btrfs_inode_nlink(eb, inode_item);
+	btrfs_release_path(swarn->path);
+
+	ipath = init_ipath(4096, local_root, swarn->path);
+	ret = paths_from_inode(inum, ipath);
+
+	if (ret < 0)
+		goto err;
+
+	/*
+	 * we deliberately ignore the bit ipath might have been too small to
+	 * hold all of the paths here
+	 */
+	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
+		printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
+			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
+			"length %llu, links %u (path: %s)\n", swarn->errstr,
+			swarn->logical, swarn->dev->name,
+			(unsigned long long)swarn->sector, root, inum, offset,
+			min(isize - offset, (u64)PAGE_SIZE), nlink,
+			ipath->fspath->str[i]);
+
+	free_ipath(ipath);
+	return 0;
+
+err:
+	printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
+		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
+		"resolving failed with ret=%d\n", swarn->errstr,
+		swarn->logical, swarn->dev->name,
+		(unsigned long long)swarn->sector, root, inum, offset, ret);
+
+	free_ipath(ipath);
+	return 0;
+}
+
+static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
+				int ix)
+{
+	struct btrfs_device *dev = sbio->sdev->dev;
+	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
+	struct btrfs_path *path;
+	struct btrfs_key found_key;
+	struct extent_buffer *eb;
+	struct btrfs_extent_item *ei;
+	struct scrub_warning swarn;
+	u32 item_size;
+	int ret;
+	u64 ref_root;
+	u8 ref_level;
+	unsigned long ptr = 0;
+	const int bufsize = 4096;
+	u64 extent_offset;
+
+	path = btrfs_alloc_path();
+
+	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
+	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
+	swarn.sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
+	swarn.logical = sbio->logical + ix * PAGE_SIZE;
+	swarn.errstr = errstr;
+	swarn.dev = dev;
+	swarn.msg_bufsize = bufsize;
+	swarn.scratch_bufsize = bufsize;
+
+	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
+		goto out;
+
+	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key);
+	if (ret < 0)
+		goto out;
+
+	extent_offset = swarn.logical - found_key.objectid;
+	swarn.extent_item_size = found_key.offset;
+
+	eb = path->nodes[0];
+	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
+	item_size = btrfs_item_size_nr(eb, path->slots[0]);
+
+	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+		do {
+			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
+							&ref_root, &ref_level);
+			printk(KERN_WARNING "%s at logical %llu on dev %s, "
+				"sector %llu: metadata %s (level %d) in tree "
+				"%llu\n", errstr, swarn.logical, dev->name,
+				(unsigned long long)swarn.sector,
+				ref_level ? "node" : "leaf",
+				ret < 0 ? -1 : ref_level,
+				ret < 0 ? -1 : ref_root);
+		} while (ret != 1);
+	} else {
+		swarn.path = path;
+		iterate_extent_inodes(fs_info, path, found_key.objectid,
+					extent_offset,
+					scrub_print_warning_inode, &swarn);
+	}
+
+out:
+	btrfs_free_path(path);
+	kfree(swarn.scratch_buf);
+	kfree(swarn.msg_buf);
+}
+
+static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
+{
+	struct page *page = NULL;
+	unsigned long index;
+	struct scrub_fixup_nodatasum *fixup = ctx;
+	int ret;
+	int corrected = 0;
+	struct btrfs_key key;
+	struct inode *inode = NULL;
+	u64 end = offset + PAGE_SIZE - 1;
+	struct btrfs_root *local_root;
+
+	key.objectid = root;
+	key.type = BTRFS_ROOT_ITEM_KEY;
+	key.offset = (u64)-1;
+	local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
+	if (IS_ERR(local_root))
+		return PTR_ERR(local_root);
+
+	key.type = BTRFS_INODE_ITEM_KEY;
+	key.objectid = inum;
+	key.offset = 0;
+	inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
+	if (IS_ERR(inode))
+		return PTR_ERR(inode);
+
+	index = offset >> PAGE_CACHE_SHIFT;
+
+	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+	if (!page) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (PageUptodate(page)) {
+		struct btrfs_mapping_tree *map_tree;
+		if (PageDirty(page)) {
+			/*
+			 * we need to write the data to the defect sector. the
+			 * data that was in that sector is not in memory,
+			 * because the page was modified. we must not write the
+			 * modified page to that sector.
+			 *
+			 * TODO: what could be done here: wait for the delalloc
+			 *       runner to write out that page (might involve
+			 *       COW) and see whether the sector is still
+			 *       referenced afterwards.
+			 *
+			 * For the meantime, we'll treat this error
+			 * incorrectable, although there is a chance that a
+			 * later scrub will find the bad sector again and that
+			 * there's no dirty page in memory, then.
+			 */
+			ret = -EIO;
+			goto out;
+		}
+		map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
+		ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
+					fixup->logical, page,
+					fixup->mirror_num);
+		unlock_page(page);
+		corrected = !ret;
+	} else {
+		/*
+		 * we need to get good data first. the general readpage path
+		 * will call repair_io_failure for us, we just have to make
+		 * sure we read the bad mirror.
+		 */
+		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
+					EXTENT_DAMAGED, GFP_NOFS);
+		if (ret) {
+			/* set_extent_bits should give proper error */
+			WARN_ON(ret > 0);
+			if (ret > 0)
+				ret = -EFAULT;
+			goto out;
+		}
+
+		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
+						btrfs_get_extent,
+						fixup->mirror_num);
+		wait_on_page_locked(page);
+
+		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
+					end, EXTENT_DAMAGED, 0, NULL);
+		if (!corrected)
+			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
+						EXTENT_DAMAGED, GFP_NOFS);
+	}
+
+out:
+	if (page)
+		put_page(page);
+	if (inode)
+		iput(inode);
+
+	if (ret < 0)
+		return ret;
+
+	if (ret == 0 && corrected) {
+		/*
+		 * we only need to call readpage for one of the inodes belonging
+		 * to this extent. so make iterate_extent_inodes stop
+		 */
+		return 1;
+	}
+
+	return -EIO;
+}
+
+static void scrub_fixup_nodatasum(struct btrfs_work *work)
+{
+	int ret;
+	struct scrub_fixup_nodatasum *fixup;
+	struct scrub_dev *sdev;
+	struct btrfs_trans_handle *trans = NULL;
+	struct btrfs_fs_info *fs_info;
+	struct btrfs_path *path;
+	int uncorrectable = 0;
+
+	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
+	sdev = fixup->sdev;
+	fs_info = fixup->root->fs_info;
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		spin_lock(&sdev->stat_lock);
+		++sdev->stat.malloc_errors;
+		spin_unlock(&sdev->stat_lock);
+		uncorrectable = 1;
+		goto out;
+	}
+
+	trans = btrfs_join_transaction(fixup->root);
+	if (IS_ERR(trans)) {
+		uncorrectable = 1;
+		goto out;
+	}
+
+	/*
+	 * the idea is to trigger a regular read through the standard path. we
+	 * read a page from the (failed) logical address by specifying the
+	 * corresponding copynum of the failed sector. thus, that readpage is
+	 * expected to fail.
+	 * that is the point where on-the-fly error correction will kick in
+	 * (once it's finished) and rewrite the failed sector if a good copy
+	 * can be found.
+	 */
+	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
+						path, scrub_fixup_readpage,
+						fixup);
+	if (ret < 0) {
+		uncorrectable = 1;
+		goto out;
+	}
+	WARN_ON(ret != 1);
+
+	spin_lock(&sdev->stat_lock);
+	++sdev->stat.corrected_errors;
+	spin_unlock(&sdev->stat_lock);
+
+out:
+	if (trans && !IS_ERR(trans))
+		btrfs_end_transaction(trans, fixup->root);
+	if (uncorrectable) {
+		spin_lock(&sdev->stat_lock);
+		++sdev->stat.uncorrectable_errors;
+		spin_unlock(&sdev->stat_lock);
+		printk_ratelimited(KERN_ERR "btrfs: unable to fixup "
+					"(nodatasum) error at logical %llu\n",
+					fixup->logical);
+	}
+
+	btrfs_free_path(path);
+	kfree(fixup);
+
+	/* see caller why we're pretending to be paused in the scrub counters */
+	mutex_lock(&fs_info->scrub_lock);
+	atomic_dec(&fs_info->scrubs_running);
+	atomic_dec(&fs_info->scrubs_paused);
+	mutex_unlock(&fs_info->scrub_lock);
+	atomic_dec(&sdev->fixup_cnt);
+	wake_up(&fs_info->scrub_pause_wait);
+	wake_up(&sdev->list_wait);
+}
+
 /*
  * scrub_recheck_error gets called when either verification of the page
  * failed or the bio failed to read, e.g. with EIO. In the latter case,
  * recheck_error gets called for every page in the bio, even though only
  * one may be bad
  */
-static void scrub_recheck_error(struct scrub_bio *sbio, int ix)
+static int scrub_recheck_error(struct scrub_bio *sbio, int ix)
 {
+	struct scrub_dev *sdev = sbio->sdev;
+	u64 sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
+	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
+					DEFAULT_RATELIMIT_BURST);
+
 	if (sbio->err) {
-		if (scrub_fixup_io(READ, sbio->sdev->dev->bdev,
-				   (sbio->physical + ix * PAGE_SIZE) >> 9,
+		if (scrub_fixup_io(READ, sbio->sdev->dev->bdev, sector,
 				   sbio->bio->bi_io_vec[ix].bv_page) == 0) {
 			if (scrub_fixup_check(sbio, ix) == 0)
-				return;
+				return 0;
 		}
+		if (__ratelimit(&_rs))
+			scrub_print_warning("i/o error", sbio, ix);
+	} else {
+		if (__ratelimit(&_rs))
+			scrub_print_warning("checksum error", sbio, ix);
 	}
 
+	spin_lock(&sdev->stat_lock);
+	++sdev->stat.read_errors;
+	spin_unlock(&sdev->stat_lock);
+
 	scrub_fixup(sbio, ix);
+	return 1;
 }
 
 static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
@@ -247,7 +611,8 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix)
 	struct scrub_dev *sdev = sbio->sdev;
 	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
-	struct btrfs_multi_bio *multi = NULL;
+	struct btrfs_bio *bbio = NULL;
+	struct scrub_fixup_nodatasum *fixup;
 	u64 logical = sbio->logical + ix * PAGE_SIZE;
 	u64 length;
 	int i;
@@ -256,18 +621,36 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix)
 
 	if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) &&
 	    (sbio->spag[ix].have_csum == 0)) {
-		/*
-		 * nodatasum, don't try to fix anything
-		 * FIXME: we can do better, open the inode and trigger a
-		 * writeback
-		 */
-		goto uncorrectable;
+		fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
+		if (!fixup)
+			goto uncorrectable;
+		fixup->sdev = sdev;
+		fixup->logical = logical;
+		fixup->root = fs_info->extent_root;
+		fixup->mirror_num = sbio->spag[ix].mirror_num;
+		/*
+		 * increment scrubs_running to prevent cancel requests from
+		 * completing as long as a fixup worker is running. we must also
+		 * increment scrubs_paused to prevent deadlocking on pause
+		 * requests used for transactions commits (as the worker uses a
+		 * transaction context). it is safe to regard the fixup worker
+		 * as paused for all matters practical. effectively, we only
+		 * avoid cancellation requests from completing.
+		 */
+		mutex_lock(&fs_info->scrub_lock);
+		atomic_inc(&fs_info->scrubs_running);
+		atomic_inc(&fs_info->scrubs_paused);
+		mutex_unlock(&fs_info->scrub_lock);
+		atomic_inc(&sdev->fixup_cnt);
+		fixup->work.func = scrub_fixup_nodatasum;
+		btrfs_queue_worker(&fs_info->scrub_workers, &fixup->work);
+		return;
 	}
 
 	length = PAGE_SIZE;
 	ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length,
-			      &multi, 0);
-	if (ret || !multi || length < PAGE_SIZE) {
+			      &bbio, 0);
+	if (ret || !bbio || length < PAGE_SIZE) {
 		printk(KERN_ERR
 		       "scrub_fixup: btrfs_map_block failed us for %llu\n",
 		       (unsigned long long)logical);
@@ -275,19 +658,19 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix)
 		return;
 	}
 
-	if (multi->num_stripes == 1)
+	if (bbio->num_stripes == 1)
 		/* there aren't any replicas */
 		goto uncorrectable;
 
 	/*
 	 * first find a good copy
 	 */
-	for (i = 0; i < multi->num_stripes; ++i) {
-		if (i == sbio->spag[ix].mirror_num)
+	for (i = 0; i < bbio->num_stripes; ++i) {
+		if (i + 1 == sbio->spag[ix].mirror_num)
 			continue;
 
-		if (scrub_fixup_io(READ, multi->stripes[i].dev->bdev,
-				   multi->stripes[i].physical >> 9,
+		if (scrub_fixup_io(READ, bbio->stripes[i].dev->bdev,
+				   bbio->stripes[i].physical >> 9,
 				   sbio->bio->bi_io_vec[ix].bv_page)) {
 			/* I/O-error, this is not a good copy */
 			continue;
@@ -296,7 +679,7 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix)
 		if (scrub_fixup_check(sbio, ix) == 0)
 			break;
 	}
-	if (i == multi->num_stripes)
+	if (i == bbio->num_stripes)
 		goto uncorrectable;
 
 	if (!sdev->readonly) {
@@ -311,25 +694,23 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix)
 		}
 	}
 
-	kfree(multi);
+	kfree(bbio);
 	spin_lock(&sdev->stat_lock);
 	++sdev->stat.corrected_errors;
 	spin_unlock(&sdev->stat_lock);
 
-	if (printk_ratelimit())
-		printk(KERN_ERR "btrfs: fixed up at %llu\n",
-		       (unsigned long long)logical);
+	printk_ratelimited(KERN_ERR "btrfs: fixed up error at logical %llu\n",
+			       (unsigned long long)logical);
 	return;
 
 uncorrectable:
-	kfree(multi);
+	kfree(bbio);
 	spin_lock(&sdev->stat_lock);
 	++sdev->stat.uncorrectable_errors;
 	spin_unlock(&sdev->stat_lock);
 
-	if (printk_ratelimit())
-		printk(KERN_ERR "btrfs: unable to fixup at %llu\n",
-		       (unsigned long long)logical);
+	printk_ratelimited(KERN_ERR "btrfs: unable to fixup (regular) error at "
+				"logical %llu\n", (unsigned long long)logical);
 }
 
 static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
@@ -379,8 +760,14 @@ static void scrub_checksum(struct btrfs_work *work)
 	int ret;
 
 	if (sbio->err) {
+		ret = 0;
 		for (i = 0; i < sbio->count; ++i)
-			scrub_recheck_error(sbio, i);
+			ret |= scrub_recheck_error(sbio, i);
+		if (!ret) {
+			spin_lock(&sdev->stat_lock);
+			++sdev->stat.unverified_errors;
+			spin_unlock(&sdev->stat_lock);
+		}
 
 		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
 		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
@@ -393,10 +780,6 @@ static void scrub_checksum(struct btrfs_work *work)
 			bi->bv_offset = 0;
 			bi->bv_len = PAGE_SIZE;
 		}
-
-		spin_lock(&sdev->stat_lock);
-		++sdev->stat.read_errors;
-		spin_unlock(&sdev->stat_lock);
 		goto out;
 	}
 	for (i = 0; i < sbio->count; ++i) {
@@ -417,8 +800,14 @@ static void scrub_checksum(struct btrfs_work *work)
 			WARN_ON(1);
 		}
 		kunmap_atomic(buffer, KM_USER0);
-		if (ret)
-			scrub_recheck_error(sbio, i);
+		if (ret) {
+			ret = scrub_recheck_error(sbio, i);
+			if (!ret) {
+				spin_lock(&sdev->stat_lock);
+				++sdev->stat.unverified_errors;
+				spin_unlock(&sdev->stat_lock);
+			}
+		}
 	}
 
 out:
@@ -601,7 +990,7 @@ nomem:
 }
 
 static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
-		      u64 physical, u64 flags, u64 gen, u64 mirror_num,
+		      u64 physical, u64 flags, u64 gen, int mirror_num,
 		      u8 *csum, int force)
 {
 	struct scrub_bio *sbio;
@@ -698,7 +1087,7 @@ static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
 
 /* scrub extent tries to collect up to 64 kB for each bio */
 static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
-			u64 physical, u64 flags, u64 gen, u64 mirror_num)
+			u64 physical, u64 flags, u64 gen, int mirror_num)
 {
 	int ret;
 	u8 csum[BTRFS_CSUM_SIZE];
@@ -743,7 +1132,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 	u64 physical;
 	u64 logical;
 	u64 generation;
-	u64 mirror_num;
+	int mirror_num;
 	struct reada_control *reada1;
 	struct reada_control *reada2;
 	struct btrfs_key key_start;
@@ -758,21 +1147,21 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
 		offset = map->stripe_len * num;
 		increment = map->stripe_len * map->num_stripes;
-		mirror_num = 0;
+		mirror_num = 1;
 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
 		int factor = map->num_stripes / map->sub_stripes;
 		offset = map->stripe_len * (num / map->sub_stripes);
 		increment = map->stripe_len * factor;
-		mirror_num = num % map->sub_stripes;
+		mirror_num = num % map->sub_stripes + 1;
 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
 		increment = map->stripe_len;
-		mirror_num = num % map->num_stripes;
+		mirror_num = num % map->num_stripes + 1;
 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
 		increment = map->stripe_len;
-		mirror_num = num % map->num_stripes;
+		mirror_num = num % map->num_stripes + 1;
 	} else {
 		increment = map->stripe_len;
-		mirror_num = 0;
+		mirror_num = 1;
 	}
 
 	path = btrfs_alloc_path();
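A contract change worth noting in the hunks above: mirror numbers become int and 1-based, with 0 reserved for "no preference". A one-line restatement of the convention now used in the RAID0/RAID1/DUP/RAID10 branches (hypothetical helper, for illustration only):

static inline int nth_copy_to_mirror_num(int num, int nr_copies)
{
	/* copies are numbered from 1; 0 means "let btrfs choose" */
	return num % nr_copies + 1;
}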
@@ -1241,10 +1630,11 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
 	ret = scrub_enumerate_chunks(sdev, start, end);
 
 	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
-
 	atomic_dec(&fs_info->scrubs_running);
 	wake_up(&fs_info->scrub_pause_wait);
 
+	wait_event(sdev->list_wait, atomic_read(&sdev->fixup_cnt) == 0);
+
 	if (progress)
 		memcpy(progress, &sdev->stat, sizeof(*progress));
fs/btrfs/volumes.c
@@ -2880,7 +2880,7 @@ static int find_live_mirror(struct map_lookup *map, int first, int num,
 
 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 			     u64 logical, u64 *length,
-			     struct btrfs_multi_bio **multi_ret,
+			     struct btrfs_bio **bbio_ret,
 			     int mirror_num)
 {
 	struct extent_map *em;
@@ -2898,18 +2898,18 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 	int i;
 	int num_stripes;
 	int max_errors = 0;
-	struct btrfs_multi_bio *multi = NULL;
+	struct btrfs_bio *bbio = NULL;
 
-	if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
+	if (bbio_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
 		stripes_allocated = 1;
 again:
-	if (multi_ret) {
-		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
+	if (bbio_ret) {
+		bbio = kzalloc(btrfs_bio_size(stripes_allocated),
 				GFP_NOFS);
-		if (!multi)
+		if (!bbio)
 			return -ENOMEM;
 
-		atomic_set(&multi->error, 0);
+		atomic_set(&bbio->error, 0);
 	}
 
 	read_lock(&em_tree->lock);
@@ -2930,7 +2930,7 @@ again:
 	if (mirror_num > map->num_stripes)
 		mirror_num = 0;
 
-	/* if our multi bio struct is too small, back off and try again */
+	/* if our btrfs_bio struct is too small, back off and try again */
 	if (rw & REQ_WRITE) {
 		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
 				 BTRFS_BLOCK_GROUP_DUP)) {
@@ -2949,11 +2949,11 @@ again:
 			stripes_required = map->num_stripes;
 		}
 	}
-	if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
+	if (bbio_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
 	    stripes_allocated < stripes_required) {
 		stripes_allocated = map->num_stripes;
 		free_extent_map(em);
-		kfree(multi);
+		kfree(bbio);
 		goto again;
 	}
 	stripe_nr = offset;
@@ -2982,7 +2982,7 @@ again:
 		*length = em->len - offset;
 	}
 
-	if (!multi_ret)
+	if (!bbio_ret)
 		goto out;
 
 	num_stripes = 1;
@@ -3007,13 +3007,17 @@ again:
 			stripe_index = find_live_mirror(map, 0,
 					    map->num_stripes,
 					    current->pid % map->num_stripes);
+			mirror_num = stripe_index + 1;
 		}
 
 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
-		if (rw & (REQ_WRITE | REQ_DISCARD))
+		if (rw & (REQ_WRITE | REQ_DISCARD)) {
 			num_stripes = map->num_stripes;
-		else if (mirror_num)
+		} else if (mirror_num) {
 			stripe_index = mirror_num - 1;
+		} else {
+			mirror_num = 1;
+		}
 
 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
 		int factor = map->num_stripes / map->sub_stripes;
@@ -3033,6 +3037,7 @@ again:
 			stripe_index = find_live_mirror(map, stripe_index,
 					      map->sub_stripes, stripe_index +
 					      current->pid % map->sub_stripes);
+			mirror_num = stripe_index + 1;
 		}
 	} else {
 		/*
@@ -3041,15 +3046,16 @@ again:
 		 * stripe_index is the number of our device in the stripe array
 		 */
 		stripe_index = do_div(stripe_nr, map->num_stripes);
+		mirror_num = stripe_index + 1;
 	}
 	BUG_ON(stripe_index >= map->num_stripes);
 
 	if (rw & REQ_DISCARD) {
 		for (i = 0; i < num_stripes; i++) {
-			multi->stripes[i].physical =
+			bbio->stripes[i].physical =
 				map->stripes[stripe_index].physical +
 				stripe_offset + stripe_nr * map->stripe_len;
-			multi->stripes[i].dev = map->stripes[stripe_index].dev;
+			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
 
 			if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
 				u64 stripes;
@@ -3070,16 +3076,16 @@ again:
 				}
 				stripes = stripe_nr_end - 1 - j;
 				do_div(stripes, map->num_stripes);
-				multi->stripes[i].length = map->stripe_len *
+				bbio->stripes[i].length = map->stripe_len *
 					(stripes - stripe_nr + 1);
 
 				if (i == 0) {
-					multi->stripes[i].length -=
+					bbio->stripes[i].length -=
 						stripe_offset;
 					stripe_offset = 0;
 				}
 				if (stripe_index == last_stripe)
-					multi->stripes[i].length -=
+					bbio->stripes[i].length -=
 						stripe_end_offset;
 			} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
 				u64 stripes;
@@ -3104,11 +3110,11 @@ again:
 				}
 				stripes = stripe_nr_end - 1 - j;
 				do_div(stripes, factor);
-				multi->stripes[i].length = map->stripe_len *
+				bbio->stripes[i].length = map->stripe_len *
 					(stripes - stripe_nr + 1);
 
 				if (i < map->sub_stripes) {
-					multi->stripes[i].length -=
+					bbio->stripes[i].length -=
 						stripe_offset;
 					if (i == map->sub_stripes - 1)
 						stripe_offset = 0;
@@ -3116,11 +3122,11 @@ again:
 				if (stripe_index >= last_stripe &&
 				    stripe_index <= (last_stripe +
 						     map->sub_stripes - 1)) {
-					multi->stripes[i].length -=
+					bbio->stripes[i].length -=
 						stripe_end_offset;
 				}
 			} else
-				multi->stripes[i].length = *length;
+				bbio->stripes[i].length = *length;
 
 			stripe_index++;
 			if (stripe_index == map->num_stripes) {
@@ -3131,19 +3137,20 @@ again:
 		}
 	} else {
 		for (i = 0; i < num_stripes; i++) {
-			multi->stripes[i].physical =
+			bbio->stripes[i].physical =
 				map->stripes[stripe_index].physical +
 				stripe_offset +
 				stripe_nr * map->stripe_len;
-			multi->stripes[i].dev =
+			bbio->stripes[i].dev =
 				map->stripes[stripe_index].dev;
 			stripe_index++;
 		}
 	}
-	if (multi_ret) {
-		*multi_ret = multi;
-		multi->num_stripes = num_stripes;
-		multi->max_errors = max_errors;
+	if (bbio_ret) {
+		*bbio_ret = bbio;
+		bbio->num_stripes = num_stripes;
+		bbio->max_errors = max_errors;
+		bbio->mirror_num = mirror_num;
 	}
 out:
 	free_extent_map(em);
@@ -3152,9 +3159,9 @@ out:
 
 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 		    u64 logical, u64 *length,
-		    struct btrfs_multi_bio **multi_ret, int mirror_num)
+		    struct btrfs_bio **bbio_ret, int mirror_num)
 {
-	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
+	return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
 				 mirror_num);
 }
 
@@ -3223,28 +3230,30 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 	return 0;
 }
 
-static void end_bio_multi_stripe(struct bio *bio, int err)
+static void btrfs_end_bio(struct bio *bio, int err)
 {
-	struct btrfs_multi_bio *multi = bio->bi_private;
+	struct btrfs_bio *bbio = bio->bi_private;
 	int is_orig_bio = 0;
 
 	if (err)
-		atomic_inc(&multi->error);
+		atomic_inc(&bbio->error);
 
-	if (bio == multi->orig_bio)
+	if (bio == bbio->orig_bio)
 		is_orig_bio = 1;
 
-	if (atomic_dec_and_test(&multi->stripes_pending)) {
+	if (atomic_dec_and_test(&bbio->stripes_pending)) {
 		if (!is_orig_bio) {
 			bio_put(bio);
-			bio = multi->orig_bio;
+			bio = bbio->orig_bio;
 		}
-		bio->bi_private = multi->private;
-		bio->bi_end_io = multi->end_io;
+		bio->bi_private = bbio->private;
+		bio->bi_end_io = bbio->end_io;
+		bio->bi_bdev = (struct block_device *)
+					(unsigned long)bbio->mirror_num;
 		/* only send an error to the higher layers if it is
 		 * beyond the tolerance of the multi-bio
 		 */
-		if (atomic_read(&multi->error) > multi->max_errors) {
+		if (atomic_read(&bbio->error) > bbio->max_errors) {
 			err = -EIO;
 		} else if (err) {
 			/*
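Since struct bio carries no spare field, btrfs_end_bio above smuggles the mirror number that serviced the request to the original completion handler through bi_bdev, which is dead by that point. A sketch of the consuming side (in this series the actual reader lives in the extent_io.c end_io path; the helper name here is illustrative):

static inline int bio_failed_mirror(struct bio *bio)
{
	/*
	 * valid only inside the original bi_end_io after btrfs_end_bio
	 * has run: bi_bdev no longer points at a block device
	 */
	return (int)(unsigned long)bio->bi_bdev;
}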
|
@ -3254,7 +3263,7 @@ static void end_bio_multi_stripe(struct bio *bio, int err)
|
||||||
set_bit(BIO_UPTODATE, &bio->bi_flags);
|
set_bit(BIO_UPTODATE, &bio->bi_flags);
|
||||||
err = 0;
|
err = 0;
|
||||||
}
|
}
|
||||||
kfree(multi);
|
kfree(bbio);
|
||||||
|
|
||||||
bio_endio(bio, err);
|
bio_endio(bio, err);
|
||||||
} else if (!is_orig_bio) {
|
} else if (!is_orig_bio) {
|
||||||
@@ -3334,20 +3343,20 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 	u64 logical = (u64)bio->bi_sector << 9;
 	u64 length = 0;
 	u64 map_length;
-	struct btrfs_multi_bio *multi = NULL;
 	int ret;
 	int dev_nr = 0;
 	int total_devs = 1;
+	struct btrfs_bio *bbio = NULL;
 
 	length = bio->bi_size;
 	map_tree = &root->fs_info->mapping_tree;
 	map_length = length;
 
-	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
+	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
 			      mirror_num);
 	BUG_ON(ret);
 
-	total_devs = multi->num_stripes;
+	total_devs = bbio->num_stripes;
 	if (map_length < length) {
 		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
 		       "len %llu\n", (unsigned long long)logical,
@@ -3355,25 +3364,28 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 		       (unsigned long long)map_length);
 		BUG();
 	}
-	multi->end_io = first_bio->bi_end_io;
-	multi->private = first_bio->bi_private;
-	multi->orig_bio = first_bio;
-	atomic_set(&multi->stripes_pending, multi->num_stripes);
+
+	bbio->orig_bio = first_bio;
+	bbio->private = first_bio->bi_private;
+	bbio->end_io = first_bio->bi_end_io;
+	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
 
 	while (dev_nr < total_devs) {
-		if (total_devs > 1) {
-			if (dev_nr < total_devs - 1) {
-				bio = bio_clone(first_bio, GFP_NOFS);
-				BUG_ON(!bio);
-			} else {
-				bio = first_bio;
-			}
-			bio->bi_private = multi;
-			bio->bi_end_io = end_bio_multi_stripe;
-		}
-		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
-		dev = multi->stripes[dev_nr].dev;
+		if (dev_nr < total_devs - 1) {
+			bio = bio_clone(first_bio, GFP_NOFS);
+			BUG_ON(!bio);
+		} else {
+			bio = first_bio;
+		}
+		bio->bi_private = bbio;
+		bio->bi_end_io = btrfs_end_bio;
+		bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
+		dev = bbio->stripes[dev_nr].dev;
 		if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
+			pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu "
+				 "(%s id %llu), size=%u\n", rw,
+				 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
+				 dev->name, dev->devid, bio->bi_size);
 			bio->bi_bdev = dev->bdev;
 			if (async_submit)
 				schedule_bio(root, dev, rw, bio);
@@ -3386,8 +3398,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 		}
 		dev_nr++;
 	}
-	if (total_devs == 1)
-		kfree(multi);
 	return 0;
 }

fs/btrfs/volumes.h
@@ -144,7 +144,10 @@ struct btrfs_bio_stripe {
 	u64 length; /* only used for discard mappings */
 };
 
-struct btrfs_multi_bio {
+struct btrfs_bio;
+typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
+
+struct btrfs_bio {
 	atomic_t stripes_pending;
 	bio_end_io_t *end_io;
 	struct bio *orig_bio;
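
The forward declaration plus typedef added here lets the callback type be named before struct btrfs_bio is fully defined, so prototypes elsewhere can take a btrfs_bio_end_io_t without include-order headaches. The same C pattern in a compilable miniature (invented demo names):

#include <stdio.h>

/* forward declaration: enough for the typedef below */
struct demo_bio;
typedef void (demo_end_io_t)(struct demo_bio *bio, int err);

/* the full definition can come later and refer back to the typedef */
struct demo_bio {
	demo_end_io_t *end_io;	/* completion callback */
	int err;
};

static void on_done(struct demo_bio *bio, int err)
{
	(void)bio;
	printf("demo bio finished, err=%d\n", err);
}

int main(void)
{
	struct demo_bio b = { .end_io = on_done, .err = 0 };

	b.end_io(&b, b.err);
	return 0;
}
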
@@ -152,6 +155,7 @@ struct btrfs_multi_bio {
 	atomic_t error;
 	int max_errors;
 	int num_stripes;
+	int mirror_num;
 	struct btrfs_bio_stripe stripes[];
 };
 
@@ -179,7 +183,7 @@ struct map_lookup {
 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
 				   u64 end, u64 *length);
 
-#define btrfs_multi_bio_size(n) (sizeof(struct btrfs_multi_bio) + \
+#define btrfs_bio_size(n) (sizeof(struct btrfs_bio) + \
 			    (sizeof(struct btrfs_bio_stripe) * (n)))
 
 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
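
The renamed btrfs_bio_size() macro sizes the trailing flexible array member: one allocation holds the fixed header plus n stripe slots. A compilable userspace illustration of the same sizing idiom, using stand-in demo types rather than the kernel's:

#include <stdio.h>
#include <stdlib.h>

struct demo_stripe {
	unsigned long long physical;
};

struct demo_bbio {
	int num_stripes;
	struct demo_stripe stripes[];	/* flexible array member */
};

/* mirror of btrfs_bio_size(): header plus n trailing stripe slots */
#define demo_bbio_size(n) (sizeof(struct demo_bbio) + \
			   (sizeof(struct demo_stripe) * (n)))

int main(void)
{
	int n = 3;
	struct demo_bbio *b = calloc(1, demo_bbio_size(n));

	if (!b)
		return 1;
	b->num_stripes = n;
	for (int i = 0; i < n; i++)
		b->stripes[i].physical = 4096ULL * (unsigned long long)i;
	printf("allocated %zu bytes for %d stripes\n",
	       demo_bbio_size(n), b->num_stripes);
	free(b);
	return 0;
}
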
@@ -188,7 +192,7 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
 			   u64 chunk_offset, u64 start, u64 num_bytes);
 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 		    u64 logical, u64 *length,
-		    struct btrfs_multi_bio **multi_ret, int mirror_num);
+		    struct btrfs_bio **bbio_ret, int mirror_num);
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 		     u64 chunk_start, u64 physical, u64 devid,
 		     u64 **logical, int *naddrs, int *stripe_len);