author		Chris Mason	2008-04-17 17:29:12 +0200
committer	Chris Mason	2008-09-25 17:04:01 +0200
commit		3b951516ed703af0f6d82053937655ad69b60864 (patch)
tree		616e648b8683e6bb2996c7ce6bafb8f592244698 /fs/btrfs/file.c
parent		Btrfs: Only do async bio submission for pdflush (diff)
Btrfs: Use the extent map cache to find the logical disk block during data retries
The data read retry code needs to find the logical disk block before it can
resubmit new bios. But finding this block isn't allowed to take the fs_mutex,
because that will deadlock with a number of different callers.

This changes the retry code to use the extent map cache instead, but that
requires the extent map cache to have the extent we're looking for. This is a
problem because btrfs_drop_extent_cache just drops the entire extent instead
of the little tiny part it is invalidating.

The bulk of the code in this patch changes btrfs_drop_extent_cache to
invalidate only a portion of the extent cache, and changes btrfs_get_extent
to deal with the results.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
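To make the split arithmetic concrete, here is a minimal user-space sketch of the same bookkeeping (not the kernel code: struct em_piece and split_piece() are made-up names for illustration, while the real patch works on struct extent_map under em_tree->lock). Dropping the byte range [start, start + len) out of one cached extent leaves at most two surviving pieces, and the piece behind the dropped range gets its block_start shifted by the distance from the start of the original extent, exactly as the two add_extent_mapping() calls in the hunks below do.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the fields of struct extent_map used here. */
struct em_piece {
	uint64_t start;		/* logical file offset */
	uint64_t len;		/* length in bytes */
	uint64_t block_start;	/* logical disk block of the first byte */
};

/* Return the 0, 1 or 2 pieces of *em that survive dropping [start, start + len). */
static int split_piece(const struct em_piece *em, uint64_t start, uint64_t len,
		       struct em_piece out[2])
{
	int n = 0;

	/* piece in front of the dropped range keeps its block_start */
	if (em->start < start) {
		out[n].start = em->start;
		out[n].len = start - em->start;
		out[n].block_start = em->block_start;
		n++;
	}
	/* piece behind the dropped range: shift block_start by the same offset */
	if (em->start + em->len > start + len) {
		uint64_t diff = start + len - em->start;

		out[n].start = start + len;
		out[n].len = em->start + em->len - (start + len);
		out[n].block_start = em->block_start + diff;
		n++;
	}
	return n;
}

int main(void)
{
	/* 16 KiB extent at file offset 0, mapped to disk offset 1 MiB */
	struct em_piece em = { .start = 0, .len = 16384, .block_start = 1 << 20 };
	struct em_piece out[2];
	int i, n = split_piece(&em, 4096, 4096, out);	/* drop [4096, 8192) */

	for (i = 0; i < n; i++)
		printf("keep [%llu, %llu) -> disk %llu\n",
		       (unsigned long long)out[i].start,
		       (unsigned long long)(out[i].start + out[i].len),
		       (unsigned long long)out[i].block_start);
	return 0;
}

With that input the sketch keeps [0, 4096) at disk offset 1048576 and [8192, 16384) at disk offset 1048576 + 8192, which mirrors the front and back pieces the patch reinserts into the extent map tree.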
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--	fs/btrfs/file.c	47
1 file changed, 46 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 9fbda6552069..3f5525f0834c 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -356,12 +356,23 @@ out_unlock:
 int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
 {
 	struct extent_map *em;
+	struct extent_map *split = NULL;
+	struct extent_map *split2 = NULL;
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	u64 len = end - start + 1;
+	int ret;
+	int testend = 1;
 
-	if (end == (u64)-1)
+	if (end == (u64)-1) {
 		len = (u64)-1;
+		testend = 0;
+	}
 	while(1) {
+		if (!split)
+			split = alloc_extent_map(GFP_NOFS);
+		if (!split2)
+			split2 = alloc_extent_map(GFP_NOFS);
+
 		spin_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, start, len);
 		if (!em) {
@@ -369,6 +380,36 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
 			break;
 		}
 		remove_extent_mapping(em_tree, em);
+
+		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
+		    em->start < start) {
+			split->start = em->start;
+			split->len = start - em->start;
+			split->block_start = em->block_start;
+			split->bdev = em->bdev;
+			split->flags = em->flags;
+			ret = add_extent_mapping(em_tree, split);
+			BUG_ON(ret);
+			free_extent_map(split);
+			split = split2;
+			split2 = NULL;
+		}
+		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
+		    testend && em->start + em->len > start + len) {
+			u64 diff = start + len - em->start;
+
+			split->start = start + len;
+			split->len = em->start + em->len - (start + len);
+			split->bdev = em->bdev;
+			split->flags = em->flags;
+
+			split->block_start = em->block_start + diff;
+
+			ret = add_extent_mapping(em_tree, split);
+			BUG_ON(ret);
+			free_extent_map(split);
+			split = NULL;
+		}
 		spin_unlock(&em_tree->lock);
 
 		/* once for us */
@@ -376,6 +417,10 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
 		/* once for the tree*/
 		free_extent_map(em);
 	}
+	if (split)
+		free_extent_map(split);
+	if (split2)
+		free_extent_map(split2);
 	return 0;
 }
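For completeness, the retry side that this patch enables lives in fs/btrfs/inode.c rather than in this file; what the hunks above guarantee is that the extent map cache still holds a mapping for the range being retried. A hedged sketch of how such a lookup can translate a file offset into a logical disk block using only the em_tree spinlock (retry_find_logical_block() is a hypothetical helper name, and the btrfs-internal types are assumed to come from the fs/btrfs headers):

static u64 retry_find_logical_block(struct inode *inode, u64 file_offset)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 disk_block = (u64)-1;

	/* the extent map tree has its own spinlock, so fs_mutex is not needed */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, file_offset, 1);
	spin_unlock(&em_tree->lock);

	if (!em)
		return (u64)-1;		/* nothing cached for this offset */

	/* only real on-disk extents carry a meaningful block_start */
	if (em->block_start < EXTENT_MAP_LAST_BYTE)
		disk_block = em->block_start + (file_offset - em->start);

	free_extent_map(em);		/* drop the reference taken by the lookup */
	return disk_block;
}

Because the lookup only takes em_tree->lock, it is safe to call from the bio retry path without risking the fs_mutex deadlock described in the commit message.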