summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFilipe Manana2014-03-11 14:56:15 +0100
committerChris Mason2014-03-21 01:15:27 +0100
commit308d9800b2c4f1fb344dbf055912d3140438bac0 (patch)
tree24c94bc9e5d7aa3394a35d767f78f245e2922a58
parentBtrfs: fix deadlock with nested trans handles (diff)
downloadkernel-qcow2-linux-308d9800b2c4f1fb344dbf055912d3140438bac0.tar.gz
kernel-qcow2-linux-308d9800b2c4f1fb344dbf055912d3140438bac0.tar.xz
kernel-qcow2-linux-308d9800b2c4f1fb344dbf055912d3140438bac0.zip
Btrfs: cache extent states in defrag code path
When locking file ranges in the inode's io_tree, cache the first extent state that belongs to the target range, so that when unlocking the range we don't need to search in the io_tree again, reducing cpu time and therefore holding the io_tree's lock for a shorter period. Signed-off-by: Filipe David Borba Manana <fdmanana@gmail.com> Signed-off-by: Chris Mason <clm@fb.com>
-rw-r--r--fs/btrfs/ioctl.c13
1 file changed, 9 insertions, 4 deletions
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index e1747701f520..3ad5c10d3704 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -986,10 +986,13 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
read_unlock(&em_tree->lock);
if (!em) {
+ struct extent_state *cached = NULL;
+ u64 end = start + len - 1;
+
/* get the big lock and read metadata off disk */
- lock_extent(io_tree, start, start + len - 1);
+ lock_extent_bits(io_tree, start, end, 0, &cached);
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
- unlock_extent(io_tree, start, start + len - 1);
+ unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS);
if (IS_ERR(em))
return NULL;
@@ -1128,10 +1131,12 @@ again:
page_start = page_offset(page);
page_end = page_start + PAGE_CACHE_SIZE - 1;
while (1) {
- lock_extent(tree, page_start, page_end);
+ lock_extent_bits(tree, page_start, page_end,
+ 0, &cached_state);
ordered = btrfs_lookup_ordered_extent(inode,
page_start);
- unlock_extent(tree, page_start, page_end);
+ unlock_extent_cached(tree, page_start, page_end,
+ &cached_state, GFP_NOFS);
if (!ordered)
break;