author     Chris Mason    2008-07-24 15:41:53 +0200
committer  Chris Mason    2008-09-25 17:04:05 +0200
commit     89642229a582a5c2b6d2ed8ec16986387d9a9047 (patch)
tree       6ece7a9b7d92159096229e27b4862c3685f198f6 /fs/btrfs/inode.c
parent     Btrfs: Fix 32 bit compiles by using an unsigned long byte count in the ordere... (diff)
Btrfs: Search data ordered extents first for checksums on read
Checksum items are not inserted into the tree until all of the IO from a given extent is complete. This means one dirty page from an extent may be written, freed, and then read again before the entire extent is on disk and the checksum item is inserted.

The checksums themselves are stored in the ordered extent so they can be inserted in bulk when IO is complete. On read, if a checksum item isn't found, the ordered extents were searched for a checksum record. This worked most of the time, but the checksum insertion code tries to reduce the number of tree operations by pre-inserting checksum items based on i_size and a few other factors, so the read code could find a checksum item that hasn't really been filled in yet.

This commit changes things to check the ordered extents first and only dive into the btree if nothing is found there. This removes the need for extra locking and is more reliable.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
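The lookup order the patch introduces can be sketched outside the kernel. The snippet below is a minimal, self-contained user-space model of the control flow only, using two hypothetical stand-in helpers (find_ordered_sum() and lookup_csum_in_btree()) in place of btrfs_find_ordered_sum() and btrfs_lookup_csum(); it is not btrfs code and makes no attempt to model the real data structures.

/*
 * Sketch of the read-side lookup order: consult the sums cached in a
 * not-yet-finished ordered extent first, and only fall back to the
 * btree when nothing is cached.  Both helpers are hypothetical
 * stand-ins, not the real btrfs API.
 */
#include <stdio.h>

/* pretend sum cache held by an ordered extent that is still in flight */
static int find_ordered_sum(unsigned long long start, unsigned int *csum)
{
	if (start == 4096) {		/* sum still cached in the ordered extent */
		*csum = 0xdeadbeef;
		return 0;
	}
	return 1;			/* not found */
}

/* pretend btree lookup; may return a pre-inserted, not-yet-filled item */
static int lookup_csum_in_btree(unsigned long long start, unsigned int *csum)
{
	*csum = 0x12345678;
	return 0;
}

static int readpage_csum(unsigned long long start, unsigned int *csum)
{
	/* ordered extents first: a cached sum is always authoritative */
	if (find_ordered_sum(start, csum) == 0)
		return 0;

	/* only then dive into the btree */
	return lookup_csum_in_btree(start, csum);
}

int main(void)
{
	unsigned int csum;

	readpage_csum(4096, &csum);	/* served from the ordered extent */
	printf("csum for 4096: 0x%x\n", csum);

	readpage_csum(8192, &csum);	/* falls back to the btree */
	printf("csum for 8192: 0x%x\n", csum);
	return 0;
}

Because a sum cached in the ordered extent now always wins over a pre-inserted btree item, the read path no longer needs the csum_mutex serialization, which is why the lock and unlock calls are dropped in the hunk below.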
Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--  fs/btrfs/inode.c  33
1 file changed, 18 insertions, 15 deletions
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 28e667052ec3..0e90315ea803 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -611,22 +611,25 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
btrfs_test_flag(inode, NODATASUM))
return 0;
+ /*
+ * It is possible there is an ordered extent that has
+ * not yet finished for this range in the file. If so,
+ * that extent will have a csum cached, and it will insert
+ * the sum after all the blocks in the extent are fully
+ * on disk. So, look for an ordered extent and use the
+ * sum if found. We have to do this before looking in the
+ * btree because csum items are pre-inserted based on
+ * the file size. btrfs_lookup_csum might find an item
+ * that still hasn't been fully filled.
+ */
+ ret = btrfs_find_ordered_sum(inode, start, &csum);
+ if (ret == 0)
+ goto found;
+
+ ret = 0;
path = btrfs_alloc_path();
- mutex_lock(&BTRFS_I(inode)->csum_mutex);
item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, start, 0);
if (IS_ERR(item)) {
- /*
- * It is possible there is an ordered extent that has
- * not yet finished for this range in the file. If so,
- * that extent will have a csum cached, and it will insert
- * the sum after all the blocks in the extent are fully
- * on disk. So, look for an ordered extent and use the
- * sum if found.
- */
- ret = btrfs_find_ordered_sum(inode, start, &csum);
- if (ret == 0)
- goto found;
-
ret = PTR_ERR(item);
/* a csum that isn't present is a preallocated region. */
if (ret == -ENOENT || ret == -EFBIG)
@@ -641,7 +644,6 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
found:
set_state_private(io_tree, start, csum);
out:
- mutex_unlock(&BTRFS_I(inode)->csum_mutex);
if (path)
btrfs_free_path(path);
return ret;
@@ -1375,7 +1377,7 @@ again:
}
if (!PageUptodate(page)) {
ret = -EIO;
- goto out;
+ goto out_unlock;
}
}
wait_on_page_writeback(page);
@@ -1406,6 +1408,7 @@ again:
set_page_dirty(page);
unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+out_unlock:
unlock_page(page);
page_cache_release(page);
out: