author     Josef Bacik    2012-08-31 02:06:49 +0200
committer  Chris Mason    2012-10-01 21:19:09 +0200
commit     7014cdb49305eda0767d2ae6136f8c191ea8fd81 (patch)
tree       c13ca983bdb7ffdabf2b4bd3bade52ab431904a5 /fs/btrfs/file.c
parent     Btrfs: do not take cleanup_work_sem in btrfs_run_delayed_iputs() (diff)
Btrfs: btrfs_drop_extent_cache should never fail
I noticed this while doing the fsync work: we allocate split extents if we drop an extent range that falls in the middle of an existing extent. The current code BUG()'s if we fail to allocate memory, but this is just a cache; we will simply regenerate it if we need it. The important part is that we free the range we are given, and that can be done without any allocation. So if we fail to allocate the splits, just skip the splitting stage, free our em, and look for more extents to drop. This also makes btrfs_drop_extent_cache return void, since nobody was checking the return value anyway. Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
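For illustration, here is a minimal, self-contained sketch of the fallback pattern the patch adopts, outside the btrfs code. Everything in it (drop_cached_range, struct split, the malloc-based allocation) is hypothetical and only stands in for the real extent-map handling: the split objects are an optional refinement of a cache, so an allocation failure skips the splitting stage instead of aborting the mandatory cleanup.

/* Hypothetical stand-in for the extent-map split handling; not btrfs code. */
#include <stdio.h>
#include <stdlib.h>

struct split {
        unsigned long start;
        unsigned long len;
};

/* Drop [start, end] from a cache; splitting is best-effort only. */
static void drop_cached_range(unsigned long start, unsigned long end)
{
        struct split *split = malloc(sizeof(*split));
        struct split *split2 = malloc(sizeof(*split2));
        int no_splits = 0;

        /* The old code BUG()'d on allocation failure; now we just note it. */
        if (!split || !split2)
                no_splits = 1;

        /* ... mandatory work: remove the cached entry covering [start, end] ... */

        if (no_splits)
                goto out;       /* skip the optional splitting stage */

        /* ... optional work: re-insert split entries for the leftover head/tail ... */
        printf("split around [%lu, %lu]\n", start, end);

out:
        free(split);            /* free(NULL) is a no-op */
        free(split2);
}

int main(void)
{
        drop_cached_range(4096, 8191);
        return 0;
}

As in the patch, the only hard requirement is that the range gets dropped; the cache can always be regenerated later, so the splitting step is allowed to degrade gracefully.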
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--  fs/btrfs/file.c  13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 57026a6e9c94..a50e98733e28 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -459,8 +459,8 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
  * this drops all the extents in the cache that intersect the range
  * [start, end].  Existing extents are split as required.
  */
-int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
-                            int skip_pinned)
+void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
+                             int skip_pinned)
 {
         struct extent_map *em;
         struct extent_map *split = NULL;
@@ -479,11 +479,14 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                 testend = 0;
         }
         while (1) {
+                int no_splits = 0;
+
                 if (!split)
                         split = alloc_extent_map();
                 if (!split2)
                         split2 = alloc_extent_map();
-                BUG_ON(!split || !split2); /* -ENOMEM */
+                if (!split || !split2)
+                        no_splits = 1;
 
                 write_lock(&em_tree->lock);
                 em = lookup_extent_mapping(em_tree, start, len);
@@ -509,6 +512,8 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                 remove_extent_mapping(em_tree, em);
+                if (no_splits)
+                        goto next;
 
                 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                     em->start < start) {
@@ -559,6 +564,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                         free_extent_map(split);
                         split = NULL;
                 }
+next:
                 write_unlock(&em_tree->lock);
 
                 /* once for us */
@@ -570,7 +576,6 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                 free_extent_map(split);
         if (split2)
                 free_extent_map(split2);
-        return 0;
 }
 
 /*