author    Josef Bacik    2013-03-25 21:03:35 +0100
committer Josef Bacik    2013-03-28 14:51:26 +0100
commit    f4881bc7a83eff263789dd524b7c269d138d4af5 (patch)
tree      d305caf38f58f140b952e14fa707964c905356b0
parent    Btrfs: fix EIO from btrfs send in is_extent_unchanged for punched holes (diff)
Btrfs: fix space leak when we fail to reserve metadata space
Dave reported a warning when running xfstests 275. We have been leaking delalloc metadata space when our reservations fail, because we were improperly calculating how much space to free for our checksum reservations: we would sometimes free space that had already been freed by another thread and end up with negative usage for the delalloc space. This patch fixes the problem by first calculating how much space the other threads already freed, then how much space would have needed to be freed had we not made the reservation at all, and freeing any excess. With this, xfstests 275 no longer leaks space. Thanks,

Cc: stable@vger.kernel.org
Reported-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
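The accounting subtlety behind the fix is that checksum metadata is reserved in whole item-sized steps (roughly what calc_csum_metadata_size() computes: the reservation only changes when csum_bytes crosses an item boundary), so the space released for a given number of bytes depends on the counter value it is subtracted from. The toy model below is only a sketch of that behaviour: BYTES_PER_CSUM_ITEM, ITEM_RESERVE, csum_items() and freed_for() are invented for illustration and are not the kernel's helpers or sizes.

#include <stdio.h>
#include <stdint.h>

#define BYTES_PER_CSUM_ITEM (16 * 4096)  /* invented: data bytes covered by one csum item */
#define ITEM_RESERVE        16384        /* invented: metadata bytes reserved per item */

/* round up: partially covered data still needs a whole item reserved */
static uint64_t csum_items(uint64_t csum_bytes)
{
	return (csum_bytes + BYTES_PER_CSUM_ITEM - 1) / BYTES_PER_CSUM_ITEM;
}

/* metadata released when csum_bytes drops from 'before' to 'before - bytes' */
static uint64_t freed_for(uint64_t before, uint64_t bytes)
{
	return (csum_items(before) - csum_items(before - bytes)) * ITEM_RESERVE;
}

int main(void)
{
	/* freeing the same 4096 bytes releases different amounts of
	 * metadata depending on the counter it is subtracted from */
	printf("free 4096 from 65536: %llu\n",
	       (unsigned long long)freed_for(65536, 4096));   /* 0     */
	printf("free 4096 from 69632: %llu\n",
	       (unsigned long long)freed_for(69632, 4096));   /* 16384 */
	return 0;
}

Freeing 4096 bytes releases nothing while the counter sits in the middle of an item, but a full item's worth once it sits just past a boundary; a free computed against an artificially inflated csum_bytes can therefore disagree with the free that should have happened.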
-rw-r--r--    fs/btrfs/extent-tree.c    47
1 file changed, 41 insertions, 6 deletions
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a8ff25aedca1..a22b5cc921ad 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4815,14 +4815,49 @@ out_fail:
* If the inode's csum_bytes is the same as the original
* csum_bytes then we know we haven't raced with any free()ers
* so we can just reduce our inode's csum_bytes and carry on.
- * Otherwise we have to do the normal free thing to account for
- * the case that the free side didn't free up its reserve
- * because of this outstanding reservation.
*/
- if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+ if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
calc_csum_metadata_size(inode, num_bytes, 0);
- else
- to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+ } else {
+ u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
+ u64 bytes;
+
+ /*
+ * This is tricky, but first we need to figure out how much was
+ * freed by any free()ers that ran during this
+ * reservation, so we reset ->csum_bytes to the csum_bytes
+ * before we dropped our lock, and then call the free for the
+ * number of bytes that were freed while we were trying our
+ * reservation.
+ */
+ bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
+ BTRFS_I(inode)->csum_bytes = csum_bytes;
+ to_free = calc_csum_metadata_size(inode, bytes, 0);
+
+
+ /*
+ * Now we need to see how much we would have freed had we not
+ * been making this reservation and our ->csum_bytes were not
+ * artificially inflated.
+ */
+ BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
+ bytes = csum_bytes - orig_csum_bytes;
+ bytes = calc_csum_metadata_size(inode, bytes, 0);
+
+ /*
+ * Now reset ->csum_bytes to what it should be. If bytes is
+ * more than to_free then we would have freed more space had we
+ * not had an artificially high ->csum_bytes, so we need to free
+ * the remainder. If bytes is the same or less then we don't
+ * need to do anything; the other free()ers did the correct
+ * thing.
+ */
+ BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
+ if (bytes > to_free)
+ to_free = bytes - to_free;
+ else
+ to_free = 0;
+ }
spin_unlock(&BTRFS_I(inode)->lock);
if (dropped)
to_free += btrfs_calc_trans_metadata_size(root, dropped);
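To see how the three steps in the new error path combine, here is a walk-through built on the same invented helpers and constants as the sketch above. The values of num_bytes, csum_bytes and orig_csum_bytes are hypothetical: csum_bytes plays the role of the snapshot taken while the lock was held (counter already inflated by our reservation), orig_csum_bytes the inode's counter as the error path finds it after concurrent free()ers ran.

#include <stdio.h>
#include <stdint.h>

#define BYTES_PER_CSUM_ITEM (16 * 4096)  /* invented, as above */
#define ITEM_RESERVE        16384        /* invented, as above */

static uint64_t csum_items(uint64_t csum_bytes)
{
	return (csum_bytes + BYTES_PER_CSUM_ITEM - 1) / BYTES_PER_CSUM_ITEM;
}

static uint64_t freed_for(uint64_t before, uint64_t bytes)
{
	return (csum_items(before) - csum_items(before - bytes)) * ITEM_RESERVE;
}

int main(void)
{
	uint64_t num_bytes       = 4096;    /* our failed reservation (hypothetical) */
	uint64_t csum_bytes      = 139264;  /* snapshot taken right after we inflated the counter */
	uint64_t orig_csum_bytes = 67584;   /* counter now, after concurrent free()ers ran */

	/* step 1: what the free()ers actually released, computed against
	 * the inflated counter they raced with (the patch's to_free) */
	uint64_t to_free = freed_for(csum_bytes, csum_bytes - orig_csum_bytes);

	/* step 2: what they would have released had our num_bytes never
	 * been added to the counter (the patch's bytes) */
	uint64_t bytes = freed_for(csum_bytes - num_bytes, csum_bytes - orig_csum_bytes);

	/* step 3: hand back only the shortfall */
	uint64_t extra = bytes > to_free ? bytes - to_free : 0;

	printf("already freed by others: %llu\n", (unsigned long long)to_free); /* 16384 */
	printf("should have been freed:  %llu\n", (unsigned long long)bytes);   /* 32768 */
	printf("extra to free now:       %llu\n", (unsigned long long)extra);   /* 16384 */
	return 0;
}

In this scenario the concurrent free()ers released one item's worth of metadata (16384), but two items' worth (32768) should have gone away had our reservation never inflated the counter, so the error path now hands back the remaining 16384 bytes that previously leaked.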