author    Filipe Manana  2014-10-10 10:43:11 +0200
committer Chris Mason    2014-11-21 02:14:28 +0100
commit    728404dacfddb5364d7256d821a2ea482159cbe7 (patch)
tree      adb45931cbe6104ad888f4cc2d43aa0951f8b8d0 /fs/btrfs/file.c
parent    Btrfs: correctly flush compressed data before/after direct IO (diff)
Btrfs: add helper btrfs_fdatawrite_range
Add a helper to avoid duplicating the double filemap_fdatawrite_range() call so often for inodes with async extents (compressed writes).

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Chris Mason <clm@fb.com>
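For context, the duplicated pattern the helper consolidates (copied from the two call sites removed in the diff below) is this write-then-maybe-write-again sequence:

	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);

Both call sites now reduce to a single btrfs_fdatawrite_range(inode, start, end) call.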
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--  fs/btrfs/file.c | 39
1 file changed, 30 insertions(+), 9 deletions(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index f5a868ab60f3..0fbf0e7bc606 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1676,6 +1676,7 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
 				    loff_t pos)
 {
 	struct file *file = iocb->ki_filp;
+	struct inode *inode = file_inode(file);
 	ssize_t written;
 	ssize_t written_buffered;
 	loff_t endbyte;
@@ -1697,13 +1698,10 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
 	 * able to read what was just written.
 	 */
 	endbyte = pos + written_buffered - 1;
-	err = filemap_fdatawrite_range(file->f_mapping, pos, endbyte);
-	if (!err && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-		     &BTRFS_I(file_inode(file))->runtime_flags))
-		err = filemap_fdatawrite_range(file->f_mapping, pos, endbyte);
+	err = btrfs_fdatawrite_range(inode, pos, endbyte);
 	if (err)
 		goto out;
-	err = filemap_fdatawait_range(file->f_mapping, pos, endbyte);
+	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
 	if (err)
 		goto out;
 	written += written_buffered;
@@ -1864,10 +1862,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
 	int ret;
 
 	atomic_inc(&BTRFS_I(inode)->sync_writers);
-	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
-	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-		     &BTRFS_I(inode)->runtime_flags))
-		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+	ret = btrfs_fdatawrite_range(inode, start, end);
 	atomic_dec(&BTRFS_I(inode)->sync_writers);
 
 	return ret;
@@ -2820,3 +2815,29 @@ int btrfs_auto_defrag_init(void)
 
 	return 0;
 }
+
+int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
+{
+	int ret;
+
+	/*
+	 * So with compression we will find and lock a dirty page and clear the
+	 * first one as dirty, setup an async extent, and immediately return
+	 * with the entire range locked but with nobody actually marked with
+	 * writeback. So we can't just filemap_write_and_wait_range() and
+	 * expect it to work since it will just kick off a thread to do the
+	 * actual work. So we need to call filemap_fdatawrite_range _again_
+	 * since it will wait on the page lock, which won't be unlocked until
+	 * after the pages have been marked as writeback and so we're good to go
+	 * from there. We have to do this otherwise we'll miss the ordered
+	 * extents and that results in badness. Please Josef, do not think you
+	 * know better and pull this out at some point in the future, it is
+	 * right and you are wrong.
+	 */
+	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+			     &BTRFS_I(inode)->runtime_flags))
+		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+
+	return ret;
+}
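
For reference, a minimal sketch of how a caller pairs the new helper with a wait, modeled on the __btrfs_direct_write() hunk above; the wrapper name flush_and_wait_range is hypothetical and not part of this patch:

	/* Hypothetical wrapper: flush a byte range, then wait for writeback.
	 * btrfs_fdatawrite_range() kicks writeback once, and a second time
	 * when the inode has async (compressed) extents pending. */
	static int flush_and_wait_range(struct inode *inode, loff_t start,
					loff_t end)
	{
		int ret;

		ret = btrfs_fdatawrite_range(inode, start, end);
		if (ret)
			return ret;

		/* Wait until every page in the range finishes writeback. */
		return filemap_fdatawait_range(inode->i_mapping, start, end);
	}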