Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--  fs/btrfs/inode.c  43
1 file changed, 37 insertions(+), 6 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 0921d2b07f1d..45d0dafbbf40 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7658,6 +7658,25 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 
 		if (can_nocow_extent(inode, start, &len, &orig_start,
 				     &orig_block_len, &ram_bytes) == 1) {
+
+			/*
+			 * Create the ordered extent before the extent map. This
+			 * is to avoid races with the fast fsync path because it
+			 * collects ordered extents into a local list and then
+			 * collects all the new extent maps, so we must create
+			 * the ordered extent first and make sure the fast fsync
+			 * path collects any new ordered extents after
+			 * collecting new extent maps as well. The fsync path
+			 * simply cannot rely on inode_dio_wait() because it
+			 * causes deadlock with AIO.
+			 */
+			ret = btrfs_add_ordered_extent_dio(inode, start,
+					block_start, len, len, type);
+			if (ret) {
+				free_extent_map(em);
+				goto unlock_err;
+			}
+
 			if (type == BTRFS_ORDERED_PREALLOC) {
 				free_extent_map(em);
 				em = create_pinned_em(inode, start, len,
@@ -7666,17 +7685,29 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 						      orig_block_len,
 						      ram_bytes, type);
 				if (IS_ERR(em)) {
+					struct btrfs_ordered_extent *oe;
+
 					ret = PTR_ERR(em);
+					oe = btrfs_lookup_ordered_extent(inode,
+									 start);
+					ASSERT(oe);
+					if (WARN_ON(!oe))
+						goto unlock_err;
+					set_bit(BTRFS_ORDERED_IOERR,
+						&oe->flags);
+					set_bit(BTRFS_ORDERED_IO_DONE,
+						&oe->flags);
+					btrfs_remove_ordered_extent(inode, oe);
+					/*
+					 * Once for our lookup and once for the
+					 * ordered extents tree.
+					 */
+					btrfs_put_ordered_extent(oe);
+					btrfs_put_ordered_extent(oe);
 					goto unlock_err;
 				}
 			}
 
-			ret = btrfs_add_ordered_extent_dio(inode, start,
-					block_start, len, len, type);
-			if (ret) {
-				free_extent_map(em);
-				goto unlock_err;
-			}
 			goto unlock;
 		}
 	}
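
The ordering the first hunk establishes is a publish/collect pattern: the writer must make the ordered extent visible before the extent map, because the fast fsync path collects extent maps and then, per the comment, rescans for new ordered extents (that rescan lives in the fsync code, not in this hunk). The following is a minimal user-space sketch of that argument, not btrfs code: oe_published and em_published are made-up stand-ins for "the ordered extent is in the inode's ordered tree" and "the extent map is in the extent map tree", and C11 seq_cst atomics stand in for the locking the kernel actually uses.

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

/* Hypothetical flags modeling visibility of the two structures. */
static atomic_int oe_published;
static atomic_int em_published;

static void *dio_writer(void *arg)
{
	atomic_store(&oe_published, 1);	/* ordered extent first... */
	atomic_store(&em_published, 1);	/* ...extent map second */
	return NULL;
}

static void *fast_fsync(void *arg)
{
	/* The collector reads in the opposite order: extent maps, then
	 * ordered extents. Seeing the extent map therefore guarantees
	 * the matching ordered extent is also visible. */
	if (atomic_load(&em_published))
		assert(atomic_load(&oe_published));	/* never fires */
	return NULL;
}

int main(void)
{
	pthread_t w, f;

	pthread_create(&w, NULL, dio_writer, NULL);
	pthread_create(&f, NULL, fast_fsync, NULL);
	pthread_join(w, NULL);
	pthread_join(f, NULL);
	return 0;
}

If the writer published in the other order, the collector could snapshot an extent map whose ordered extent does not exist yet, which is exactly the race the comment describes; and, as the comment notes, the fsync path cannot simply wait out in-flight direct IO with inode_dio_wait() because that deadlocks with AIO.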
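The error path in the second hunk first marks the ordered extent with BTRFS_ORDERED_IOERR and BTRFS_ORDERED_IO_DONE, so nothing waits for I/O that will never be issued, then unlinks it and drops two references: one taken by the lookup and one held by the ordered extents tree, as the inline comment says. Below is a toy refcount model of that double put; struct oe, oe_alloc, oe_get and oe_put are hypothetical names, not the kernel's API.

#include <stdatomic.h>
#include <stdlib.h>

/* Toy stand-in for struct btrfs_ordered_extent: created with one
 * reference owned by the tree it lives in. */
struct oe {
	atomic_int refs;
};

static struct oe *oe_alloc(void)
{
	struct oe *o = malloc(sizeof(*o));
	atomic_init(&o->refs, 1);	/* the tree's reference */
	return o;
}

/* Like a lookup: hand the object out with an extra reference. */
static struct oe *oe_get(struct oe *o)
{
	atomic_fetch_add(&o->refs, 1);
	return o;
}

/* Drop one reference; the last put frees the object. */
static void oe_put(struct oe *o)
{
	if (atomic_fetch_sub(&o->refs, 1) == 1)
		free(o);
}

int main(void)
{
	struct oe *o = oe_alloc();	/* refs == 1, owned by the tree */
	struct oe *found = oe_get(o);	/* lookup: refs == 2 */

	/* ... unlink "found" from the tree (does not drop a ref) ... */

	oe_put(found);	/* once for our lookup, refs == 1 */
	oe_put(found);	/* once for the tree's reference; frees it */
	return 0;
}

Dropping only the lookup reference here would leak the object: btrfs_remove_ordered_extent() unlinks the ordered extent from the tree, but the reference the tree held still has to be put explicitly, hence the two consecutive btrfs_put_ordered_extent() calls in the patch.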