path: root/fs/xfs/xfs_buf.c
author	Brian Foster <bfoster@redhat.com>	2016-05-18 02:56:41 +0200
committer	Dave Chinner <david@fromorbit.com>	2016-05-18 02:56:41 +0200
commit	9bdd9bd69b826875531bb1b2efb6aeb8d70e6f72 (patch)
tree	8cf9acecfa7280e4c3e43e116dc1f592665fa14b /fs/xfs/xfs_buf.c
parent	Linux 4.6-rc1 (diff)
xfs: buffer ->bi_end_io function requires irq-safe lock
Reports have surfaced of a lockdep splat complaining about an irq-safe -> irq-unsafe locking order in the xfs_buf_bio_end_io() bio completion handler. This only occurs when I/O errors are present, because bp->b_lock is only acquired in this context to protect setting an error on the buffer. The problem is that this lock can be acquired with the (request_queue) q->queue_lock held. See scsi_end_request() or ata_qc_schedule_eh(), for example.

Replace the locked test/set of b_io_error with a cmpxchg() call. This eliminates the need for the lock and thus the lock ordering problem goes away.

Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
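For readers following the change, here is a minimal userspace sketch of the same "first error wins" idiom, using C11 atomics in place of the kernel's cmpxchg(); the names and error values below are illustrative only, not kernel code:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for the buffer's b_io_error field. */
static _Atomic int io_error;

/*
 * Record an error only if none has been recorded yet, without taking a
 * lock.  This mirrors cmpxchg(&bp->b_io_error, 0, bio->bi_error): the
 * store succeeds only while the current value is still 0, so the first
 * error wins and later completions cannot overwrite it.
 */
static void record_io_error(int err)
{
	int expected = 0;

	if (err)
		atomic_compare_exchange_strong(&io_error, &expected, err);
}

int main(void)
{
	record_io_error(-5);	/* first error (think -EIO) is recorded     */
	record_io_error(-28);	/* a later error (think -ENOSPC) is ignored */
	printf("io_error = %d\n", atomic_load(&io_error));	/* prints -5 */
	return 0;
}

Because the compare-and-exchange is a single atomic operation, it is safe to call from a completion handler regardless of which other locks are held, which is exactly what removes the irq-safe/irq-unsafe ordering problem described above.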
Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--	fs/xfs/xfs_buf.c	12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 9a2191b91137..e71cfbd5acb3 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1100,22 +1100,18 @@ xfs_bwrite(
 	return error;
 }
 
-STATIC void
+static void
 xfs_buf_bio_end_io(
 	struct bio		*bio)
 {
-	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
+	struct xfs_buf		*bp = (struct xfs_buf *)bio->bi_private;
 
 	/*
 	 * don't overwrite existing errors - otherwise we can lose errors on
 	 * buffers that require multiple bios to complete.
 	 */
-	if (bio->bi_error) {
-		spin_lock(&bp->b_lock);
-		if (!bp->b_io_error)
-			bp->b_io_error = bio->bi_error;
-		spin_unlock(&bp->b_lock);
-	}
+	if (bio->bi_error)
+		cmpxchg(&bp->b_io_error, 0, bio->bi_error);
 
 	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
 		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
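For comparison, one hypothetical alternative (not what this patch does) would have been to make b_lock irq-safe, i.e. convert this and every other acquisition of b_lock to the irqsave variants, at the cost of disabling local interrupts around each critical section. A sketch of how this completion-path site would have looked under that untaken approach:

	if (bio->bi_error) {
		unsigned long	flags;

		/*
		 * Hypothetical: every other bp->b_lock site would need the
		 * same irqsave conversion for lockdep to be satisfied; the
		 * cmpxchg() change above sidesteps the lock here instead.
		 */
		spin_lock_irqsave(&bp->b_lock, flags);
		if (!bp->b_io_error)
			bp->b_io_error = bio->bi_error;
		spin_unlock_irqrestore(&bp->b_lock, flags);
	}

Dropping the lock from this path avoids that interrupt-disabling cost entirely, which is why the cmpxchg() route was taken.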