Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--  fs/xfs/xfs_buf.c  |  68
1 file changed, 40 insertions(+), 28 deletions(-)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 2722cb495ef4..32fc5401a756 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1016,10 +1016,12 @@ xfs_buf_trylock(
int locked;
locked = down_trylock(&bp->b_sema) == 0;
- if (locked)
+ if (locked) {
XB_SET_OWNER(bp);
-
- trace_xfs_buf_trylock(bp, _RET_IP_);
+ trace_xfs_buf_trylock(bp, _RET_IP_);
+ } else {
+ trace_xfs_buf_trylock_fail(bp, _RET_IP_);
+ }
return locked;
}
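
A note on the locking primitive used here: down_trylock() returns 0 when it acquires the semaphore, so the "== 0" comparison turns the result into a conventional boolean, and the patch then traces the success and failure paths separately. A minimal runnable userspace sketch of the same pattern, using pthread_mutex_trylock() (which likewise returns 0 on success) as a hypothetical stand-in for the buffer semaphore:

    #include <pthread.h>
    #include <stdio.h>

    /* Hypothetical stand-in for bp->b_sema. */
    static pthread_mutex_t b_sema = PTHREAD_MUTEX_INITIALIZER;

    /* Mirrors the patched xfs_buf_trylock(): trylock returns 0 on
     * success, "== 0" converts that to a boolean, and each outcome
     * gets its own trace hook (printf here, tracepoints in XFS). */
    static int buf_trylock(void)
    {
            int locked;

            locked = pthread_mutex_trylock(&b_sema) == 0;
            if (locked)
                    printf("trace: trylock\n");
            else
                    printf("trace: trylock_fail\n");
            return locked;
    }

    int main(void)
    {
            if (buf_trylock())
                    pthread_mutex_unlock(&b_sema);
            return 0;
    }
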
@@ -1858,18 +1860,33 @@ xfs_buf_cmp(
return 0;
}
+/*
+ * submit buffers for write.
+ *
+ * When we have a large buffer list, we do not want to hold all the buffers
+ * locked while we block on the request queue waiting for IO dispatch. To avoid
+ * this problem, we lock and submit buffers in groups of 50, thereby minimising
+ * the lock hold times for lists which may contain thousands of objects.
+ *
+ * To do this, we sort the buffer list before we walk the list to lock and
+ * submit buffers, and we plug and unplug around each group of buffers we
+ * submit.
+ */
static int
-__xfs_buf_delwri_submit(
+xfs_buf_delwri_submit_buffers(
struct list_head *buffer_list,
- struct list_head *io_list,
- bool wait)
+ struct list_head *wait_list)
{
- struct blk_plug plug;
struct xfs_buf *bp, *n;
+ LIST_HEAD (submit_list);
int pinned = 0;
+ struct blk_plug plug;
+
+ list_sort(NULL, buffer_list, xfs_buf_cmp);
+ blk_start_plug(&plug);
list_for_each_entry_safe(bp, n, buffer_list, b_list) {
- if (!wait) {
+ if (!wait_list) {
if (xfs_buf_ispinned(bp)) {
pinned++;
continue;
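
The comment block added above is the heart of the change: sort the list first so buffers are locked and submitted in ascending disk-address order, and wrap submission in a block-layer plug so queued requests can be merged and dispatched as a batch. A sketch of that sort-then-plug shape in isolation (kernel context, so not standalone-runnable); my_buf, my_buf_cmp() and my_submit() are hypothetical, while list_sort(), blk_start_plug() and blk_finish_plug() are the real APIs, shown with the list_sort() signature of this patch's era:

    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <linux/list_sort.h>
    #include <linux/blkdev.h>
    #include <linux/types.h>

    /* Hypothetical buffer: b_bn is the disk address used as sort key. */
    struct my_buf {
            struct list_head        b_list;
            sector_t                b_bn;
    };

    static void my_submit(struct my_buf *bp)
    {
            /* queue an async write bio for bp (elided) */
    }

    static int my_buf_cmp(void *priv, struct list_head *a, struct list_head *b)
    {
            struct my_buf *ap = container_of(a, struct my_buf, b_list);
            struct my_buf *bp = container_of(b, struct my_buf, b_list);

            if (ap->b_bn < bp->b_bn)
                    return -1;
            return ap->b_bn > bp->b_bn;
    }

    static void my_submit_all(struct list_head *buffer_list)
    {
            struct my_buf   *bp, *n;
            struct blk_plug plug;

            /* ascending disk order helps the block layer merge requests */
            list_sort(NULL, buffer_list, my_buf_cmp);

            /* hold back dispatch until the whole batch is queued */
            blk_start_plug(&plug);
            list_for_each_entry_safe(bp, n, buffer_list, b_list)
                    my_submit(bp);
            blk_finish_plug(&plug);
    }
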
@@ -1892,25 +1909,21 @@ __xfs_buf_delwri_submit(
continue;
}
- list_move_tail(&bp->b_list, io_list);
trace_xfs_buf_delwri_split(bp, _RET_IP_);
- }
-
- list_sort(NULL, io_list, xfs_buf_cmp);
-
- blk_start_plug(&plug);
- list_for_each_entry_safe(bp, n, io_list, b_list) {
- bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
- bp->b_flags |= XBF_WRITE | XBF_ASYNC;
/*
- * we do all Io submission async. This means if we need to wait
- * for IO completion we need to take an extra reference so the
- * buffer is still valid on the other side.
+ * We do all IO submission async. This means if we need
+ * to wait for IO completion we need to take an extra
+ * reference so the buffer is still valid on the other
+ * side. We need to move the buffer onto the wait_list
+ * at this point so the caller can still access it.
*/
- if (wait)
+ bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
+ bp->b_flags |= XBF_WRITE | XBF_ASYNC;
+ if (wait_list) {
xfs_buf_hold(bp);
- else
+ list_move_tail(&bp->b_list, wait_list);
+ } else
list_del_init(&bp->b_list);
xfs_buf_submit(bp);
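
The rewritten comment above states the lifetime rule this hunk depends on: submission is always asynchronous, so a caller that intends to wait must take an extra reference before xfs_buf_submit(), otherwise I/O completion could release the buffer underneath it. A small runnable userspace sketch of that hold-before-async-submit pattern, with a hypothetical refcounted struct buf standing in for xfs_buf:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct buf {
            atomic_int hold;
    };

    static void buf_hold(struct buf *bp)
    {
            atomic_fetch_add(&bp->hold, 1);
    }

    static void buf_rele(struct buf *bp)
    {
            /* the last reference frees the buffer */
            if (atomic_fetch_sub(&bp->hold, 1) == 1)
                    free(bp);
    }

    /* Async submission: the completion path drops the submitter's ref. */
    static void submit(struct buf *bp)
    {
            /* ... write I/O would happen here; on completion: */
            buf_rele(bp);
    }

    int main(void)
    {
            struct buf *bp = malloc(sizeof(*bp));

            atomic_init(&bp->hold, 1);      /* list's reference */
            buf_hold(bp);                   /* waiter's extra reference */
            submit(bp);                     /* may complete immediately */

            /* bp is only still valid here because of the extra hold */
            printf("refs after submit: %d\n", atomic_load(&bp->hold));
            buf_rele(bp);                   /* waiter done: final release */
            return 0;
    }
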
@@ -1933,8 +1946,7 @@ int
xfs_buf_delwri_submit_nowait(
struct list_head *buffer_list)
{
- LIST_HEAD (io_list);
- return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
+ return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
}
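
From a caller's perspective the two entry points now differ only in the wait_list argument they pass to the common helper. A hedged call-sequence fragment (not self-contained; acquisition of bp and error handling are elided, but xfs_buf_delwri_queue() is the real API callers use to populate the list):

    LIST_HEAD(buffer_list);

    /* queue each dirty buffer; delwri queueing takes its own hold */
    xfs_buf_delwri_queue(bp, &buffer_list);

    /* either fire-and-forget, skipping pinned buffers ... */
    xfs_buf_delwri_submit_nowait(&buffer_list);

    /* ... or submit everything and wait, returning the first error */
    error = xfs_buf_delwri_submit(&buffer_list);
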
/*
@@ -1949,15 +1961,15 @@ int
xfs_buf_delwri_submit(
struct list_head *buffer_list)
{
- LIST_HEAD (io_list);
+ LIST_HEAD (wait_list);
int error = 0, error2;
struct xfs_buf *bp;
- __xfs_buf_delwri_submit(buffer_list, &io_list, true);
+ xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);
/* Wait for IO to complete. */
- while (!list_empty(&io_list)) {
- bp = list_first_entry(&io_list, struct xfs_buf, b_list);
+ while (!list_empty(&wait_list)) {
+ bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
list_del_init(&bp->b_list);
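
The section breaks off inside the wait loop. For orientation only, a hedged sketch of how such a drain loop conventionally finishes, not a verbatim continuation of the patch: the waiter locks each buffer (which blocks until the async write completes), harvests b_error, releases its extra hold, and preserves the first error seen:

            /* sketch of the loop tail, assuming the xfs_buf API above */
            xfs_buf_lock(bp);       /* blocks until the async write completes */
            error2 = bp->b_error;
            xfs_buf_relse(bp);      /* unlock and drop the waiter's extra hold */
            if (!error)
                    error = error2; /* first error wins */
    }

    return error;
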