author    Darrick J. Wong  2019-07-02 18:39:40 +0200
committer Darrick J. Wong  2019-07-02 18:40:05 +0200
commit    938c710d99a62eed93b6a2770f92f943762beca0 (patch)
tree      e58cd5750ad32c6aca19b054081026be4cecf8c4 /fs/xfs
parent    xfs: convert bulkstat to new iwalk infrastructure (diff)
xfs: calculate inode walk prefetch more carefully
The existing inode walk prefetch is based on the old bulkstat code, which simply allocated 4 pages' worth of memory and prefetched that many inobt records, regardless of how many inodes the caller requested. 65536 inodes is a lot to prefetch (~32M on x64, ~512M on arm64), so let's scale things down more intelligently based on the number of inodes requested.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
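For context, the quoted figures work out as follows. This is a back-of-the-envelope sketch; the 16-byte size of struct xfs_inobt_rec_incore and the 512-byte on-disk inode size are assumptions (typical XFS defaults), not stated in the patch. Each inobt record covers one 64-inode chunk:

    4 pages * 4096 B / 16 B   =  1024 inobt records    (x86_64, 4 KiB pages)
    1024 records * 64 inodes  =  65536 inodes prefetched
    65536 inodes * 512 B      ~= 32 MiB of inode buffers

    4 pages * 65536 B / 16 B  =  16384 inobt records   (arm64, 64 KiB pages)
    16384 records * 64 inodes =  1048576 inodes ~= 512 MiB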
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/xfs_iwalk.c | 48
1 file changed, 45 insertions(+), 3 deletions(-)
diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c
index 304c41e6ed1d..4aa22f02b9ec 100644
--- a/fs/xfs/xfs_iwalk.c
+++ b/fs/xfs/xfs_iwalk.c
@@ -334,15 +334,57 @@ out:
 }
 
 /*
+ * We experimentally determined that the reduction in ioctl call overhead
+ * diminishes when userspace asks for more than 2048 inodes, so we'll cap
+ * prefetch at this point.
+ */
+#define IWALK_MAX_INODE_PREFETCH	(2048U)
+
+/*
  * Given the number of inodes to prefetch, set the number of inobt records that
  * we cache in memory, which controls the number of inodes we try to read
- * ahead.
+ * ahead.  Set the maximum if @inodes == 0.
  */
 static inline unsigned int
 xfs_iwalk_prefetch(
-	unsigned int		inode_records)
+	unsigned int		inodes)
 {
-	return PAGE_SIZE * 4 / sizeof(struct xfs_inobt_rec_incore);
+	unsigned int		inobt_records;
+
+	/*
+	 * If the caller didn't tell us the number of inodes they wanted,
+	 * assume the maximum prefetch possible for best performance.
+	 * Otherwise, cap prefetch at that maximum so that we don't start an
+	 * absurd amount of prefetch.
+	 */
+	if (inodes == 0)
+		inodes = IWALK_MAX_INODE_PREFETCH;
+	inodes = min(inodes, IWALK_MAX_INODE_PREFETCH);
+
+	/* Round the inode count up to a full chunk. */
+	inodes = round_up(inodes, XFS_INODES_PER_CHUNK);
+
+	/*
+	 * In order to convert the number of inodes to prefetch into an
+	 * estimate of the number of inobt records to cache, we require a
+	 * conversion factor that reflects our expectations of the average
+	 * loading factor of an inode chunk.  Based on data gathered, most
+	 * (but not all) filesystems manage to keep the inode chunks totally
+	 * full, so we'll underestimate slightly so that our readahead will
+	 * still deliver the performance we want on aging filesystems:
+	 *
+	 * inobt = inodes / (INODES_PER_CHUNK * (4 / 5));
+	 *
+	 * The funny math is to avoid integer division.
+	 */
+	inobt_records = (inodes * 5) / (4 * XFS_INODES_PER_CHUNK);
+
+	/*
+	 * Allocate enough space to prefetch at least two inobt records so that
+	 * we can cache both the record where the iwalk started and the next
+	 * record.  This simplifies the AG inode walk loop setup code.
+	 */
+	return max(inobt_records, 2U);
 }
 
 /*
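The new calculation is easy to sanity-check outside the kernel. Below is a minimal user-space model of it; the function name iwalk_prefetch_model, the sample values, and the open-coded stand-ins for the kernel's min(), max(), and round_up() helpers are ours for illustration, not part of the patch:

/*
 * User-space sketch of the patched xfs_iwalk_prefetch() math.
 * XFS_INODES_PER_CHUNK is 64, fixed by the on-disk format.
 */
#include <stdio.h>

#define XFS_INODES_PER_CHUNK		64U
#define IWALK_MAX_INODE_PREFETCH	2048U

static unsigned int
iwalk_prefetch_model(unsigned int inodes)
{
	unsigned int inobt_records;

	/* 0 means "no hint given": assume the maximum; otherwise clamp. */
	if (inodes == 0)
		inodes = IWALK_MAX_INODE_PREFETCH;
	if (inodes > IWALK_MAX_INODE_PREFETCH)
		inodes = IWALK_MAX_INODE_PREFETCH;

	/* Round up to a whole chunk (power-of-two mask, like round_up()). */
	inodes = (inodes + XFS_INODES_PER_CHUNK - 1) &
		 ~(XFS_INODES_PER_CHUNK - 1);

	/* Assume chunks are ~4/5 full: records = inodes / (64 * 4/5). */
	inobt_records = (inodes * 5) / (4 * XFS_INODES_PER_CHUNK);

	/* Always cache at least the starting record and its successor. */
	return inobt_records > 2 ? inobt_records : 2;
}

int main(void)
{
	unsigned int samples[] = { 0, 1, 64, 100, 1024, 2048, 1000000 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("inodes=%7u -> inobt_records=%u\n",
		       samples[i], iwalk_prefetch_model(samples[i]));
	return 0;
}

Running this shows the scaling in action: a request for 1 inode yields the floor of 2 records, 1024 inodes yields 20 records, and anything at or above 2048 inodes (including the "no hint" case of 0) caps at 40 records, versus the flat 1024 records the old 4-page heuristic prefetched on 4 KiB-page systems.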