author      Simon Rettberg  2022-02-09 15:11:40 +0100
committer   Simon Rettberg  2022-03-04 12:04:14 +0100
commit      08745f8292deffbdcbdb7071cebade5945fb924a (patch)
tree        f2864eea407de1374ef90f4269b9aaa0150ead60 /src
parent      Add more sanity checks to kernel_read calls and compressed buffer access (diff)
qcow2: Add lock for lookup cache and decompression
Race conditions can occur if reads are submitted from different
cgroups, as each cgroup has its own workqueue. Serialize the
affected code paths with a mutex; in the worst case this brings
performance down to the level of pre-5.15 versions of xloop,
which used a single workqueue for all cgroups.
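The pattern applied in the diff below is a single per-image mutex taken around every access to the shared lookup cache and decompression state, so that workers running on different per-cgroup workqueues cannot interleave. A minimal kernel-style sketch of that idea, using hypothetical names (shared_qcow_state, lookup_and_read) that are not part of xloop:

#include <linux/mutex.h>

/* Hypothetical per-image state shared by all per-cgroup workqueue workers. */
struct shared_qcow_state {
	struct mutex global_mutex;  /* serializes cache lookups and decompression */
	void *cluster_cache;        /* shared lookup cache, not safe for concurrent use */
	void *decompress_buf;       /* shared scratch buffer for decompression */
};

static void shared_qcow_state_init(struct shared_qcow_state *s)
{
	mutex_init(&s->global_mutex);
}

static void shared_qcow_state_exit(struct shared_qcow_state *s)
{
	mutex_destroy(&s->global_mutex);
}

/* May be called concurrently from different per-cgroup workqueues. */
static int lookup_and_read(struct shared_qcow_state *s)
{
	int ret;

	/* Only one worker at a time may touch the cache and scratch buffer. */
	mutex_lock(&s->global_mutex);
	ret = 0; /* ... look up host offset, decompress cluster if needed ... */
	mutex_unlock(&s->global_mutex);

	return ret;
}

In the worst case all workers serialize on this one mutex, which the commit message accepts as no worse than the single shared workqueue used before 5.15.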
Diffstat (limited to 'src')
-rw-r--r--  src/kernel/xloop_file_fmt_qcow_main.c  13 ++++++++++---
-rw-r--r--  src/kernel/xloop_file_fmt_qcow_main.h   2 ++
2 files changed, 12 insertions(+), 3 deletions(-)
diff --git a/src/kernel/xloop_file_fmt_qcow_main.c b/src/kernel/xloop_file_fmt_qcow_main.c
index ae3ad81..fb70f9a 100644
--- a/src/kernel/xloop_file_fmt_qcow_main.c
+++ b/src/kernel/xloop_file_fmt_qcow_main.c
@@ -200,6 +200,8 @@ static int __qcow_file_fmt_compression_init(struct xloop_file_fmt *xlo_fmt)
 #endif
 	}
 
+	mutex_init(&qcow_data->global_mutex);
+
 	return ret;
 
 #ifdef CONFIG_ZSTD_DECOMPRESS
@@ -218,6 +220,8 @@ static void __qcow_file_fmt_compression_exit(struct xloop_file_fmt *xlo_fmt)
 {
 	struct xloop_file_fmt_qcow_data *qcow_data = xlo_fmt->private_data;
 
+	mutex_destroy(&qcow_data->global_mutex);
+
 	/* ZLIB specific cleanup */
 	zlib_inflateEnd(qcow_data->zlib_dstrm);
 	vfree(qcow_data->zlib_dstrm->workspace);
@@ -302,7 +306,9 @@ static ssize_t __qcow_file_fmt_dbgfs_ofs_read(struct file *file, char __user *bu
 	mutex_unlock(&qcow_data->dbgfs_qcow_offset_mutex);
 
 	/* calculate and print the cluster offset */
+	mutex_lock(&qcow_data->global_mutex);
 	ret = xloop_file_fmt_qcow_get_host_offset(xlo_fmt, offset, &cur_bytes, &host_offset, &type);
+	mutex_unlock(&qcow_data->global_mutex);
 	if (ret)
 		return -EINVAL;
 
@@ -1060,7 +1066,6 @@ static int __qcow_file_fmt_read_bvec(struct xloop_file_fmt *xlo_fmt, struct bio_
 {
 	struct xloop_file_fmt_qcow_data *qcow_data = xlo_fmt->private_data;
 	struct xloop_device *xlo = xloop_file_fmt_get_xlo(xlo_fmt);
-	int offset_in_cluster;
 	int ret;
 	unsigned int cur_bytes; /* number of bytes in current iteration */
 	u64 bytes;
@@ -1082,12 +1087,12 @@ static int __qcow_file_fmt_read_bvec(struct xloop_file_fmt *xlo_fmt, struct bio_
 		/* prepare next request. if this spans a cluster boundary, this will be clamped */
 		cur_bytes = bytes;
 
+		mutex_lock(&qcow_data->global_mutex);
 		ret = xloop_file_fmt_qcow_get_host_offset(xlo_fmt, *ppos, &cur_bytes, &host_offset, &type);
+		mutex_unlock(&qcow_data->global_mutex);
 		if (ret)
 			goto fail;
 
-		offset_in_cluster = xloop_file_fmt_qcow_offset_into_cluster(qcow_data, *ppos);
-
 		switch (type) {
 		case QCOW_SUBCLUSTER_ZERO_PLAIN:
 		case QCOW_SUBCLUSTER_ZERO_ALLOC:
@@ -1108,7 +1113,9 @@ static int __qcow_file_fmt_read_bvec(struct xloop_file_fmt *xlo_fmt, struct bio_
 			break;
 
 		case QCOW_SUBCLUSTER_COMPRESSED:
+			mutex_lock(&qcow_data->global_mutex);
 			ret = __qcow_file_fmt_read_compressed(xlo_fmt, bvec, host_offset, *ppos, cur_bytes, bytes_done);
+			mutex_unlock(&qcow_data->global_mutex);
 			if (ret < 0)
 				goto fail;
 			if (len == 0) {
diff --git a/src/kernel/xloop_file_fmt_qcow_main.h b/src/kernel/xloop_file_fmt_qcow_main.h
index aff5e13..32e14f9 100644
--- a/src/kernel/xloop_file_fmt_qcow_main.h
+++ b/src/kernel/xloop_file_fmt_qcow_main.h
@@ -291,6 +291,8 @@ struct xloop_file_fmt_qcow_data {
 	u64 compatible_features;
 	u64 autoclear_features;
 
+	struct mutex global_mutex;
+
 	/* ZLIB specific data */
 	z_streamp zlib_dstrm;