summary | refs | log | tree | commit | diff | stats
path: root/loop_file_fmt_qcow_cluster.c
diff options
context:
space:
mode:
Diffstat (limited to 'loop_file_fmt_qcow_cluster.c')
-rw-r--r--  loop_file_fmt_qcow_cluster.c  78
1 file changed, 39 insertions(+), 39 deletions(-)
diff --git a/loop_file_fmt_qcow_cluster.c b/loop_file_fmt_qcow_cluster.c
index 9c91a8b..593a173 100644
--- a/loop_file_fmt_qcow_cluster.c
+++ b/loop_file_fmt_qcow_cluster.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * loop_file_fmt_qcow_cluster.c
+ * xloop_file_fmt_qcow_cluster.c
*
* Ported QCOW2 implementation of the QEMU project (GPL-2.0):
* Cluster calculation and lookup for the QCOW2 format.
@@ -24,18 +24,18 @@
* the cache is used; otherwise the L2 slice is loaded from the image
* file.
*/
-static int __loop_file_fmt_qcow_cluster_l2_load(struct loop_file_fmt *lo_fmt,
+static int __xloop_file_fmt_qcow_cluster_l2_load(struct xloop_file_fmt *xlo_fmt,
u64 offset, u64 l2_offset, u64 **l2_slice)
{
- struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data;
+ struct xloop_file_fmt_qcow_data *qcow_data = xlo_fmt->private_data;
int start_of_slice = sizeof(u64) * (
- loop_file_fmt_qcow_offset_to_l2_index(qcow_data, offset) -
- loop_file_fmt_qcow_offset_to_l2_slice_index(qcow_data, offset)
+ xloop_file_fmt_qcow_offset_to_l2_index(qcow_data, offset) -
+ xloop_file_fmt_qcow_offset_to_l2_slice_index(qcow_data, offset)
);
ASSERT(qcow_data->l2_table_cache != NULL);
- return loop_file_fmt_qcow_cache_get(lo_fmt, l2_offset + start_of_slice,
+ return xloop_file_fmt_qcow_cache_get(xlo_fmt, l2_offset + start_of_slice,
(void **) l2_slice);
}
@@ -46,17 +46,17 @@ static int __loop_file_fmt_qcow_cluster_l2_load(struct loop_file_fmt *lo_fmt,
* as contiguous. (This allows it, for example, to stop at the first compressed
* cluster which may require a different handling)
*/
-static int __loop_file_fmt_qcow_cluster_count_contiguous(
- struct loop_file_fmt *lo_fmt, int nb_clusters, int cluster_size,
+static int __xloop_file_fmt_qcow_cluster_count_contiguous(
+ struct xloop_file_fmt *xlo_fmt, int nb_clusters, int cluster_size,
u64 *l2_slice, u64 stop_flags)
{
int i;
- enum loop_file_fmt_qcow_cluster_type first_cluster_type;
+ enum xloop_file_fmt_qcow_cluster_type first_cluster_type;
u64 mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
u64 first_entry = be64_to_cpu(l2_slice[0]);
u64 offset = first_entry & mask;
- first_cluster_type = loop_file_fmt_qcow_get_cluster_type(lo_fmt,
+ first_cluster_type = xloop_file_fmt_qcow_get_cluster_type(xlo_fmt,
first_entry);
if (first_cluster_type == QCOW_CLUSTER_UNALLOCATED) {
return 0;
@@ -80,9 +80,9 @@ static int __loop_file_fmt_qcow_cluster_count_contiguous(
* Checks how many consecutive unallocated clusters in a given L2
* slice have the same cluster type.
*/
-static int __loop_file_fmt_qcow_cluster_count_contiguous_unallocated(
- struct loop_file_fmt *lo_fmt, int nb_clusters, u64 *l2_slice,
- enum loop_file_fmt_qcow_cluster_type wanted_type)
+static int __xloop_file_fmt_qcow_cluster_count_contiguous_unallocated(
+ struct xloop_file_fmt *xlo_fmt, int nb_clusters, u64 *l2_slice,
+ enum xloop_file_fmt_qcow_cluster_type wanted_type)
{
int i;
@@ -91,8 +91,8 @@ static int __loop_file_fmt_qcow_cluster_count_contiguous_unallocated(
for (i = 0; i < nb_clusters; i++) {
u64 entry = be64_to_cpu(l2_slice[i]);
- enum loop_file_fmt_qcow_cluster_type type =
- loop_file_fmt_qcow_get_cluster_type(lo_fmt, entry);
+ enum xloop_file_fmt_qcow_cluster_type type =
+ xloop_file_fmt_qcow_get_cluster_type(xlo_fmt, entry);
if (type != wanted_type) {
break;
@@ -116,19 +116,19 @@ static int __loop_file_fmt_qcow_cluster_count_contiguous_unallocated(
* Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
* cases.
*/
-int loop_file_fmt_qcow_cluster_get_offset(struct loop_file_fmt *lo_fmt,
+int xloop_file_fmt_qcow_cluster_get_offset(struct xloop_file_fmt *xlo_fmt,
u64 offset, unsigned int *bytes, u64 *cluster_offset)
{
- struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data;
+ struct xloop_file_fmt_qcow_data *qcow_data = xlo_fmt->private_data;
unsigned int l2_index;
u64 l1_index, l2_offset, *l2_slice;
int c;
unsigned int offset_in_cluster;
u64 bytes_available, bytes_needed, nb_clusters;
- enum loop_file_fmt_qcow_cluster_type type;
+ enum xloop_file_fmt_qcow_cluster_type type;
int ret;
- offset_in_cluster = loop_file_fmt_qcow_offset_into_cluster(qcow_data,
+ offset_in_cluster = xloop_file_fmt_qcow_offset_into_cluster(qcow_data,
offset);
bytes_needed = (u64) *bytes + offset_in_cluster;
@@ -137,7 +137,7 @@ int loop_file_fmt_qcow_cluster_get_offset(struct loop_file_fmt *lo_fmt,
* the entry pointing to it */
bytes_available = ((u64)(
qcow_data->l2_slice_size -
- loop_file_fmt_qcow_offset_to_l2_slice_index(qcow_data, offset))
+ xloop_file_fmt_qcow_offset_to_l2_slice_index(qcow_data, offset))
) << qcow_data->cluster_bits;
if (bytes_needed > bytes_available) {
@@ -147,7 +147,7 @@ int loop_file_fmt_qcow_cluster_get_offset(struct loop_file_fmt *lo_fmt,
*cluster_offset = 0;
/* seek to the l2 offset in the l1 table */
- l1_index = loop_file_fmt_qcow_offset_to_l1_index(qcow_data, offset);
+ l1_index = xloop_file_fmt_qcow_offset_to_l1_index(qcow_data, offset);
if (l1_index >= qcow_data->l1_size) {
type = QCOW_CLUSTER_UNALLOCATED;
goto out;
@@ -159,37 +159,37 @@ int loop_file_fmt_qcow_cluster_get_offset(struct loop_file_fmt *lo_fmt,
goto out;
}
- if (loop_file_fmt_qcow_offset_into_cluster(qcow_data, l2_offset)) {
- printk_ratelimited(KERN_ERR "loop_file_fmt_qcow: L2 table "
+ if (xloop_file_fmt_qcow_offset_into_cluster(qcow_data, l2_offset)) {
+ printk_ratelimited(KERN_ERR "xloop_file_fmt_qcow: L2 table "
"offset %llx unaligned (L1 index: %llx)", l2_offset,
l1_index);
return -EIO;
}
/* load the l2 slice in memory */
- ret = __loop_file_fmt_qcow_cluster_l2_load(lo_fmt, offset, l2_offset,
+ ret = __xloop_file_fmt_qcow_cluster_l2_load(xlo_fmt, offset, l2_offset,
&l2_slice);
if (ret < 0) {
return ret;
}
/* find the cluster offset for the given disk offset */
- l2_index = loop_file_fmt_qcow_offset_to_l2_slice_index(qcow_data,
+ l2_index = xloop_file_fmt_qcow_offset_to_l2_slice_index(qcow_data,
offset);
*cluster_offset = be64_to_cpu(l2_slice[l2_index]);
- nb_clusters = loop_file_fmt_qcow_size_to_clusters(qcow_data,
+ nb_clusters = xloop_file_fmt_qcow_size_to_clusters(qcow_data,
bytes_needed);
/* bytes_needed <= *bytes + offset_in_cluster, both of which are
* unsigned integers; the minimum cluster size is 512, so this
* assertion is always true */
ASSERT(nb_clusters <= INT_MAX);
- type = loop_file_fmt_qcow_get_cluster_type(lo_fmt, *cluster_offset);
+ type = xloop_file_fmt_qcow_get_cluster_type(xlo_fmt, *cluster_offset);
if (qcow_data->qcow_version < 3 && (
type == QCOW_CLUSTER_ZERO_PLAIN ||
type == QCOW_CLUSTER_ZERO_ALLOC)) {
- printk_ratelimited(KERN_ERR "loop_file_fmt_qcow: zero cluster "
+ printk_ratelimited(KERN_ERR "xloop_file_fmt_qcow: zero cluster "
"entry found in pre-v3 image (L2 offset: %llx, "
"L2 index: %x)\n", l2_offset, l2_index);
ret = -EIO;
@@ -197,8 +197,8 @@ int loop_file_fmt_qcow_cluster_get_offset(struct loop_file_fmt *lo_fmt,
}
switch (type) {
case QCOW_CLUSTER_COMPRESSED:
- if (loop_file_fmt_qcow_has_data_file(lo_fmt)) {
- printk_ratelimited(KERN_ERR "loop_file_fmt_qcow: "
+ if (xloop_file_fmt_qcow_has_data_file(xlo_fmt)) {
+ printk_ratelimited(KERN_ERR "xloop_file_fmt_qcow: "
"compressed cluster entry found in image with "
"external data file (L2 offset: %llx, "
"L2 index: %x)", l2_offset, l2_index);
@@ -212,29 +212,29 @@ int loop_file_fmt_qcow_cluster_get_offset(struct loop_file_fmt *lo_fmt,
case QCOW_CLUSTER_ZERO_PLAIN:
case QCOW_CLUSTER_UNALLOCATED:
/* how many empty clusters ? */
- c = __loop_file_fmt_qcow_cluster_count_contiguous_unallocated(
- lo_fmt, nb_clusters, &l2_slice[l2_index], type);
+ c = __xloop_file_fmt_qcow_cluster_count_contiguous_unallocated(
+ xlo_fmt, nb_clusters, &l2_slice[l2_index], type);
*cluster_offset = 0;
break;
case QCOW_CLUSTER_ZERO_ALLOC:
case QCOW_CLUSTER_NORMAL:
/* how many allocated clusters ? */
- c = __loop_file_fmt_qcow_cluster_count_contiguous(lo_fmt,
+ c = __xloop_file_fmt_qcow_cluster_count_contiguous(xlo_fmt,
nb_clusters, qcow_data->cluster_size,
&l2_slice[l2_index], QCOW_OFLAG_ZERO);
*cluster_offset &= L2E_OFFSET_MASK;
- if (loop_file_fmt_qcow_offset_into_cluster(qcow_data,
+ if (xloop_file_fmt_qcow_offset_into_cluster(qcow_data,
*cluster_offset)) {
- printk_ratelimited(KERN_ERR "loop_file_fmt_qcow: "
+ printk_ratelimited(KERN_ERR "xloop_file_fmt_qcow: "
"cluster allocation offset %llx unaligned "
"(L2 offset: %llx, L2 index: %x)\n",
*cluster_offset, l2_offset, l2_index);
ret = -EIO;
goto fail;
}
- if (loop_file_fmt_qcow_has_data_file(lo_fmt) &&
+ if (xloop_file_fmt_qcow_has_data_file(xlo_fmt) &&
*cluster_offset != offset - offset_in_cluster) {
- printk_ratelimited(KERN_ERR "loop_file_fmt_qcow: "
+ printk_ratelimited(KERN_ERR "xloop_file_fmt_qcow: "
"external data file host cluster offset %llx "
"does not match guest cluster offset: %llx, "
"L2 index: %x)", *cluster_offset,
@@ -247,7 +247,7 @@ int loop_file_fmt_qcow_cluster_get_offset(struct loop_file_fmt *lo_fmt,
BUG();
}
- loop_file_fmt_qcow_cache_put(lo_fmt, (void **) &l2_slice);
+ xloop_file_fmt_qcow_cache_put(xlo_fmt, (void **) &l2_slice);
bytes_available = (s64) c * qcow_data->cluster_size;
@@ -265,6 +265,6 @@ out:
return type;
fail:
- loop_file_fmt_qcow_cache_put(lo_fmt, (void **) &l2_slice);
+ xloop_file_fmt_qcow_cache_put(xlo_fmt, (void **) &l2_slice);
return ret;
}