/* SPDX-License-Identifier: GPL-2.0 */
/*
* xloop_file_fmt_qcow.h
*
* QCOW file format driver for the xloop device module.
*
* Ported QCOW2 implementation of the QEMU project (GPL-2.0):
* Declarations for the QCOW2 file format.
*
* The copyright (C) 2004-2006 of the original code is owned by Fabrice Bellard.
*
* Copyright (C) 2019 Manuel Bentele <development@manuel-bentele.de>
*/
#ifndef _LINUX_XLOOP_FILE_FMT_QCOW_H
#define _LINUX_XLOOP_FILE_FMT_QCOW_H
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/zlib.h>
#ifdef CONFIG_ZSTD_DECOMPRESS
#include <linux/zstd.h>
#endif
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
#include "xloop_file_fmt.h"
#ifdef CONFIG_DEBUG_DRIVER
#define ASSERT(x) \
do { \
if (!(x)) { \
printk(KERN_EMERG "assertion failed %s: %d: %s\n", __FILE__, __LINE__, #x); \
BUG(); \
} \
} while (0)
#else
#define ASSERT(x) \
do { \
} while (0)
#endif
#define KiB (1024)
#define MiB (1024 * 1024)
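/*
 * The magic spells the ASCII string "QFI" followed by the byte 0xfb
 * (0x514649fb); it is stored big-endian as the first four bytes of
 * every QCOW/QCOW2 image.
 */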
#define QCOW_MAGIC (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb)
#define QCOW_CRYPT_NONE 0
#define QCOW_CRYPT_AES 1
#define QCOW_CRYPT_LUKS 2
#define QCOW_MAX_CRYPT_CLUSTERS 32
#define QCOW_MAX_SNAPSHOTS 65536
/*
* Field widths in QCOW mean normal cluster offsets cannot reach
* 64PB; depending on cluster size, compressed clusters can have a
* smaller limit (64PB for up to 16k clusters, then ramps down to
* 512TB for 2M clusters).
*/
#define QCOW_MAX_CLUSTER_OFFSET ((1ULL << 56) - 1)
/*
* 8 MB refcount table is enough for 2 PB images at 64k cluster size
* (128 GB for 512 byte clusters, 2 EB for 2 MB clusters)
*/
#define QCOW_MAX_REFTABLE_SIZE (8 * MiB)
/*
* 32 MB L1 table is enough for 2 PB images at 64k cluster size
* (128 GB for 512 byte clusters, 2 EB for 2 MB clusters)
*/
#define QCOW_MAX_L1_SIZE (32 * MiB)
/*
 * Allow for an average of 1k per snapshot table entry; this should be
 * plenty of space for snapshot names and IDs
*/
#define QCOW_MAX_SNAPSHOTS_SIZE (1024 * QCOW_MAX_SNAPSHOTS)
/* Bitmap header extension constraints */
#define QCOW_MAX_BITMAPS 65535
#define QCOW_MAX_BITMAP_DIRECTORY_SIZE (1024 * QCOW_MAX_BITMAPS)
/* indicate that the refcount of the referenced cluster is exactly one. */
#define QCOW_OFLAG_COPIED (1ULL << 63)
/* indicate that the cluster is compressed (they never have the copied flag) */
#define QCOW_OFLAG_COMPRESSED (1ULL << 62)
/* The cluster reads as all zeros */
#define QCOW_OFLAG_ZERO (1ULL << 0)
#define QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER 32
/* The subcluster X [0..31] is allocated */
#define QCOW_OFLAG_SUB_ALLOC(X) (1ULL << (X))
/* The subcluster X [0..31] reads as zeroes */
#define QCOW_OFLAG_SUB_ZERO(X) (QCOW_OFLAG_SUB_ALLOC(X) << 32)
/* Subclusters [X, Y) (0 <= X <= Y <= 32) are allocated */
#define QCOW_OFLAG_SUB_ALLOC_RANGE(X, Y) (QCOW_OFLAG_SUB_ALLOC(Y) - QCOW_OFLAG_SUB_ALLOC(X))
/* Subclusters [X, Y) (0 <= X <= Y <= 32) read as zeroes */
#define QCOW_OFLAG_SUB_ZERO_RANGE(X, Y) (QCOW_OFLAG_SUB_ALLOC_RANGE(X, Y) << 32)
/* L2 entry bitmap with all allocation bits set */
#define QCOW_L2_BITMAP_ALL_ALLOC (QCOW_OFLAG_SUB_ALLOC_RANGE(0, 32))
/* L2 entry bitmap with all "read as zeroes" bits set */
#define QCOW_L2_BITMAP_ALL_ZEROES (QCOW_OFLAG_SUB_ZERO_RANGE(0, 32))
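/*
 * Worked example for the extended L2 bitmap macros above (illustrative
 * values only): the low 32 bits of the bitmap carry the allocation
 * flags, the high 32 bits the "reads as zeroes" flags.
 *
 *   QCOW_OFLAG_SUB_ALLOC(0)          == 0x0000000000000001
 *   QCOW_OFLAG_SUB_ZERO(5)           == 0x0000002000000000
 *   QCOW_OFLAG_SUB_ALLOC_RANGE(0, 4) == 0x000000000000000f
 *
 * A cluster whose subclusters 0-3 are allocated and whose subcluster 5
 * reads as zeroes therefore carries the bitmap 0x000000200000000f.
 */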
/* Size of normal and extended L2 entries */
#define QCOW_L2E_SIZE_NORMAL (sizeof(u64))
#define QCOW_L2E_SIZE_EXTENDED (sizeof(u64) * 2)
/* Size of L1 table entries */
#define QCOW_L1E_SIZE (sizeof(u64))
/* Size of reftable entries */
#define QCOW_REFTABLE_ENTRY_SIZE (sizeof(u64))
#define QCOW_MIN_CLUSTER_BITS 9
#define QCOW_MAX_CLUSTER_BITS 21
/* Defined in the qcow2 spec (compressed cluster descriptor) */
#define QCOW_COMPRESSED_SECTOR_SIZE 512U
#define QCOW_COMPRESSED_SECTOR_MASK (~(QCOW_COMPRESSED_SECTOR_SIZE - 1))
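/*
 * Layout of a compressed cluster descriptor as defined by the qcow2
 * specification (summary for reference): with x = 62 - (cluster_bits - 8),
 * bits 0..x-1 hold the host offset of the compressed data and bits x..61
 * hold the number of additional 512-byte sectors it occupies. For the
 * default 64 KiB clusters (cluster_bits = 16) this means offset bits
 * 0..53 and sector-count bits 54..61.
 */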
/* Must be at least 2 to cover COW */
#define QCOW_MIN_L2_CACHE_SIZE 2 /* cache entries */
/* Must be at least 4 to cover all cases of refcount table growth */
#define QCOW_MIN_REFCOUNT_CACHE_SIZE 4 /* clusters */
#define QCOW_DEFAULT_L2_CACHE_MAX_SIZE (32 * MiB)
#define QCOW_DEFAULT_CACHE_CLEAN_INTERVAL 600 /* seconds */
#define QCOW_DEFAULT_CLUSTER_SIZE 65536
/* Size of the debugfs buffer used to display QCOW header information */
#define QCOW_HEADER_BUF_LEN 1024
/*
 * Size of the debugfs buffers used to receive an offset and to display
 * the corresponding cluster offset information
*/
#define QCOW_OFFSET_BUF_LEN 32
#define QCOW_CLUSTER_BUF_LEN 256
struct xloop_file_fmt_qcow_header {
u32 magic;
u32 version;
u64 backing_file_offset;
u32 backing_file_size;
u32 cluster_bits;
u64 size; /* in bytes */
u32 crypt_method;
u32 l1_size;
u64 l1_table_offset;
u64 refcount_table_offset;
u32 refcount_table_clusters;
u32 nb_snapshots;
u64 snapshots_offset;
/* The following fields are only valid for version >= 3 */
u64 incompatible_features;
u64 compatible_features;
u64 autoclear_features;
u32 refcount_order;
u32 header_length;
/* Additional fields */
u8 compression_type;
/* header must be a multiple of 8 */
u8 padding[7];
} __packed;
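/*
 * All on-disk header fields are stored in big-endian byte order (per the
 * qcow2 specification) and must be converted with be32_to_cpu()/
 * be64_to_cpu() after reading. A minimal validity check could look like
 * the following sketch (illustrative only, not the driver's actual probe
 * code):
 *
 *   if (be32_to_cpu(header->magic) != QCOW_MAGIC ||
 *       be32_to_cpu(header->version) < 2)
 *           return -EINVAL;
 */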
struct xloop_file_fmt_qcow_snapshot_header {
/* header is 8 byte aligned */
u64 l1_table_offset;
u32 l1_size;
u16 id_str_size;
u16 name_size;
u32 date_sec;
u32 date_nsec;
u64 vm_clock_nsec;
u32 vm_state_size;
/* Size of all extra data, including QCowSnapshotExtraData if available */
u32 extra_data_size;
/* Data beyond QCowSnapshotExtraData, if any */
void *unknown_extra_data;
} __packed;
enum {
QCOW_FEAT_TYPE_INCOMPATIBLE = 0,
QCOW_FEAT_TYPE_COMPATIBLE = 1,
QCOW_FEAT_TYPE_AUTOCLEAR = 2,
};
/* incompatible feature bits */
enum {
QCOW_INCOMPAT_DIRTY_BITNR = 0,
QCOW_INCOMPAT_CORRUPT_BITNR = 1,
QCOW_INCOMPAT_DATA_FILE_BITNR = 2,
QCOW_INCOMPAT_COMPRESSION_BITNR = 3,
QCOW_INCOMPAT_EXTL2_BITNR = 4,
QCOW_INCOMPAT_DIRTY = 1 << QCOW_INCOMPAT_DIRTY_BITNR,
QCOW_INCOMPAT_CORRUPT = 1 << QCOW_INCOMPAT_CORRUPT_BITNR,
QCOW_INCOMPAT_DATA_FILE = 1 << QCOW_INCOMPAT_DATA_FILE_BITNR,
QCOW_INCOMPAT_COMPRESSION = 1 << QCOW_INCOMPAT_COMPRESSION_BITNR,
QCOW_INCOMPAT_EXTL2 = 1 << QCOW_INCOMPAT_EXTL2_BITNR,
QCOW_INCOMPAT_MASK = QCOW_INCOMPAT_DIRTY | QCOW_INCOMPAT_CORRUPT | QCOW_INCOMPAT_DATA_FILE |
QCOW_INCOMPAT_COMPRESSION | QCOW_INCOMPAT_EXTL2,
};
/* compatible feature bits */
enum {
QCOW_COMPAT_LAZY_REFCOUNTS_BITNR = 0,
QCOW_COMPAT_LAZY_REFCOUNTS = 1 << QCOW_COMPAT_LAZY_REFCOUNTS_BITNR,
QCOW_COMPAT_FEAT_MASK = QCOW_COMPAT_LAZY_REFCOUNTS,
};
/* autoclear feature bits */
enum {
QCOW_AUTOCLEAR_BITMAPS_BITNR = 0,
QCOW_AUTOCLEAR_DATA_FILE_RAW_BITNR = 1,
QCOW_AUTOCLEAR_BITMAPS = 1 << QCOW_AUTOCLEAR_BITMAPS_BITNR,
QCOW_AUTOCLEAR_DATA_FILE_RAW = 1 << QCOW_AUTOCLEAR_DATA_FILE_RAW_BITNR,
QCOW_AUTOCLEAR_MASK = QCOW_AUTOCLEAR_BITMAPS | QCOW_AUTOCLEAR_DATA_FILE_RAW,
};
enum xloop_file_fmt_qcow_compression_type {
QCOW_COMPRESSION_TYPE_ZLIB,
QCOW_COMPRESSION_TYPE_ZSTD,
};
struct xloop_file_fmt_qcow_data {
u64 size;
int cluster_bits;
int cluster_size;
int l2_slice_size;
int subcluster_bits;
int subcluster_size;
int subclusters_per_cluster;
int l2_bits;
int l2_size;
int l1_size;
int l1_vm_state_index;
int refcount_block_bits;
int refcount_block_size;
int csize_shift;
int csize_mask;
u64 cluster_offset_mask;
u64 l1_table_offset;
u64 *l1_table;
struct xloop_file_fmt_qcow_cache *l2_table_cache;
struct xloop_file_fmt_qcow_cache *refcount_block_cache;
u64 *refcount_table;
u64 refcount_table_offset;
u32 refcount_table_size;
u32 max_refcount_table_index; /* Last used entry in refcount_table */
u64 free_cluster_index;
u64 free_byte_offset;
u32 crypt_method_header;
u64 snapshots_offset;
int snapshots_size;
unsigned int nb_snapshots;
u32 nb_bitmaps;
u64 bitmap_directory_size;
u64 bitmap_directory_offset;
int qcow_version;
bool use_lazy_refcounts;
int refcount_order;
int refcount_bits;
u64 refcount_max;
u64 incompatible_features;
u64 compatible_features;
u64 autoclear_features;
struct mutex global_mutex;
/* ZLIB specific data */
z_streamp zlib_dstrm;
/* ZSTD specific data */
#ifdef CONFIG_ZSTD_DECOMPRESS
void *zstd_dworkspace;
ZSTD_DStream *zstd_dstrm;
#endif
/* used to cache last compressed QCOW cluster */
u8 *cmp_out_buf;
u64 cmp_last_coffset;
int cmp_last_size;
/*
* Compression type used for the image. Default: 0 - ZLIB
* The image compression type is set on image creation.
* For now, the only way to change the compression type
* is to convert the image with the desired compression type set.
*/
enum xloop_file_fmt_qcow_compression_type compression_type;
/* debugfs entries */
#ifdef CONFIG_DEBUG_FS
struct dentry *dbgfs_dir;
struct dentry *dbgfs_file_qcow_header;
char dbgfs_file_qcow_header_buf[QCOW_HEADER_BUF_LEN];
struct dentry *dbgfs_file_qcow_offset;
char dbgfs_file_qcow_offset_buf[QCOW_OFFSET_BUF_LEN];
char dbgfs_file_qcow_cluster_buf[QCOW_CLUSTER_BUF_LEN];
u64 dbgfs_qcow_offset;
struct mutex dbgfs_qcow_offset_mutex;
#endif
};
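/*
 * The derived geometry fields above typically follow the qcow2 format
 * rules (a sketch based on the QEMU code this driver was ported from;
 * the authoritative derivation lives in the .c file):
 *
 *   cluster_size            = 1 << cluster_bits
 *   l2_size                 = cluster_size / L2 entry size (8 or 16 bytes)
 *   subclusters_per_cluster = extended L2 entries ? 32 : 1
 *   subcluster_size         = cluster_size / subclusters_per_cluster
 *   refcount_bits           = 1 << refcount_order
 */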
struct xloop_file_fmt_qcow_cow_region {
/**
* Offset of the COW region in bytes from the start of the first
* cluster touched by the request.
*/
unsigned int offset;
/** Number of bytes to copy */
unsigned int nb_bytes;
};
/*
* In images with standard L2 entries all clusters are treated as if
* they had one subcluster so xloop_file_fmt_qcow_cluster_type and
* xloop_file_fmt_qcow_subcluster_type can be mapped to each other and
* have the exact same meaning (QCOW_SUBCLUSTER_UNALLOCATED_ALLOC cannot
* happen in these images).
*
* In images with extended L2 entries xloop_file_fmt_qcow_cluster_type
* refers to the complete cluster and xloop_file_fmt_qcow_subcluster_type
* to each of the individual subclusters, so there are several possible
* combinations:
*
* |--------------+---------------------------|
* | Cluster type | Possible subcluster types |
* |--------------+---------------------------|
* | UNALLOCATED | UNALLOCATED_PLAIN |
* | | ZERO_PLAIN |
* |--------------+---------------------------|
* | NORMAL | UNALLOCATED_ALLOC |
* | | ZERO_ALLOC |
* | | NORMAL |
* |--------------+---------------------------|
* | COMPRESSED | COMPRESSED |
* |--------------+---------------------------|
*
* QCOW_SUBCLUSTER_INVALID means that the L2 entry is incorrect and
* the image should be marked corrupt.
*/
enum xloop_file_fmt_qcow_cluster_type {
QCOW_CLUSTER_UNALLOCATED,
QCOW_CLUSTER_ZERO_PLAIN,
QCOW_CLUSTER_ZERO_ALLOC,
QCOW_CLUSTER_NORMAL,
QCOW_CLUSTER_COMPRESSED,
};
enum xloop_file_fmt_qcow_subcluster_type {
QCOW_SUBCLUSTER_UNALLOCATED_PLAIN,
QCOW_SUBCLUSTER_UNALLOCATED_ALLOC,
QCOW_SUBCLUSTER_ZERO_PLAIN,
QCOW_SUBCLUSTER_ZERO_ALLOC,
QCOW_SUBCLUSTER_NORMAL,
QCOW_SUBCLUSTER_COMPRESSED,
QCOW_SUBCLUSTER_INVALID,
};
enum xloop_file_fmt_qcow_metadata_overlap {
QCOW_OL_MAIN_HEADER_BITNR = 0,
QCOW_OL_ACTIVE_L1_BITNR = 1,
QCOW_OL_ACTIVE_L2_BITNR = 2,
QCOW_OL_REFCOUNT_TABLE_BITNR = 3,
QCOW_OL_REFCOUNT_BLOCK_BITNR = 4,
QCOW_OL_SNAPSHOT_TABLE_BITNR = 5,
QCOW_OL_INACTIVE_L1_BITNR = 6,
QCOW_OL_INACTIVE_L2_BITNR = 7,
QCOW_OL_BITMAP_DIRECTORY_BITNR = 8,
QCOW_OL_MAX_BITNR = 9,
QCOW_OL_NONE = 0,
QCOW_OL_MAIN_HEADER = (1 << QCOW_OL_MAIN_HEADER_BITNR),
QCOW_OL_ACTIVE_L1 = (1 << QCOW_OL_ACTIVE_L1_BITNR),
QCOW_OL_ACTIVE_L2 = (1 << QCOW_OL_ACTIVE_L2_BITNR),
QCOW_OL_REFCOUNT_TABLE = (1 << QCOW_OL_REFCOUNT_TABLE_BITNR),
QCOW_OL_REFCOUNT_BLOCK = (1 << QCOW_OL_REFCOUNT_BLOCK_BITNR),
QCOW_OL_SNAPSHOT_TABLE = (1 << QCOW_OL_SNAPSHOT_TABLE_BITNR),
QCOW_OL_INACTIVE_L1 = (1 << QCOW_OL_INACTIVE_L1_BITNR),
/* NOTE: Checking overlaps with inactive L2 tables will result in bdrv reads. */
QCOW_OL_INACTIVE_L2 = (1 << QCOW_OL_INACTIVE_L2_BITNR),
QCOW_OL_BITMAP_DIRECTORY = (1 << QCOW_OL_BITMAP_DIRECTORY_BITNR),
};
/* Perform all overlap checks which can be done in constant time */
#define QCOW_OL_CONSTANT \
(QCOW_OL_MAIN_HEADER | QCOW_OL_ACTIVE_L1 | QCOW_OL_REFCOUNT_TABLE | QCOW_OL_SNAPSHOT_TABLE | \
QCOW_OL_BITMAP_DIRECTORY)
/* Perform all overlap checks which don't require disk access */
#define QCOW_OL_CACHED (QCOW_OL_CONSTANT | QCOW_OL_ACTIVE_L2 | QCOW_OL_REFCOUNT_BLOCK | QCOW_OL_INACTIVE_L1)
/* Perform all overlap checks */
#define QCOW_OL_ALL (QCOW_OL_CACHED | QCOW_OL_INACTIVE_L2)
#define QCOW_L1E_OFFSET_MASK 0x00fffffffffffe00ULL
#define QCOW_L2E_OFFSET_MASK 0x00fffffffffffe00ULL
#define QCOW_L2E_COMPRESSED_OFFSET_SIZE_MASK 0x3fffffffffffffffULL
static inline bool xloop_file_fmt_qcow_has_subclusters(struct xloop_file_fmt_qcow_data *qcow_data)
{
return qcow_data->incompatible_features & QCOW_INCOMPAT_EXTL2;
}
static inline size_t xloop_file_fmt_qcow_l2_entry_size(struct xloop_file_fmt_qcow_data *qcow_data)
{
return xloop_file_fmt_qcow_has_subclusters(qcow_data) ? QCOW_L2E_SIZE_EXTENDED : QCOW_L2E_SIZE_NORMAL;
}
static inline u64 xloop_file_fmt_qcow_get_l2_entry(struct xloop_file_fmt_qcow_data *qcow_data, u64 *l2_slice, int idx)
{
idx *= xloop_file_fmt_qcow_l2_entry_size(qcow_data) / sizeof(u64);
return be64_to_cpu(l2_slice[idx]);
}
static inline u64 xloop_file_fmt_qcow_get_l2_bitmap(struct xloop_file_fmt_qcow_data *qcow_data, u64 *l2_slice, int idx)
{
if (xloop_file_fmt_qcow_has_subclusters(qcow_data)) {
idx *= xloop_file_fmt_qcow_l2_entry_size(qcow_data) / sizeof(u64);
return be64_to_cpu(l2_slice[idx + 1]);
} else {
return 0; /* For convenience only; this value has no meaning. */
}
}
static inline bool xloop_file_fmt_qcow_has_data_file(struct xloop_file_fmt_qcow_data *qcow_data)
{
	/* External data files are not supported yet, so this always returns false. */
return false;
}
static inline bool xloop_file_fmt_qcow_data_file_is_raw(struct xloop_file_fmt_qcow_data *qcow_data)
{
return !!(qcow_data->autoclear_features & QCOW_AUTOCLEAR_DATA_FILE_RAW);
}
static inline s64 xloop_file_fmt_qcow_start_of_cluster(struct xloop_file_fmt_qcow_data *qcow_data, s64 offset)
{
return offset & ~(qcow_data->cluster_size - 1);
}
static inline s64 xloop_file_fmt_qcow_offset_into_cluster(struct xloop_file_fmt_qcow_data *qcow_data, s64 offset)
{
return offset & (qcow_data->cluster_size - 1);
}
static inline s64 xloop_file_fmt_qcow_offset_into_subcluster(struct xloop_file_fmt_qcow_data *qcow_data, s64 offset)
{
return offset & (qcow_data->subcluster_size - 1);
}
static inline s64 xloop_file_fmt_qcow_size_to_clusters(struct xloop_file_fmt_qcow_data *qcow_data, u64 size)
{
return (size + (qcow_data->cluster_size - 1)) >> qcow_data->cluster_bits;
}
static inline s64 xloop_file_fmt_qcow_size_to_l1(struct xloop_file_fmt_qcow_data *qcow_data, s64 size)
{
int shift = qcow_data->cluster_bits + qcow_data->l2_bits;
return (size + (1ULL << shift) - 1) >> shift;
}
static inline int xloop_file_fmt_qcow_offset_to_l1_index(struct xloop_file_fmt_qcow_data *qcow_data, u64 offset)
{
return offset >> (qcow_data->l2_bits + qcow_data->cluster_bits);
}
static inline int xloop_file_fmt_qcow_offset_to_l2_index(struct xloop_file_fmt_qcow_data *qcow_data, s64 offset)
{
return (offset >> qcow_data->cluster_bits) & (qcow_data->l2_size - 1);
}
static inline int xloop_file_fmt_qcow_offset_to_l2_slice_index(struct xloop_file_fmt_qcow_data *qcow_data, s64 offset)
{
return (offset >> qcow_data->cluster_bits) & (qcow_data->l2_slice_size - 1);
}
static inline int xloop_file_fmt_qcow_offset_to_sc_index(struct xloop_file_fmt_qcow_data *qcow_data, s64 offset)
{
return (offset >> qcow_data->subcluster_bits) & (qcow_data->subclusters_per_cluster - 1);
}
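/*
 * Worked example for the index helpers above, assuming the default
 * 64 KiB clusters (cluster_bits = 16) and standard 8-byte L2 entries
 * (l2_bits = 13, i.e. 8192 entries per L2 table); the numbers are
 * purely illustrative:
 *
 *   guest offset 0x40012345
 *     L1 index          = 0x40012345 >> (16 + 13)   = 2
 *     L2 index          = (0x40012345 >> 16) & 8191 = 1
 *     offset in cluster = 0x40012345 & 0xffff       = 0x2345
 */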
static inline s64 xloop_file_fmt_qcow_vm_state_offset(struct xloop_file_fmt_qcow_data *qcow_data)
{
return (s64)qcow_data->l1_vm_state_index << (qcow_data->cluster_bits + qcow_data->l2_bits);
}
static inline enum xloop_file_fmt_qcow_cluster_type xloop_file_fmt_qcow_get_cluster_type(struct xloop_file_fmt *xlo_fmt,
u64 l2_entry)
{
struct xloop_file_fmt_qcow_data *qcow_data = xlo_fmt->private_data;
if (l2_entry & QCOW_OFLAG_COMPRESSED) {
return QCOW_CLUSTER_COMPRESSED;
} else if (l2_entry & QCOW_OFLAG_ZERO) {
if (l2_entry & QCOW_L2E_OFFSET_MASK)
return QCOW_CLUSTER_ZERO_ALLOC;
return QCOW_CLUSTER_ZERO_PLAIN;
} else if (!(l2_entry & QCOW_L2E_OFFSET_MASK)) {
/*
* Offset 0 generally means unallocated, but it is ambiguous
* with external data files because 0 is a valid offset there.
* However, all clusters in external data files always have
* refcount 1, so we can rely on QCOW_OFLAG_COPIED to
* disambiguate.
*/
if (xloop_file_fmt_qcow_has_data_file(qcow_data) && (l2_entry & QCOW_OFLAG_COPIED))
return QCOW_CLUSTER_NORMAL;
else
return QCOW_CLUSTER_UNALLOCATED;
} else {
return QCOW_CLUSTER_NORMAL;
}
}
/*
 * In an image without subclusters @l2_bitmap is ignored and
 * @sc_index must be 0.
 * Returns QCOW_SUBCLUSTER_INVALID if an invalid L2 entry is detected
* (this checks the whole entry and bitmap, not only the bits related
* to subcluster @sc_index).
*/
static inline enum xloop_file_fmt_qcow_subcluster_type
xloop_file_fmt_qcow_get_subcluster_type(struct xloop_file_fmt *xlo_fmt, u64 l2_entry, u64 l2_bitmap,
unsigned int sc_index)
{
struct xloop_file_fmt_qcow_data *qcow_data = xlo_fmt->private_data;
enum xloop_file_fmt_qcow_cluster_type type = xloop_file_fmt_qcow_get_cluster_type(xlo_fmt, l2_entry);
ASSERT(sc_index < qcow_data->subclusters_per_cluster);
if (xloop_file_fmt_qcow_has_subclusters(qcow_data)) {
switch (type) {
case QCOW_CLUSTER_COMPRESSED:
return QCOW_SUBCLUSTER_COMPRESSED;
case QCOW_CLUSTER_NORMAL:
if ((l2_bitmap >> 32) & l2_bitmap)
return QCOW_SUBCLUSTER_INVALID;
else if (l2_bitmap & QCOW_OFLAG_SUB_ZERO(sc_index))
return QCOW_SUBCLUSTER_ZERO_ALLOC;
else if (l2_bitmap & QCOW_OFLAG_SUB_ALLOC(sc_index))
return QCOW_SUBCLUSTER_NORMAL;
else
return QCOW_SUBCLUSTER_UNALLOCATED_ALLOC;
case QCOW_CLUSTER_UNALLOCATED:
if (l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC)
return QCOW_SUBCLUSTER_INVALID;
else if (l2_bitmap & QCOW_OFLAG_SUB_ZERO(sc_index))
return QCOW_SUBCLUSTER_ZERO_PLAIN;
else
return QCOW_SUBCLUSTER_UNALLOCATED_PLAIN;
default:
/* not reachable */
ASSERT(false);
return QCOW_SUBCLUSTER_INVALID;
}
} else {
switch (type) {
case QCOW_CLUSTER_COMPRESSED:
return QCOW_SUBCLUSTER_COMPRESSED;
case QCOW_CLUSTER_ZERO_PLAIN:
return QCOW_SUBCLUSTER_ZERO_PLAIN;
case QCOW_CLUSTER_ZERO_ALLOC:
return QCOW_SUBCLUSTER_ZERO_ALLOC;
case QCOW_CLUSTER_NORMAL:
return QCOW_SUBCLUSTER_NORMAL;
case QCOW_CLUSTER_UNALLOCATED:
return QCOW_SUBCLUSTER_UNALLOCATED_PLAIN;
default:
/* not reachable */
ASSERT(false);
return QCOW_SUBCLUSTER_INVALID;
}
}
}
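/*
 * Typical combined use of the L2 helpers declared in this header when
 * inspecting a single slice entry (an illustrative sketch, not a copy of
 * the driver's cluster mapping code; the local variables are assumed):
 *
 *   u64 entry  = xloop_file_fmt_qcow_get_l2_entry(qcow_data, l2_slice, idx);
 *   u64 bitmap = xloop_file_fmt_qcow_get_l2_bitmap(qcow_data, l2_slice, idx);
 *   int sc     = xloop_file_fmt_qcow_offset_to_sc_index(qcow_data, offset);
 *
 *   switch (xloop_file_fmt_qcow_get_subcluster_type(xlo_fmt, entry, bitmap, sc)) {
 *   case QCOW_SUBCLUSTER_NORMAL:
 *           host_offset = (entry & QCOW_L2E_OFFSET_MASK) +
 *                         xloop_file_fmt_qcow_offset_into_cluster(qcow_data, offset);
 *           break;
 *   ...
 *   }
 */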
#ifdef CONFIG_DEBUG_FS
static inline const char *xloop_file_fmt_qcow_get_subcluster_name(const enum xloop_file_fmt_qcow_subcluster_type type)
{
static const char * const subcluster_names[] = { "QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN",
"QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC",
"QCOW2_SUBCLUSTER_ZERO_PLAIN",
"QCOW2_SUBCLUSTER_ZERO_ALLOC",
"QCOW2_SUBCLUSTER_NORMAL",
"QCOW2_SUBCLUSTER_COMPRESSED",
"QCOW2_SUBCLUSTER_INVALID" };
return subcluster_names[type];
}
#endif
#endif