/* SPDX-License-Identifier: GPL-2.0 */
/*
 * xloop_file_fmt_qcow_cache.c
 *
 * QCOW file format driver for the xloop device module.
 *
 * Ported from the QCOW2 implementation of the QEMU project (GPL-2.0):
 * the L2/refcount table cache for the QCOW2 format.
 *
 * The original code is
 * Copyright (C) 2010 Kevin Wolf <kwolf@redhat.com>
 *
 * Copyright (C) 2019 Manuel Bentele <development@manuel-bentele.de>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#include "xloop_file_fmt_qcow_main.h"
#include "xloop_file_fmt_qcow_cache.h"

static inline void *__xloop_file_fmt_qcow_cache_get_table_addr(
	struct xloop_file_fmt_qcow_cache *c, int table)
{
	return (u8 *) c->table_array + (size_t) table * c->table_size;
}

static inline int __xloop_file_fmt_qcow_cache_get_table_idx(
	struct xloop_file_fmt_qcow_cache *c, void *table)
{
	ptrdiff_t table_offset = (u8 *) table - (u8 *) c->table_array;
	int idx = div_s64(table_offset, c->table_size);

#ifdef ASSERT
	s32 rem_table_offset_mod_table_size;
	div_s64_rem(table_offset, c->table_size, &rem_table_offset_mod_table_size);
	ASSERT(idx >= 0 && idx < c->size && rem_table_offset_mod_table_size == 0);
#endif

	return idx;
}

static inline const char *__xloop_file_fmt_qcow_cache_get_name(
	struct xloop_file_fmt *xlo_fmt, struct xloop_file_fmt_qcow_cache *c)
{
	struct xloop_file_fmt_qcow_data *qcow_data = xlo_fmt->private_data;

	if (c == qcow_data->refcount_block_cache) {
		return "refcount block";
	} else if (c == qcow_data->l2_table_cache) {
		return "L2 table";
	} else {
		/* do not abort, because this is not critical */
		return "unknown";
	}
}

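/*
 * Allocate a cache with num_tables slots of table_size bytes each. The
 * per-slot metadata and the contiguous table payload array are allocated
 * separately with vzalloc(), so caches larger than a page work as well.
 */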
struct xloop_file_fmt_qcow_cache *xloop_file_fmt_qcow_cache_create(
	struct xloop_file_fmt *xlo_fmt, int num_tables, unsigned table_size)
{
#ifdef CONFIG_DEBUG_DRIVER
	struct xloop_file_fmt_qcow_data *qcow_data = xlo_fmt->private_data;
#endif
	struct xloop_file_fmt_qcow_cache *c;

	ASSERT(num_tables > 0);
	ASSERT(is_power_of_2(table_size));
	ASSERT(table_size >= (1 << QCOW_MIN_CLUSTER_BITS));
	ASSERT(table_size <= qcow_data->cluster_size);

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		return NULL;
	}

	c->size = num_tables;
	c->table_size = table_size;
	c->entries = vzalloc(sizeof(struct xloop_file_fmt_qcow_cache_table) *
		num_tables);
	c->table_array = vzalloc(num_tables * c->table_size);

	if (!c->entries || !c->table_array) {
		vfree(c->table_array);
		vfree(c->entries);
		kfree(c);
		c = NULL;
	}

	return c;
}

void xloop_file_fmt_qcow_cache_destroy(struct xloop_file_fmt *xlo_fmt)
{
	struct xloop_file_fmt_qcow_data *qcow_data = xlo_fmt->private_data;
	struct xloop_file_fmt_qcow_cache *c = qcow_data->l2_table_cache;
	int i;

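	/*
	 * Every table must have been released with
	 * xloop_file_fmt_qcow_cache_put() before the cache is destroyed.
	 */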
	for (i = 0; i < c->size; i++) {
		ASSERT(c->entries[i].ref == 0);
	}

	vfree(c->table_array);
	vfree(c->entries);
	kfree(c);
}

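/*
 * Write a single cache entry back to the image file. Clean or empty entries
 * are a no-op; actually writing back a dirty table is not implemented yet
 * and is reported as an error.
 */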
static int __xloop_file_fmt_qcow_cache_entry_flush(
	struct xloop_file_fmt *xlo_fmt, struct xloop_file_fmt_qcow_cache *c, int i)
{
	if (!c->entries[i].dirty || !c->entries[i].offset) {
		return 0;
	} else {
		dev_err_ratelimited(xloop_file_fmt_to_dev(xlo_fmt),
			"flushing dirty cache tables is not supported yet\n");
		return -ENOSYS;
	}
}

static int __xloop_file_fmt_qcow_cache_do_get(struct xloop_file_fmt *xlo_fmt,
	struct xloop_file_fmt_qcow_cache *c, u64 offset, void **table,
	bool read_from_disk)
{
	struct xloop_device *xlo = xloop_file_fmt_get_xlo(xlo_fmt);
	int i;
	int ret;
	u32 lookup_index;
	u64 min_lru_counter = U64_MAX;
	int min_lru_index = -1;
	u64 read_offset;
	u64 offset_div_table_size;
	ssize_t len;

	ASSERT(offset != 0);

	if (!IS_ALIGNED(offset, c->table_size)) {
		dev_err_ratelimited(xloop_file_fmt_to_dev(xlo_fmt),
			"cannot get entry from %s cache: offset %llx is unaligned\n",
			__xloop_file_fmt_qcow_cache_get_name(xlo_fmt, c), offset);
		return -EIO;
	}

	/* Check if the table is already cached */
	offset_div_table_size = div_u64(offset, c->table_size) * 4;
	div_u64_rem(offset_div_table_size, c->size, &lookup_index);
	i = lookup_index;
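	/*
	 * Probe linearly from the derived start index (the factor 4 above
	 * spreads consecutive tables across the cache, as in the QEMU qcow2
	 * cache), wrapping around. Any unreferenced entry seen on the way is
	 * remembered as a potential LRU eviction victim for a miss.
	 */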
	do {
		const struct xloop_file_fmt_qcow_cache_table *t =
			&c->entries[i];
		if (t->offset == offset) {
			goto found;
		}
		if (t->ref == 0 && t->lru_counter < min_lru_counter) {
			min_lru_counter = t->lru_counter;
			min_lru_index = i;
		}
		if (++i == c->size) {
			i = 0;
		}
	} while (i != lookup_index);

	if (min_lru_index == -1) {
		/*
		 * This can't happen in current synchronous code, but leave
		 * the check here as a reminder for whoever starts using AIO
		 * with the QCOW cache.
		 */
		BUG();
	}

	/* Cache miss: flush (if needed) and replace the LRU entry */
	i = min_lru_index;

	ret = __xloop_file_fmt_qcow_cache_entry_flush(xlo_fmt, c, i);
	if (ret < 0) {
		return ret;
	}

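	/*
	 * Invalidate the slot while it is being refilled so that a failed
	 * read cannot leave the old offset pointing at replaced contents.
	 */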
	c->entries[i].offset = 0;
	if (read_from_disk) {
		read_offset = offset;
		len = kernel_read(xlo->xlo_backing_file,
			__xloop_file_fmt_qcow_cache_get_table_addr(c, i),
			c->table_size, &read_offset);
		if (len < 0) {
			ret = len;
			return ret;
		}
	}

	c->entries[i].offset = offset;

	/* And return the right table */
found:
	c->entries[i].ref++;
	*table = __xloop_file_fmt_qcow_cache_get_table_addr(c, i);

	return 0;
}

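/*
 * Look up the L2 table covering offset, reading it from the backing file on
 * a cache miss. The returned table stays referenced until it is released
 * with xloop_file_fmt_qcow_cache_put().
 */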
int xloop_file_fmt_qcow_cache_get(struct xloop_file_fmt *xlo_fmt, u64 offset,
	void **table)
{
	struct xloop_file_fmt_qcow_data *qcow_data = xlo_fmt->private_data;
	struct xloop_file_fmt_qcow_cache *c = qcow_data->l2_table_cache;

	return __xloop_file_fmt_qcow_cache_do_get(xlo_fmt, c, offset, table,
		true);
}

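/*
 * Drop the reference taken by xloop_file_fmt_qcow_cache_get(). Once the
 * last reference is gone, the entry becomes an eviction candidate and its
 * LRU counter records the release order.
 */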
void xloop_file_fmt_qcow_cache_put(struct xloop_file_fmt *xlo_fmt, void **table)
{
	struct xloop_file_fmt_qcow_data *qcow_data = xlo_fmt->private_data;
	struct xloop_file_fmt_qcow_cache *c = qcow_data->l2_table_cache;
	int i = __xloop_file_fmt_qcow_cache_get_table_idx(c, *table);

	c->entries[i].ref--;
	*table = NULL;

	if (c->entries[i].ref == 0) {
		c->entries[i].lru_counter = ++c->lru_counter;
	}

	ASSERT(c->entries[i].ref >= 0);
}