From c3881247493c6c984ae38411c14f9121fbc60df3 Mon Sep 17 00:00:00 2001 From: Jonathan Bauer Date: Fri, 29 May 2020 14:29:05 +0200 Subject: Initial code from Manuel Bentele kept unchanged for reference. --- Kconfig | 93 ++ Makefile | 11 + cryptoloop.c | 216 +++++ loop_file_fmt.c | 328 +++++++ loop_file_fmt.h | 351 +++++++ loop_file_fmt_qcow_cache.c | 218 +++++ loop_file_fmt_qcow_cache.h | 51 + loop_file_fmt_qcow_cluster.c | 270 ++++++ loop_file_fmt_qcow_cluster.h | 23 + loop_file_fmt_qcow_main.c | 945 ++++++++++++++++++ loop_file_fmt_qcow_main.h | 417 ++++++++ loop_file_fmt_raw.c | 450 +++++++++ loop_main.c | 2173 ++++++++++++++++++++++++++++++++++++++++++ loop_main.h | 106 +++ 14 files changed, 5652 insertions(+) create mode 100644 Kconfig create mode 100644 Makefile create mode 100644 cryptoloop.c create mode 100644 loop_file_fmt.c create mode 100644 loop_file_fmt.h create mode 100644 loop_file_fmt_qcow_cache.c create mode 100644 loop_file_fmt_qcow_cache.h create mode 100644 loop_file_fmt_qcow_cluster.c create mode 100644 loop_file_fmt_qcow_cluster.h create mode 100644 loop_file_fmt_qcow_main.c create mode 100644 loop_file_fmt_qcow_main.h create mode 100644 loop_file_fmt_raw.c create mode 100644 loop_main.c create mode 100644 loop_main.h diff --git a/Kconfig b/Kconfig new file mode 100644 index 0000000..d227337 --- /dev/null +++ b/Kconfig @@ -0,0 +1,93 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Loop device driver configuration +# + +config BLK_DEV_LOOP + tristate "Loopback device support" + ---help--- + Saying Y here will allow you to use a regular file as a block + device; you can then create a file system on that block device and + mount it just as you would mount other block devices such as hard + drive partitions, CD-ROM drives or floppy drives. The loop devices + are block special device files with major number 7 and typically + called /dev/loop0, /dev/loop1 etc. + + This is useful if you want to check an ISO 9660 file system before + burning the CD, or if you want to use floppy images without first + writing them to floppy. Furthermore, some Linux distributions avoid + the need for a dedicated Linux partition by keeping their complete + root file system inside a DOS FAT file using this loop device + driver. + + To use the loop device, you need the losetup utility, found in the + util-linux package, see + . + + The loop device driver can also be used to "hide" a file system in + a disk partition, floppy, or regular file, either using encryption + (scrambling the data) or steganography (hiding the data in the low + bits of, say, a sound file). This is also safe if the file resides + on a remote file server. + + There are several ways of encrypting disks. Some of these require + kernel patches. The vanilla kernel offers the cryptoloop option + and a Device Mapper target (which is superior, as it supports all + file systems). If you want to use the cryptoloop, say Y to both + LOOP and CRYPTOLOOP, and make sure you have a recent (version 2.12 + or later) version of util-linux. Additionally, be aware that + the cryptoloop is not safe for storing journaled filesystems. + + Note that this loop device has nothing to do with the loopback + device used for network connections from the machine to itself. + + To compile this driver as a module, choose M here: the + module will be called loop. + + Most users will answer N here. 
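+# Example: when built as a module, the pre-created device count below can
+# be overridden at load time, e.g. "modprobe loop max_loop=16" (see the
+# loop.max_loop parameter described under BLK_DEV_LOOP_MIN_COUNT).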
+ +config BLK_DEV_LOOP_MIN_COUNT + int "Number of loop devices to pre-create at init time" + depends on BLK_DEV_LOOP + default 8 + help + Static number of loop devices to be unconditionally pre-created + at init time. + + This default value can be overwritten on the kernel command + line or with module-parameter loop.max_loop. + + The historic default is 8. If a late 2011 version of losetup(8) + is used, it can be set to 0, since needed loop devices can be + dynamically allocated with the /dev/loop-control interface. + +config BLK_DEV_CRYPTOLOOP + tristate "Cryptoloop Support" + select CRYPTO + select CRYPTO_CBC + depends on BLK_DEV_LOOP + ---help--- + Say Y here if you want to be able to use the ciphers that are + provided by the CryptoAPI as loop transformation. This might be + used as hard disk encryption. + + WARNING: This device is not safe for journaled file systems like + ext3 or Reiserfs. Please use the Device Mapper crypto module + instead, which can be configured to be on-disk compatible with the + cryptoloop device. + +config BLK_DEV_LOOP_FILE_FMT_RAW + tristate "Loop device binary file format support" + depends on BLK_DEV_LOOP + ---help--- + Say Y or M here if you want to enable the binary (RAW) file format + support of the loop device module. + +config BLK_DEV_LOOP_FILE_FMT_QCOW + tristate "Loop device QCOW file format support" + depends on BLK_DEV_LOOP + select ZLIB_INFLATE + select ZLIB_DEFLATE + ---help--- + Say Y or M here if you want to enable the QEMU's copy on write (QCOW) + file format support of the loop device module. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..a82cd7a --- /dev/null +++ b/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 + +loop-y += loop_main.o loop_file_fmt.o +obj-$(CONFIG_BLK_DEV_LOOP) += loop.o + +obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o + +obj-$(CONFIG_BLK_DEV_LOOP_FILE_FMT_RAW) += loop_file_fmt_raw.o + +loop_file_fmt_qcow-y += loop_file_fmt_qcow_main.o loop_file_fmt_qcow_cluster.o loop_file_fmt_qcow_cache.o +obj-$(CONFIG_BLK_DEV_LOOP_FILE_FMT_QCOW) += loop_file_fmt_qcow.o diff --git a/cryptoloop.c b/cryptoloop.c new file mode 100644 index 0000000..4d78436 --- /dev/null +++ b/cryptoloop.c @@ -0,0 +1,216 @@ +/* + Linux loop encryption enabling module + + Copyright (C) 2002 Herbert Valerio Riedel + Copyright (C) 2003 Fruhwirth Clemens + + This module is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This module is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this module; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include + +#include +#include +#include +#include +#include +#include +#include "loop_main.h" + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("loop blockdevice transferfunction adaptor / CryptoAPI"); +MODULE_AUTHOR("Herbert Valerio Riedel "); + +#define LOOP_IV_SECTOR_BITS 9 +#define LOOP_IV_SECTOR_SIZE (1 << LOOP_IV_SECTOR_BITS) + +static int +cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info) +{ + int err = -EINVAL; + int cipher_len; + int mode_len; + char cms[LO_NAME_SIZE]; /* cipher-mode string */ + char *mode; + char *cmsp = cms; /* c-m string pointer */ + struct crypto_skcipher *tfm; + + /* encryption breaks for non sector aligned offsets */ + + if (info->lo_offset % LOOP_IV_SECTOR_SIZE) + goto out; + + strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE); + cms[LO_NAME_SIZE - 1] = 0; + + cipher_len = strcspn(cmsp, "-"); + + mode = cmsp + cipher_len; + mode_len = 0; + if (*mode) { + mode++; + mode_len = strcspn(mode, "-"); + } + + if (!mode_len) { + mode = "cbc"; + mode_len = 3; + } + + if (cipher_len + mode_len + 3 > LO_NAME_SIZE) + return -EINVAL; + + memmove(cms, mode, mode_len); + cmsp = cms + mode_len; + *cmsp++ = '('; + memcpy(cmsp, info->lo_crypt_name, cipher_len); + cmsp += cipher_len; + *cmsp++ = ')'; + *cmsp = 0; + + tfm = crypto_alloc_skcipher(cms, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + err = crypto_skcipher_setkey(tfm, info->lo_encrypt_key, + info->lo_encrypt_key_size); + + if (err != 0) + goto out_free_tfm; + + lo->key_data = tfm; + return 0; + + out_free_tfm: + crypto_free_skcipher(tfm); + + out: + return err; +} + + +typedef int (*encdec_cbc_t)(struct skcipher_request *req); + +static int +cryptoloop_transfer(struct loop_device *lo, int cmd, + struct page *raw_page, unsigned raw_off, + struct page *loop_page, unsigned loop_off, + int size, sector_t IV) +{ + struct crypto_skcipher *tfm = lo->key_data; + SKCIPHER_REQUEST_ON_STACK(req, tfm); + struct scatterlist sg_out; + struct scatterlist sg_in; + + encdec_cbc_t encdecfunc; + struct page *in_page, *out_page; + unsigned in_offs, out_offs; + int err; + + skcipher_request_set_tfm(req, tfm); + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, + NULL, NULL); + + sg_init_table(&sg_out, 1); + sg_init_table(&sg_in, 1); + + if (cmd == READ) { + in_page = raw_page; + in_offs = raw_off; + out_page = loop_page; + out_offs = loop_off; + encdecfunc = crypto_skcipher_decrypt; + } else { + in_page = loop_page; + in_offs = loop_off; + out_page = raw_page; + out_offs = raw_off; + encdecfunc = crypto_skcipher_encrypt; + } + + while (size > 0) { + const int sz = min(size, LOOP_IV_SECTOR_SIZE); + u32 iv[4] = { 0, }; + iv[0] = cpu_to_le32(IV & 0xffffffff); + + sg_set_page(&sg_in, in_page, sz, in_offs); + sg_set_page(&sg_out, out_page, sz, out_offs); + + skcipher_request_set_crypt(req, &sg_in, &sg_out, sz, iv); + err = encdecfunc(req); + if (err) + goto out; + + IV++; + size -= sz; + in_offs += sz; + out_offs += sz; + } + + err = 0; + +out: + skcipher_request_zero(req); + return err; +} + +static int +cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg) +{ + return -EINVAL; +} + +static int +cryptoloop_release(struct loop_device *lo) +{ + struct crypto_skcipher *tfm = lo->key_data; + if (tfm != NULL) { + crypto_free_skcipher(tfm); + lo->key_data = NULL; + return 
0; + } + printk(KERN_ERR "cryptoloop_release(): tfm == NULL?\n"); + return -EINVAL; +} + +static struct loop_func_table cryptoloop_funcs = { + .number = LO_CRYPT_CRYPTOAPI, + .init = cryptoloop_init, + .ioctl = cryptoloop_ioctl, + .transfer = cryptoloop_transfer, + .release = cryptoloop_release, + .owner = THIS_MODULE +}; + +static int __init +init_cryptoloop(void) +{ + int rc = loop_register_transfer(&cryptoloop_funcs); + + if (rc) + printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n"); + return rc; +} + +static void __exit +cleanup_cryptoloop(void) +{ + if (loop_unregister_transfer(LO_CRYPT_CRYPTOAPI)) + printk(KERN_ERR + "cryptoloop: loop_unregister_transfer failed\n"); +} + +module_init(init_cryptoloop); +module_exit(cleanup_cryptoloop); diff --git a/loop_file_fmt.c b/loop_file_fmt.c new file mode 100644 index 0000000..ff356f1 --- /dev/null +++ b/loop_file_fmt.c @@ -0,0 +1,328 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * loop_file_fmt.c + * + * File format subsystem for the loop device module. + * + * Copyright (C) 2019 Manuel Bentele + */ + +#include +#include + +#include "loop_file_fmt.h" + +/* storage for all registered file format drivers */ +static struct loop_file_fmt_driver *loop_file_fmt_drivers[MAX_LO_FILE_FMT] = { + NULL +}; + +int loop_file_fmt_register_driver(struct loop_file_fmt_driver *drv) +{ + int ret = 0; + + if (drv == NULL) + return -EFAULT; + + if (drv->file_fmt_type > MAX_LO_FILE_FMT) + return -EINVAL; + + if (loop_file_fmt_drivers[drv->file_fmt_type] == NULL) { + loop_file_fmt_drivers[drv->file_fmt_type] = drv; + printk(KERN_INFO "loop_file_fmt: successfully registered file " + "format driver %s", drv->name); + } else { + printk(KERN_WARNING "loop_file_fmt: driver for file format " + "already registered"); + ret = -EBUSY; + } + + return ret; +} +EXPORT_SYMBOL(loop_file_fmt_register_driver); + +void loop_file_fmt_unregister_driver(struct loop_file_fmt_driver *drv) +{ + if (drv == NULL) + return; + + if (drv->file_fmt_type > MAX_LO_FILE_FMT) + return; + + loop_file_fmt_drivers[drv->file_fmt_type] = NULL; + printk(KERN_INFO "loop_file_fmt: successfully unregistered file " + "format driver %s", drv->name); +} +EXPORT_SYMBOL(loop_file_fmt_unregister_driver); + +struct loop_file_fmt *loop_file_fmt_alloc(void) +{ + return kzalloc(sizeof(struct loop_file_fmt), GFP_KERNEL); +} + +void loop_file_fmt_free(struct loop_file_fmt *lo_fmt) +{ + kfree(lo_fmt); +} + +int loop_file_fmt_set_lo(struct loop_file_fmt *lo_fmt, struct loop_device *lo) +{ + if (lo_fmt == NULL) + return -EINVAL; + + lo_fmt->lo = lo; + + return 0; +} +EXPORT_SYMBOL(loop_file_fmt_set_lo); + +struct loop_device *loop_file_fmt_get_lo(struct loop_file_fmt *lo_fmt) +{ + return lo_fmt->lo; +} +EXPORT_SYMBOL(loop_file_fmt_get_lo); + +int loop_file_fmt_init(struct loop_file_fmt *lo_fmt, + u32 file_fmt_type) +{ + struct loop_file_fmt_ops *ops; + struct module *drv; + int ret = 0; + + if (file_fmt_type > MAX_LO_FILE_FMT) + return -EINVAL; + + lo_fmt->file_fmt_type = file_fmt_type; + + if (lo_fmt->file_fmt_state != file_fmt_uninitialized) { + printk(KERN_WARNING "loop_file_fmt: file format is " + "initialized already"); + return -EINVAL; + } + + /* check if new file format driver is registered */ + if (loop_file_fmt_drivers[lo_fmt->file_fmt_type] == NULL) { + printk(KERN_ERR "loop_file_fmt: file format driver is not " + "available"); + return -ENODEV; + } + + printk(KERN_INFO "loop_file_fmt: use file format driver %s", + loop_file_fmt_drivers[lo_fmt->file_fmt_type]->name); + + drv = 
loop_file_fmt_drivers[lo_fmt->file_fmt_type]->owner; + if (!try_module_get(drv)) { + printk(KERN_ERR "loop_file_fmt: file format driver %s can not " + "be accessed", + loop_file_fmt_drivers[lo_fmt->file_fmt_type]->name); + return -ENODEV; + } + + ops = loop_file_fmt_drivers[lo_fmt->file_fmt_type]->ops; + if (likely(ops->init)) { + ret = ops->init(lo_fmt); + if (ret < 0) + goto free_drv; + } + + /* after increasing the refcount of file format driver module and + * the successful initialization, the file format is initialized */ + lo_fmt->file_fmt_state = file_fmt_initialized; + + return ret; + +free_drv: + module_put(drv); + lo_fmt->file_fmt_state = file_fmt_uninitialized; + return ret; +} + +void loop_file_fmt_exit(struct loop_file_fmt *lo_fmt) +{ + struct loop_file_fmt_ops *ops; + struct module *drv; + + if (lo_fmt->file_fmt_state != file_fmt_initialized) { + printk(KERN_WARNING "loop_file_fmt: file format is " + "uninitialized already"); + return; + } + + ops = loop_file_fmt_drivers[lo_fmt->file_fmt_type]->ops; + if (likely(ops->exit)) + ops->exit(lo_fmt); + + drv = loop_file_fmt_drivers[lo_fmt->file_fmt_type]->owner; + module_put(drv); + + /* after decreasing the refcount of file format driver module, + * the file format is uninitialized */ + lo_fmt->file_fmt_state = file_fmt_uninitialized; +} + +int loop_file_fmt_read(struct loop_file_fmt *lo_fmt, + struct request *rq) +{ + struct loop_file_fmt_ops *ops; + + if (unlikely(lo_fmt->file_fmt_state != file_fmt_initialized)) { + printk(KERN_ERR "loop_file_fmt: file format is " + "not initialized, can not read"); + return -EINVAL; + } + + ops = loop_file_fmt_drivers[lo_fmt->file_fmt_type]->ops; + if (likely(ops->read)) + return ops->read(lo_fmt, rq); + else + return -EIO; +} + +int loop_file_fmt_read_aio(struct loop_file_fmt *lo_fmt, + struct request *rq) +{ + struct loop_file_fmt_ops *ops; + + if (unlikely(lo_fmt->file_fmt_state != file_fmt_initialized)) { + printk(KERN_ERR "loop_file_fmt: file format is " + "not initialized, can not read aio"); + return -EINVAL; + } + + ops = loop_file_fmt_drivers[lo_fmt->file_fmt_type]->ops; + if (likely(ops->read_aio)) + return ops->read_aio(lo_fmt, rq); + else + return -EIO; +} + +int loop_file_fmt_write(struct loop_file_fmt *lo_fmt, + struct request *rq) +{ + struct loop_file_fmt_ops *ops; + + if (unlikely(lo_fmt->file_fmt_state != file_fmt_initialized)) { + printk(KERN_ERR "loop_file_fmt: file format is " + "not initialized, can not write"); + return -EINVAL; + } + + ops = loop_file_fmt_drivers[lo_fmt->file_fmt_type]->ops; + if (likely(ops->write)) + return ops->write(lo_fmt, rq); + else + return -EIO; +} + +int loop_file_fmt_write_aio(struct loop_file_fmt *lo_fmt, + struct request *rq) +{ + struct loop_file_fmt_ops *ops; + + if (unlikely(lo_fmt->file_fmt_state != file_fmt_initialized)) { + printk(KERN_ERR "loop_file_fmt: file format is " + "not initialized, can not write aio"); + return -EINVAL; + } + + ops = loop_file_fmt_drivers[lo_fmt->file_fmt_type]->ops; + if (likely(ops->write_aio)) + return ops->write_aio(lo_fmt, rq); + else + return -EIO; +} + +int loop_file_fmt_discard(struct loop_file_fmt *lo_fmt, + struct request *rq) +{ + struct loop_file_fmt_ops *ops; + + if (unlikely(lo_fmt->file_fmt_state != file_fmt_initialized)) { + printk(KERN_ERR "loop_file_fmt: file format is " + "not initialized, can not discard"); + return -EINVAL; + } + + ops = loop_file_fmt_drivers[lo_fmt->file_fmt_type]->ops; + if (likely(ops->discard)) + return ops->discard(lo_fmt, rq); + else + return -EIO; +} + +int 
loop_file_fmt_flush(struct loop_file_fmt *lo_fmt) +{ + struct loop_file_fmt_ops *ops; + + if (unlikely(lo_fmt->file_fmt_state != file_fmt_initialized)) { + printk(KERN_ERR "loop_file_fmt: file format is " + "not initialized, can not flush"); + return -EINVAL; + } + + ops = loop_file_fmt_drivers[lo_fmt->file_fmt_type]->ops; + if (likely(ops->flush)) + return ops->flush(lo_fmt); + + return 0; +} + +loff_t loop_file_fmt_sector_size(struct loop_file_fmt *lo_fmt) +{ + struct loop_file_fmt_ops *ops; + + if (unlikely(lo_fmt->file_fmt_state != file_fmt_initialized)) { + printk(KERN_ERR "loop_file_fmt: file format is " + "not initialized, can not read sector size"); + return 0; + } + + ops = loop_file_fmt_drivers[lo_fmt->file_fmt_type]->ops; + if (likely(ops->sector_size)) + return ops->sector_size(lo_fmt); + else + return 0; +} + +int loop_file_fmt_change(struct loop_file_fmt *lo_fmt, + u32 file_fmt_type_new) +{ + if (file_fmt_type_new > MAX_LO_FILE_FMT) + return -EINVAL; + + /* Unload the old file format driver if the file format is + * initialized */ + if (lo_fmt->file_fmt_state == file_fmt_initialized) + loop_file_fmt_exit(lo_fmt); + + /* Load the new file format driver because the file format is + * uninitialized now */ + return loop_file_fmt_init(lo_fmt, file_fmt_type_new); +} + +ssize_t loop_file_fmt_print_type(u32 file_fmt_type, char *file_fmt_name) +{ + ssize_t len = 0; + + switch (file_fmt_type) { + case LO_FILE_FMT_RAW: + len = sprintf(file_fmt_name, "%s", "RAW"); + break; + case LO_FILE_FMT_QCOW: + len = sprintf(file_fmt_name, "%s", "QCOW"); + break; + case LO_FILE_FMT_VDI: + len = sprintf(file_fmt_name, "%s", "VDI"); + break; + case LO_FILE_FMT_VMDK: + len = sprintf(file_fmt_name, "%s", "VMDK"); + break; + default: + len = sprintf(file_fmt_name, "%s", "ERROR: Unsupported loop " + "file format!"); + break; + } + + return len; +} +EXPORT_SYMBOL(loop_file_fmt_print_type); diff --git a/loop_file_fmt.h b/loop_file_fmt.h new file mode 100644 index 0000000..5c0e493 --- /dev/null +++ b/loop_file_fmt.h @@ -0,0 +1,351 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * loop_file_fmt.h + * + * File format subsystem for the loop device module. + * + * Copyright (C) 2019 Manuel Bentele + */ + +#ifndef _LINUX_LOOP_FILE_FMT_H +#define _LINUX_LOOP_FILE_FMT_H + +#include "loop_main.h" + +struct loop_file_fmt; + +/** + * struct loop_file_fmt_ops - File format subsystem operations + * + * Data structure representing the file format subsystem interface. 
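+ *
+ * A driver fills in only the callbacks it implements; the dispatch helpers
+ * in loop_file_fmt.c fall back to -EIO for missing IO callbacks and to 0
+ * for a missing @flush or @sector_size. A minimal sketch (the null_fmt_*
+ * names are hypothetical, not part of this patch):
+ *
+ *	static struct loop_file_fmt_ops null_fmt_ops = {
+ *		.init  = null_fmt_init,
+ *		.exit  = null_fmt_exit,
+ *		.read  = null_fmt_read,
+ *		.write = null_fmt_write,
+ *	};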
+ */
+struct loop_file_fmt_ops {
+	/**
+	 * @init: Initialization callback function
+	 */
+	int (*init) (struct loop_file_fmt *lo_fmt);
+
+	/**
+	 * @exit: Release callback function
+	 */
+	void (*exit) (struct loop_file_fmt *lo_fmt);
+
+	/**
+	 * @read: Read IO callback function
+	 */
+	int (*read) (struct loop_file_fmt *lo_fmt,
+		     struct request *rq);
+
+	/**
+	 * @write: Write IO callback function
+	 */
+	int (*write) (struct loop_file_fmt *lo_fmt,
+		      struct request *rq);
+
+	/**
+	 * @read_aio: Asynchronous read IO callback function
+	 */
+	int (*read_aio) (struct loop_file_fmt *lo_fmt,
+			 struct request *rq);
+
+	/**
+	 * @write_aio: Asynchronous write IO callback function
+	 */
+	int (*write_aio) (struct loop_file_fmt *lo_fmt,
+			  struct request *rq);
+
+	/**
+	 * @discard: Discard IO callback function
+	 */
+	int (*discard) (struct loop_file_fmt *lo_fmt,
+			struct request *rq);
+
+	/**
+	 * @flush: Flush callback function
+	 */
+	int (*flush) (struct loop_file_fmt *lo_fmt);
+
+	/**
+	 * @sector_size: Get sector size callback function
+	 */
+	loff_t (*sector_size) (struct loop_file_fmt *lo_fmt);
+};
+
+/**
+ * struct loop_file_fmt_driver - File format subsystem driver
+ *
+ * Data structure to implement file format drivers for the file format
+ * subsystem.
+ */
+struct loop_file_fmt_driver {
+	/**
+	 * @name: Name of the file format driver
+	 */
+	const char *name;
+
+	/**
+	 * @file_fmt_type: Loop file format type of the file format driver
+	 */
+	const u32 file_fmt_type;
+
+	/**
+	 * @ops: Driver's implemented file format operations
+	 */
+	struct loop_file_fmt_ops *ops;
+
+	/**
+	 * @owner: Owner of the file format driver
+	 */
+	struct module *owner;
+};
+
+/*
+ * states of the file format
+ *
+ * transitions:
+ *                     loop_file_fmt_init(...)
+ *   ---> uninitialized ------------------------------> initialized
+ *                     loop_file_fmt_exit(...)
+ *        initialized  ------------------------------> uninitialized
+ *                     loop_file_fmt_read(...)
+ *        initialized  ------------------------------> initialized
+ *                     loop_file_fmt_read_aio(...)
+ *        initialized  ------------------------------> initialized
+ *                     loop_file_fmt_write(...)
+ *        initialized  ------------------------------> initialized
+ *                     loop_file_fmt_write_aio(...)
+ *        initialized  ------------------------------> initialized
+ *                     loop_file_fmt_discard(...)
+ *        initialized  ------------------------------> initialized
+ *                     loop_file_fmt_flush(...)
+ *        initialized  ------------------------------> initialized
+ *                     loop_file_fmt_sector_size(...)
+ *        initialized  ------------------------------> initialized
+ *
+ *                     loop_file_fmt_change(...)
+ *   +-----------------------------------------------------------+
+ *   |      exit(...)                     init(...)              |
+ *   |  initialized -------> uninitialized -------> initialized  |
+ *   +-----------------------------------------------------------+
+ */
+enum {
+	file_fmt_uninitialized = 0,
+	file_fmt_initialized
+};
+
+/**
+ * struct loop_file_fmt - Loop file format
+ *
+ * Data structure to use with the loop file format subsystem.
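+ *
+ * A driver keeps its per-device state behind @private_data: the QCOW
+ * driver of this patch, for example, allocates its
+ * struct loop_file_fmt_qcow_data in its init callback and retrieves it
+ * from lo_fmt->private_data in every subsequent callback.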
+ */ +struct loop_file_fmt { + /** + * @file_fmt_type: Current type of the loop file format + */ + u32 file_fmt_type; + + /** + * @file_fmt_state: Current state of the loop file format + */ + int file_fmt_state; + + /** + * @lo: Link to a file format's loop device + */ + struct loop_device *lo; + + /** + * @private_data: Optional link to a file format's driver specific data + */ + void *private_data; +}; + + +/* subsystem functions for the driver implementation */ + +/** + * loop_file_fmt_register_driver - Register a loop file format driver + * @drv: File format driver + * + * Registers the specified loop file format driver @drv by the loop file format + * subsystem. + */ +extern int loop_file_fmt_register_driver(struct loop_file_fmt_driver *drv); + +/** + * loop_file_fmt_unregister_driver - Unregister a loop file format driver + * @drv: File format driver + * + * Unregisters the specified loop file format driver @drv from the loop file + * format subsystem. + */ +extern void loop_file_fmt_unregister_driver(struct loop_file_fmt_driver *drv); + + +/* subsystem functions for subsystem usage */ + +/** + * loop_file_fmt_alloc - Allocate a loop file format + * + * Dynamically allocates a loop file format and returns a pointer to the + * created loop file format. + */ +extern struct loop_file_fmt *loop_file_fmt_alloc(void); + +/** + * loop_file_fmt_free - Free an allocated loop file format + * @lo_fmt: Loop file format + * + * Frees the already allocated loop file format @lo_fmt. + */ +extern void loop_file_fmt_free(struct loop_file_fmt *lo_fmt); + +/** + * loop_file_fmt_set_lo - Set the loop file format's loop device + * @lo_fmt: Loop file format + * @lo: Loop device + * + * The link to the loop device @lo is set in the loop file format @lo_fmt. + */ +extern int loop_file_fmt_set_lo(struct loop_file_fmt *lo_fmt, + struct loop_device *lo); + +/** + * loop_file_fmt_get_lo - Get the loop file format's loop device + * @lo_fmt: Loop file format + * + * Returns a pointer to the loop device of the loop file format @lo_fmt. + */ +extern struct loop_device *loop_file_fmt_get_lo(struct loop_file_fmt *lo_fmt); + +/** + * loop_file_fmt_init - Initialize a loop file format + * @lo_fmt: Loop file format + * @file_fmt_type: Type of the file format + * + * Initializes the specified loop file format @lo_fmt and sets up the correct + * file format type @file_fmt_type. Depending on @file_fmt_type, the correct + * loop file format driver is loaded in the subsystems backend. If no loop file + * format driver for the specified file format is available an error is + * returned. + */ +extern int loop_file_fmt_init(struct loop_file_fmt *lo_fmt, + u32 file_fmt_type); + +/** + * loop_file_fmt_exit - Release a loop file format + * @lo_fmt: Loop file format + * + * Releases the specified loop file format @lo_fmt and all its resources. + */ +extern void loop_file_fmt_exit(struct loop_file_fmt *lo_fmt); + +/** + * loop_file_fmt_read - Read IO from a loop file format + * @lo_fmt: Loop file format + * @rq: IO Request + * + * Reads IO from the file format's loop device by sending the IO read request + * @rq to the loop file format subsystem. The subsystem calls the registered + * callback function of the suitable loop file format driver. 
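+ *
+ * Expected call pattern in the loop driver (sketch; assumes the loop
+ * device keeps its file format instance in a member named lo_fmt):
+ *
+ *	ret = loop_file_fmt_read(lo->lo_fmt, rq);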
+ */ +extern int loop_file_fmt_read(struct loop_file_fmt *lo_fmt, + struct request *rq); + +/** + * loop_file_fmt_read_aio - Read IO from a loop file format asynchronously + * @lo_fmt: Loop file format + * @rq: IO Request + * + * Reads IO from the file format's loop device asynchronously by sending the + * IO read aio request @rq to the loop file format subsystem. The subsystem + * calls the registered callback function of the suitable loop file format + * driver. + */ +extern int loop_file_fmt_read_aio(struct loop_file_fmt *lo_fmt, + struct request *rq); + +/** + * loop_file_fmt_write - Write IO to a loop file format + * @lo_fmt: Loop file format + * @rq: IO Request + * + * Write IO to the file format's loop device by sending the IO write request + * @rq to the loop file format subsystem. The subsystem calls the registered + * callback function of the suitable loop file format driver. + */ +extern int loop_file_fmt_write(struct loop_file_fmt *lo_fmt, + struct request *rq); + +/** + * loop_file_fmt_write_aio - Write IO to a loop file format asynchronously + * @lo_fmt: Loop file format + * @rq: IO Request + * + * Write IO to the file format's loop device asynchronously by sending the + * IO write aio request @rq to the loop file format subsystem. The subsystem + * calls the registered callback function of the suitable loop file format + * driver. + */ +extern int loop_file_fmt_write_aio(struct loop_file_fmt *lo_fmt, + struct request *rq); + +/** + * loop_file_fmt_discard - Discard IO on a loop file format + * @lo_fmt: Loop file format + * @rq: IO Request + * + * Discard IO on the file format's loop device by sending the IO discard + * request @rq to the loop file format subsystem. The subsystem calls the + * registered callback function of the suitable loop file format driver. + */ +extern int loop_file_fmt_discard(struct loop_file_fmt *lo_fmt, + struct request *rq); + +/** + * loop_file_fmt_flush - Flush a loop file format + * @lo_fmt: Loop file format + * + * Flush the file format's loop device by calling the registered callback + * function of the suitable loop file format driver. + */ +extern int loop_file_fmt_flush(struct loop_file_fmt *lo_fmt); + +/** + * loop_file_fmt_sector_size - Get sector size of a loop file format + * @lo_fmt: Loop file format + * + * Returns the physical sector size of the loop file format's loop device. + * If the loop file format implements a sparse disk image format, then this + * function returns the virtual sector size. + */ +extern loff_t loop_file_fmt_sector_size(struct loop_file_fmt *lo_fmt); + +/** + * loop_file_fmt_change - Change the loop file format's type + * @lo_fmt: Loop file format + * @file_fmt_type_new: Loop file format type + * + * Changes the file format type of the already initialized loop file format + * @lo_fmt. Therefore, the function releases the old file format and frees all + * of its resources before the loop file format @lo_fmt is initialized and set + * up with the new file format @file_fmt_type_new. + */ +extern int loop_file_fmt_change(struct loop_file_fmt *lo_fmt, + u32 file_fmt_type_new); + + +/* helper functions of the subsystem */ + +/** + * loop_file_fmt_print_type - Convert file format type to string + * @file_fmt_type: Loop file format type + * @file_fmt_name: Loop file format type string + * + * Converts the specified numeric @file_fmt_type value into a human readable + * string stating the file format as string in @file_fmt_name. 
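+ *
+ * Example: loop_file_fmt_print_type(LO_FILE_FMT_QCOW, buf) writes "QCOW"
+ * to buf and returns its length. Note that the caller must supply a
+ * buffer large enough for the longest case, the error string emitted for
+ * unsupported format types.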
+ */ +extern ssize_t loop_file_fmt_print_type(u32 file_fmt_type, + char *file_fmt_name); + +#endif diff --git a/loop_file_fmt_qcow_cache.c b/loop_file_fmt_qcow_cache.c new file mode 100644 index 0000000..7d3af73 --- /dev/null +++ b/loop_file_fmt_qcow_cache.c @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * loop_file_fmt_qcow_cache.c + * + * QCOW file format driver for the loop device module. + * + * Ported QCOW2 implementation of the QEMU project (GPL-2.0): + * L2/refcount table cache for the QCOW2 format. + * + * The copyright (C) 2010 of the original code is owned by + * Kevin Wolf + * + * Copyright (C) 2019 Manuel Bentele + */ + +#include +#include +#include +#include +#include +#include + +#include "loop_file_fmt_qcow_main.h" +#include "loop_file_fmt_qcow_cache.h" + +static inline void *__loop_file_fmt_qcow_cache_get_table_addr( + struct loop_file_fmt_qcow_cache *c, int table) +{ + return (u8 *) c->table_array + (size_t) table * c->table_size; +} + +static inline int __loop_file_fmt_qcow_cache_get_table_idx( + struct loop_file_fmt_qcow_cache *c, void *table) +{ + ptrdiff_t table_offset = (u8 *) table - (u8 *) c->table_array; + int idx = table_offset / c->table_size; + ASSERT(idx >= 0 && idx < c->size && table_offset % c->table_size == 0); + return idx; +} + +static inline const char *__loop_file_fmt_qcow_cache_get_name( + struct loop_file_fmt *lo_fmt, struct loop_file_fmt_qcow_cache *c) +{ + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; + + if (c == qcow_data->refcount_block_cache) { + return "refcount block"; + } else if (c == qcow_data->l2_table_cache) { + return "L2 table"; + } else { + /* do not abort, because this is not critical */ + return "unknown"; + } +} + +struct loop_file_fmt_qcow_cache *loop_file_fmt_qcow_cache_create( + struct loop_file_fmt *lo_fmt, int num_tables, unsigned table_size) +{ +#ifdef CONFIG_DEBUG_DRIVER + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; +#endif + struct loop_file_fmt_qcow_cache *c; + + ASSERT(num_tables > 0); + ASSERT(is_power_of_2(table_size)); + ASSERT(table_size >= (1 << QCOW_MIN_CLUSTER_BITS)); + ASSERT(table_size <= qcow_data->cluster_size); + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) { + return NULL; + } + + c->size = num_tables; + c->table_size = table_size; + c->entries = vzalloc(sizeof(struct loop_file_fmt_qcow_cache_table) * + num_tables); + c->table_array = vzalloc(num_tables * c->table_size); + + if (!c->entries || !c->table_array) { + vfree(c->table_array); + vfree(c->entries); + kfree(c); + c = NULL; + } + + return c; +} + +void loop_file_fmt_qcow_cache_destroy(struct loop_file_fmt *lo_fmt) +{ + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; + struct loop_file_fmt_qcow_cache *c = qcow_data->l2_table_cache; + int i; + + for (i = 0; i < c->size; i++) { + ASSERT(c->entries[i].ref == 0); + } + + vfree(c->table_array); + vfree(c->entries); + kfree(c); +} + +static int __loop_file_fmt_qcow_cache_entry_flush( + struct loop_file_fmt_qcow_cache *c, int i) +{ + if (!c->entries[i].dirty || !c->entries[i].offset) { + return 0; + } else { + printk(KERN_ERR "loop_file_fmt_qcow: Flush dirty cache tables " + "is not supported yet\n"); + return -ENOSYS; + } +} + +static int __loop_file_fmt_qcow_cache_do_get(struct loop_file_fmt *lo_fmt, + struct loop_file_fmt_qcow_cache *c, u64 offset, void **table, + bool read_from_disk) +{ + struct loop_device *lo = loop_file_fmt_get_lo(lo_fmt); + int i; + int ret; + int lookup_index; + u64 min_lru_counter = U64_MAX; + int 
min_lru_index = -1;
+	u64 read_offset;
+	ssize_t len;
+
+	ASSERT(offset != 0);
+
+	if (!IS_ALIGNED(offset, c->table_size)) {
+		printk_ratelimited(KERN_ERR "loop_file_fmt_qcow: Cannot get "
+			"entry from %s cache: offset %llx is unaligned\n",
+			__loop_file_fmt_qcow_cache_get_name(lo_fmt, c),
+			offset);
+		return -EIO;
+	}
+
+	/* Check if the table is already cached */
+	i = lookup_index = (offset / c->table_size * 4) % c->size;
+	do {
+		const struct loop_file_fmt_qcow_cache_table *t =
+			&c->entries[i];
+		if (t->offset == offset) {
+			goto found;
+		}
+		if (t->ref == 0 && t->lru_counter < min_lru_counter) {
+			min_lru_counter = t->lru_counter;
+			min_lru_index = i;
+		}
+		if (++i == c->size) {
+			i = 0;
+		}
+	} while (i != lookup_index);
+
+	if (min_lru_index == -1) {
+		BUG();
+		panic("Oops: This can't happen in current synchronous code, "
+			"but leave the check here as a reminder for whoever "
+			"starts using AIO with the QCOW cache");
+	}
+
+	/* Cache miss: write a table back and replace it */
+	i = min_lru_index;
+
+	ret = __loop_file_fmt_qcow_cache_entry_flush(c, i);
+	if (ret < 0) {
+		return ret;
+	}
+
+	c->entries[i].offset = 0;
+	if (read_from_disk) {
+		read_offset = offset;
+		len = kernel_read(lo->lo_backing_file,
+			__loop_file_fmt_qcow_cache_get_table_addr(c, i),
+			c->table_size, &read_offset);
+		if (len < 0) {
+			ret = len;
+			return ret;
+		}
+	}
+
+	c->entries[i].offset = offset;
+
+	/* And return the right table */
+found:
+	c->entries[i].ref++;
+	*table = __loop_file_fmt_qcow_cache_get_table_addr(c, i);
+
+	return 0;
+}
+
+int loop_file_fmt_qcow_cache_get(struct loop_file_fmt *lo_fmt, u64 offset,
+	void **table)
+{
+	struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data;
+	struct loop_file_fmt_qcow_cache *c = qcow_data->l2_table_cache;
+
+	return __loop_file_fmt_qcow_cache_do_get(lo_fmt, c, offset, table,
+		true);
+}
+
+void loop_file_fmt_qcow_cache_put(struct loop_file_fmt *lo_fmt, void **table)
+{
+	struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data;
+	struct loop_file_fmt_qcow_cache *c = qcow_data->l2_table_cache;
+	int i = __loop_file_fmt_qcow_cache_get_table_idx(c, *table);
+
+	c->entries[i].ref--;
+	*table = NULL;
+
+	if (c->entries[i].ref == 0) {
+		c->entries[i].lru_counter = ++c->lru_counter;
+	}
+
+	ASSERT(c->entries[i].ref >= 0);
+}
diff --git a/loop_file_fmt_qcow_cache.h b/loop_file_fmt_qcow_cache.h
new file mode 100644
index 0000000..1abf9b2
--- /dev/null
+++ b/loop_file_fmt_qcow_cache.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * loop_file_fmt_qcow_cache.h
+ *
+ * Ported QCOW2 implementation of the QEMU project (GPL-2.0):
+ * L2/refcount table cache for the QCOW2 format.
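+ *
+ * Typical usage (sketch, mirroring loop_file_fmt_qcow_cluster.c): get a
+ * table slice, read the entries of interest, then drop the reference:
+ *
+ *	u64 *l2_slice;
+ *	int ret = loop_file_fmt_qcow_cache_get(lo_fmt, l2_offset,
+ *					       (void **) &l2_slice);
+ *	if (ret < 0)
+ *		return ret;
+ *	entry = be64_to_cpu(l2_slice[l2_index]);
+ *	loop_file_fmt_qcow_cache_put(lo_fmt, (void **) &l2_slice);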
+ * + * The copyright (C) 2010 of the original code is owned by + * Kevin Wolf + * + * Copyright (C) 2019 Manuel Bentele + */ + +#ifndef _LINUX_LOOP_FILE_FMT_QCOW_CACHE_H +#define _LINUX_LOOP_FILE_FMT_QCOW_CACHE_H + +#include "loop_file_fmt.h" + +struct loop_file_fmt_qcow_cache_table { + s64 offset; + u64 lru_counter; + int ref; + bool dirty; +}; + +struct loop_file_fmt_qcow_cache { + struct loop_file_fmt_qcow_cache_table *entries; + struct loop_file_fmt_qcow_cache *depends; + int size; + int table_size; + bool depends_on_flush; + void *table_array; + u64 lru_counter; + u64 cache_clean_lru_counter; +}; + +extern struct loop_file_fmt_qcow_cache *loop_file_fmt_qcow_cache_create( + struct loop_file_fmt *lo_fmt, + int num_tables, + unsigned table_size); + +extern void loop_file_fmt_qcow_cache_destroy(struct loop_file_fmt *lo_fmt); + +extern int loop_file_fmt_qcow_cache_get(struct loop_file_fmt *lo_fmt, + u64 offset, + void **table); + +extern void loop_file_fmt_qcow_cache_put(struct loop_file_fmt *lo_fmt, + void **table); + +#endif diff --git a/loop_file_fmt_qcow_cluster.c b/loop_file_fmt_qcow_cluster.c new file mode 100644 index 0000000..9c91a8b --- /dev/null +++ b/loop_file_fmt_qcow_cluster.c @@ -0,0 +1,270 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * loop_file_fmt_qcow_cluster.c + * + * Ported QCOW2 implementation of the QEMU project (GPL-2.0): + * Cluster calculation and lookup for the QCOW2 format. + * + * The copyright (C) 2004-2006 of the original code is owned by Fabrice Bellard. + * + * Copyright (C) 2019 Manuel Bentele + */ + +#include +#include + +#include "loop_file_fmt.h" +#include "loop_file_fmt_qcow_main.h" +#include "loop_file_fmt_qcow_cache.h" +#include "loop_file_fmt_qcow_cluster.h" + +/* + * Loads a L2 slice into memory (L2 slices are the parts of L2 tables + * that are loaded by the qcow2 cache). If the slice is in the cache, + * the cache is used; otherwise the L2 slice is loaded from the image + * file. + */ +static int __loop_file_fmt_qcow_cluster_l2_load(struct loop_file_fmt *lo_fmt, + u64 offset, u64 l2_offset, u64 **l2_slice) +{ + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; + + int start_of_slice = sizeof(u64) * ( + loop_file_fmt_qcow_offset_to_l2_index(qcow_data, offset) - + loop_file_fmt_qcow_offset_to_l2_slice_index(qcow_data, offset) + ); + + ASSERT(qcow_data->l2_table_cache != NULL); + return loop_file_fmt_qcow_cache_get(lo_fmt, l2_offset + start_of_slice, + (void **) l2_slice); +} + +/* + * Checks how many clusters in a given L2 slice are contiguous in the image + * file. As soon as one of the flags in the bitmask stop_flags changes compared + * to the first cluster, the search is stopped and the cluster is not counted + * as contiguous. 
(This allows it, for example, to stop at the first compressed + * cluster which may require a different handling) + */ +static int __loop_file_fmt_qcow_cluster_count_contiguous( + struct loop_file_fmt *lo_fmt, int nb_clusters, int cluster_size, + u64 *l2_slice, u64 stop_flags) +{ + int i; + enum loop_file_fmt_qcow_cluster_type first_cluster_type; + u64 mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED; + u64 first_entry = be64_to_cpu(l2_slice[0]); + u64 offset = first_entry & mask; + + first_cluster_type = loop_file_fmt_qcow_get_cluster_type(lo_fmt, + first_entry); + if (first_cluster_type == QCOW_CLUSTER_UNALLOCATED) { + return 0; + } + + /* must be allocated */ + ASSERT(first_cluster_type == QCOW_CLUSTER_NORMAL || + first_cluster_type == QCOW_CLUSTER_ZERO_ALLOC); + + for (i = 0; i < nb_clusters; i++) { + u64 l2_entry = be64_to_cpu(l2_slice[i]) & mask; + if (offset + (u64) i * cluster_size != l2_entry) { + break; + } + } + + return i; +} + +/* + * Checks how many consecutive unallocated clusters in a given L2 + * slice have the same cluster type. + */ +static int __loop_file_fmt_qcow_cluster_count_contiguous_unallocated( + struct loop_file_fmt *lo_fmt, int nb_clusters, u64 *l2_slice, + enum loop_file_fmt_qcow_cluster_type wanted_type) +{ + int i; + + ASSERT(wanted_type == QCOW_CLUSTER_ZERO_PLAIN || + wanted_type == QCOW_CLUSTER_UNALLOCATED); + + for (i = 0; i < nb_clusters; i++) { + u64 entry = be64_to_cpu(l2_slice[i]); + enum loop_file_fmt_qcow_cluster_type type = + loop_file_fmt_qcow_get_cluster_type(lo_fmt, entry); + + if (type != wanted_type) { + break; + } + } + + return i; +} + +/* + * For a given offset of the virtual disk, find the cluster type and offset in + * the qcow2 file. The offset is stored in *cluster_offset. + * + * On entry, *bytes is the maximum number of contiguous bytes starting at + * offset that we are interested in. + * + * On exit, *bytes is the number of bytes starting at offset that have the same + * cluster type and (if applicable) are stored contiguously in the image file. + * Compressed clusters are always returned one by one. + * + * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error + * cases. 
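+ *
+ * Worked example (illustrative numbers): with 64 KiB clusters
+ * (cluster_bits = 16), a lookup at offset 0x11000 with *bytes = 0x40000
+ * gives offset_in_cluster = 0x1000; if only that single cluster is
+ * contiguously allocated, the function returns its cluster type and
+ * shrinks *bytes to 0xf000, the bytes remaining in the cluster.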
+ */ +int loop_file_fmt_qcow_cluster_get_offset(struct loop_file_fmt *lo_fmt, + u64 offset, unsigned int *bytes, u64 *cluster_offset) +{ + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; + unsigned int l2_index; + u64 l1_index, l2_offset, *l2_slice; + int c; + unsigned int offset_in_cluster; + u64 bytes_available, bytes_needed, nb_clusters; + enum loop_file_fmt_qcow_cluster_type type; + int ret; + + offset_in_cluster = loop_file_fmt_qcow_offset_into_cluster(qcow_data, + offset); + bytes_needed = (u64) *bytes + offset_in_cluster; + + /* compute how many bytes there are between the start of the cluster + * containing offset and the end of the l2 slice that contains + * the entry pointing to it */ + bytes_available = ((u64)( + qcow_data->l2_slice_size - + loop_file_fmt_qcow_offset_to_l2_slice_index(qcow_data, offset)) + ) << qcow_data->cluster_bits; + + if (bytes_needed > bytes_available) { + bytes_needed = bytes_available; + } + + *cluster_offset = 0; + + /* seek to the l2 offset in the l1 table */ + l1_index = loop_file_fmt_qcow_offset_to_l1_index(qcow_data, offset); + if (l1_index >= qcow_data->l1_size) { + type = QCOW_CLUSTER_UNALLOCATED; + goto out; + } + + l2_offset = qcow_data->l1_table[l1_index] & L1E_OFFSET_MASK; + if (!l2_offset) { + type = QCOW_CLUSTER_UNALLOCATED; + goto out; + } + + if (loop_file_fmt_qcow_offset_into_cluster(qcow_data, l2_offset)) { + printk_ratelimited(KERN_ERR "loop_file_fmt_qcow: L2 table " + "offset %llx unaligned (L1 index: %llx)", l2_offset, + l1_index); + return -EIO; + } + + /* load the l2 slice in memory */ + ret = __loop_file_fmt_qcow_cluster_l2_load(lo_fmt, offset, l2_offset, + &l2_slice); + if (ret < 0) { + return ret; + } + + /* find the cluster offset for the given disk offset */ + l2_index = loop_file_fmt_qcow_offset_to_l2_slice_index(qcow_data, + offset); + *cluster_offset = be64_to_cpu(l2_slice[l2_index]); + + nb_clusters = loop_file_fmt_qcow_size_to_clusters(qcow_data, + bytes_needed); + /* bytes_needed <= *bytes + offset_in_cluster, both of which are + * unsigned integers; the minimum cluster size is 512, so this + * assertion is always true */ + ASSERT(nb_clusters <= INT_MAX); + + type = loop_file_fmt_qcow_get_cluster_type(lo_fmt, *cluster_offset); + if (qcow_data->qcow_version < 3 && ( + type == QCOW_CLUSTER_ZERO_PLAIN || + type == QCOW_CLUSTER_ZERO_ALLOC)) { + printk_ratelimited(KERN_ERR "loop_file_fmt_qcow: zero cluster " + "entry found in pre-v3 image (L2 offset: %llx, " + "L2 index: %x)\n", l2_offset, l2_index); + ret = -EIO; + goto fail; + } + switch (type) { + case QCOW_CLUSTER_COMPRESSED: + if (loop_file_fmt_qcow_has_data_file(lo_fmt)) { + printk_ratelimited(KERN_ERR "loop_file_fmt_qcow: " + "compressed cluster entry found in image with " + "external data file (L2 offset: %llx, " + "L2 index: %x)", l2_offset, l2_index); + ret = -EIO; + goto fail; + } + /* Compressed clusters can only be processed one by one */ + c = 1; + *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK; + break; + case QCOW_CLUSTER_ZERO_PLAIN: + case QCOW_CLUSTER_UNALLOCATED: + /* how many empty clusters ? */ + c = __loop_file_fmt_qcow_cluster_count_contiguous_unallocated( + lo_fmt, nb_clusters, &l2_slice[l2_index], type); + *cluster_offset = 0; + break; + case QCOW_CLUSTER_ZERO_ALLOC: + case QCOW_CLUSTER_NORMAL: + /* how many allocated clusters ? 
*/ + c = __loop_file_fmt_qcow_cluster_count_contiguous(lo_fmt, + nb_clusters, qcow_data->cluster_size, + &l2_slice[l2_index], QCOW_OFLAG_ZERO); + *cluster_offset &= L2E_OFFSET_MASK; + if (loop_file_fmt_qcow_offset_into_cluster(qcow_data, + *cluster_offset)) { + printk_ratelimited(KERN_ERR "loop_file_fmt_qcow: " + "cluster allocation offset %llx unaligned " + "(L2 offset: %llx, L2 index: %x)\n", + *cluster_offset, l2_offset, l2_index); + ret = -EIO; + goto fail; + } + if (loop_file_fmt_qcow_has_data_file(lo_fmt) && + *cluster_offset != offset - offset_in_cluster) { + printk_ratelimited(KERN_ERR "loop_file_fmt_qcow: " + "external data file host cluster offset %llx " + "does not match guest cluster offset: %llx, " + "L2 index: %x)", *cluster_offset, + offset - offset_in_cluster, l2_index); + ret = -EIO; + goto fail; + } + break; + default: + BUG(); + } + + loop_file_fmt_qcow_cache_put(lo_fmt, (void **) &l2_slice); + + bytes_available = (s64) c * qcow_data->cluster_size; + +out: + if (bytes_available > bytes_needed) { + bytes_available = bytes_needed; + } + + /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster; + * subtracting offset_in_cluster will therefore definitely yield + * something not exceeding UINT_MAX */ + ASSERT(bytes_available - offset_in_cluster <= UINT_MAX); + *bytes = bytes_available - offset_in_cluster; + + return type; + +fail: + loop_file_fmt_qcow_cache_put(lo_fmt, (void **) &l2_slice); + return ret; +} diff --git a/loop_file_fmt_qcow_cluster.h b/loop_file_fmt_qcow_cluster.h new file mode 100644 index 0000000..d62e331 --- /dev/null +++ b/loop_file_fmt_qcow_cluster.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * loop_file_fmt_qcow_cluster.h + * + * Ported QCOW2 implementation of the QEMU project (GPL-2.0): + * Cluster calculation and lookup for the QCOW2 format. + * + * The copyright (C) 2004-2006 of the original code is owned by Fabrice Bellard. + * + * Copyright (C) 2019 Manuel Bentele + */ + +#ifndef _LINUX_LOOP_FILE_FMT_QCOW_CLUSTER_H +#define _LINUX_LOOP_FILE_FMT_QCOW_CLUSTER_H + +#include "loop_file_fmt.h" + +extern int loop_file_fmt_qcow_cluster_get_offset(struct loop_file_fmt *lo_fmt, + u64 offset, + unsigned int *bytes, + u64 *cluster_offset); + +#endif diff --git a/loop_file_fmt_qcow_main.c b/loop_file_fmt_qcow_main.c new file mode 100644 index 0000000..4fb786b --- /dev/null +++ b/loop_file_fmt_qcow_main.c @@ -0,0 +1,945 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * loop_file_fmt_qcow.c + * + * QCOW file format driver for the loop device module. 
+ * + * Copyright (C) 2019 Manuel Bentele + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "loop_file_fmt.h" +#include "loop_file_fmt_qcow_main.h" +#include "loop_file_fmt_qcow_cache.h" +#include "loop_file_fmt_qcow_cluster.h" + +static int __qcow_file_fmt_header_read(struct loop_file_fmt *lo_fmt, + struct loop_file_fmt_qcow_header *header) +{ + struct loop_device *lo = loop_file_fmt_get_lo(lo_fmt); + ssize_t len; + loff_t offset; + int ret = 0; + + /* read QCOW header */ + offset = 0; + len = kernel_read(lo->lo_backing_file, header, sizeof(*header), + &offset); + if (len < 0) { + printk(KERN_ERR "loop_file_fmt_qcow: could not read QCOW " + "header"); + return len; + } + + header->magic = be32_to_cpu(header->magic); + header->version = be32_to_cpu(header->version); + header->backing_file_offset = be64_to_cpu(header->backing_file_offset); + header->backing_file_size = be32_to_cpu(header->backing_file_size); + header->cluster_bits = be32_to_cpu(header->cluster_bits); + header->size = be64_to_cpu(header->size); + header->crypt_method = be32_to_cpu(header->crypt_method); + header->l1_size = be32_to_cpu(header->l1_size); + header->l1_table_offset = be64_to_cpu(header->l1_table_offset); + header->refcount_table_offset = + be64_to_cpu(header->refcount_table_offset); + header->refcount_table_clusters = + be32_to_cpu(header->refcount_table_clusters); + header->nb_snapshots = be32_to_cpu(header->nb_snapshots); + header->snapshots_offset = be64_to_cpu(header->snapshots_offset); + + /* check QCOW file format and header version */ + if (header->magic != QCOW_MAGIC) { + printk(KERN_ERR "loop_file_fmt_qcow: image is not in QCOW " + "format"); + return -EINVAL; + } + + if (header->version < 2 || header->version > 3) { + printk(KERN_ERR "loop_file_fmt_qcow: unsupported QCOW version " + "%d", header->version); + return -ENOTSUPP; + } + + /* initialize version 3 header fields */ + if (header->version == 2) { + header->incompatible_features = 0; + header->compatible_features = 0; + header->autoclear_features = 0; + header->refcount_order = 4; + header->header_length = 72; + } else { + header->incompatible_features = + be64_to_cpu(header->incompatible_features); + header->compatible_features = + be64_to_cpu(header->compatible_features); + header->autoclear_features = + be64_to_cpu(header->autoclear_features); + header->refcount_order = be32_to_cpu(header->refcount_order); + header->header_length = be32_to_cpu(header->header_length); + + if (header->header_length < 104) { + printk(KERN_ERR "loop_file_fmt_qcow: QCOW header too " + "short"); + return -EINVAL; + } + } + + return ret; +} + +static int __qcow_file_fmt_validate_table(struct loop_file_fmt *lo_fmt, + u64 offset, u64 entries, size_t entry_len, s64 max_size_bytes, + const char *table_name) +{ + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; + + if (entries > max_size_bytes / entry_len) { + printk(KERN_INFO "loop_file_fmt_qcow: %s too large", + table_name); + return -EFBIG; + } + + /* Use signed S64_MAX as the maximum even for u64 header fields, + * because values will be passed to qemu functions taking s64. 
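+	 * For example, with entry_len = 8 the check above rejects any entry
+	 * count larger than max_size_bytes / 8 before entries * entry_len
+	 * could overflow, and the S64_MAX test below catches offsets that
+	 * would push the end of the table past the signed 64-bit range.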
*/ + if ((S64_MAX - entries * entry_len < offset) || ( + loop_file_fmt_qcow_offset_into_cluster(qcow_data, offset) != 0) + ) { + printk(KERN_INFO "loop_file_fmt_qcow: %s offset invalid", + table_name); + return -EINVAL; + } + + return 0; +} + +static inline loff_t __qcow_file_fmt_rq_get_pos(struct loop_file_fmt *lo_fmt, + struct request *rq) +{ + struct loop_device *lo = loop_file_fmt_get_lo(lo_fmt); + return ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset; +} + +static int __qcow_file_fmt_compression_init(struct loop_file_fmt *lo_fmt) +{ + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; + int ret = 0; + + qcow_data->strm = kzalloc(sizeof(*qcow_data->strm), GFP_KERNEL); + if (!qcow_data->strm) { + ret = -ENOMEM; + goto out; + } + + qcow_data->strm->workspace = vzalloc(zlib_inflate_workspacesize()); + if (!qcow_data->strm->workspace) { + ret = -ENOMEM; + goto out_free_strm; + } + + return ret; + +out_free_strm: + kfree(qcow_data->strm); +out: + return ret; +} + +static void __qcow_file_fmt_compression_exit(struct loop_file_fmt *lo_fmt) +{ + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; + + if (qcow_data->strm->workspace) + vfree(qcow_data->strm->workspace); + + if (qcow_data->strm) + kfree(qcow_data->strm); +} + +#ifdef CONFIG_DEBUG_FS +static void __qcow_file_fmt_header_to_buf(struct loop_file_fmt *lo_fmt, + const struct loop_file_fmt_qcow_header *header) +{ + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; + char *header_buf = qcow_data->dbgfs_file_qcow_header_buf; + ssize_t len = 0; + + len += sprintf(header_buf + len, "magic: %d\n", + header->magic); + len += sprintf(header_buf + len, "version: %d\n", + header->version); + len += sprintf(header_buf + len, "backing_file_offset: %lld\n", + header->backing_file_offset); + len += sprintf(header_buf + len, "backing_file_size: %d\n", + header->backing_file_size); + len += sprintf(header_buf + len, "cluster_bits: %d\n", + header->cluster_bits); + len += sprintf(header_buf + len, "size: %lld\n", + header->size); + len += sprintf(header_buf + len, "crypt_method: %d\n", + header->crypt_method); + len += sprintf(header_buf + len, "l1_size: %d\n", + header->l1_size); + len += sprintf(header_buf + len, "l1_table_offset: %lld\n", + header->l1_table_offset); + len += sprintf(header_buf + len, "refcount_table_offset: %lld\n", + header->refcount_table_offset); + len += sprintf(header_buf + len, "refcount_table_clusters: %d\n", + header->refcount_table_clusters); + len += sprintf(header_buf + len, "nb_snapshots: %d\n", + header->nb_snapshots); + len += sprintf(header_buf + len, "snapshots_offset: %lld\n", + header->snapshots_offset); + + if (header->version == 3) { + len += sprintf(header_buf + len, + "incompatible_features: %lld\n", + header->incompatible_features); + len += sprintf(header_buf + len, + "compatible_features: %lld\n", + header->compatible_features); + len += sprintf(header_buf + len, + "autoclear_features: %lld\n", + header->autoclear_features); + len += sprintf(header_buf + len, + "refcount_order: %d\n", + header->refcount_order); + len += sprintf(header_buf + len, + "header_length: %d\n", + header->header_length); + } + + ASSERT(len < QCOW_HEADER_BUF_LEN); +} + +static ssize_t __qcow_file_fmt_dbgfs_hdr_read(struct file *file, + char __user *buf, size_t size, loff_t *ppos) +{ + struct loop_file_fmt *lo_fmt = file->private_data; + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; + char *header_buf = qcow_data->dbgfs_file_qcow_header_buf; + + return 
simple_read_from_buffer(buf, size, ppos, header_buf, + strlen(header_buf)); +} + +static const struct file_operations qcow_file_fmt_dbgfs_hdr_fops = { + .open = simple_open, + .read = __qcow_file_fmt_dbgfs_hdr_read +}; + +static ssize_t __qcow_file_fmt_dbgfs_ofs_read(struct file *file, + char __user *buf, size_t size, loff_t *ppos) +{ + struct loop_file_fmt *lo_fmt = file->private_data; + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; + unsigned int cur_bytes = 1; + u64 offset = 0; + u64 cluster_offset = 0; + s64 offset_in_cluster = 0; + ssize_t len = 0; + int ret = 0; + + /* read the share debugfs offset */ + ret = mutex_lock_interruptible(&qcow_data->dbgfs_qcow_offset_mutex); + if (ret) + return ret; + + offset = qcow_data->dbgfs_qcow_offset; + mutex_unlock(&qcow_data->dbgfs_qcow_offset_mutex); + + /* calculate and print the cluster offset */ + ret = loop_file_fmt_qcow_cluster_get_offset(lo_fmt, + offset, &cur_bytes, &cluster_offset); + if (ret < 0) + return -EINVAL; + + offset_in_cluster = loop_file_fmt_qcow_offset_into_cluster(qcow_data, + offset); + + len = sprintf(qcow_data->dbgfs_file_qcow_cluster_buf, + "offset: %lld\ncluster_offset: %lld\noffset_in_cluster: %lld\n", + offset, cluster_offset, offset_in_cluster); + + ASSERT(len < QCOW_CLUSTER_BUF_LEN); + + return simple_read_from_buffer(buf, size, ppos, + qcow_data->dbgfs_file_qcow_cluster_buf, len); +} + +static ssize_t __qcow_file_fmt_dbgfs_ofs_write(struct file *file, + const char __user *buf, size_t size, loff_t *ppos) +{ + struct loop_file_fmt *lo_fmt = file->private_data; + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; + ssize_t len = 0; + int ret = 0; + + if (*ppos > QCOW_OFFSET_BUF_LEN || size > QCOW_OFFSET_BUF_LEN) + return -EINVAL; + + len = simple_write_to_buffer(qcow_data->dbgfs_file_qcow_offset_buf, + QCOW_OFFSET_BUF_LEN, ppos, buf, size); + if (len < 0) + return len; + + qcow_data->dbgfs_file_qcow_offset_buf[len] = '\0'; + + ret = mutex_lock_interruptible(&qcow_data->dbgfs_qcow_offset_mutex); + if (ret) + return ret; + + ret = kstrtou64(qcow_data->dbgfs_file_qcow_offset_buf, 10, + &qcow_data->dbgfs_qcow_offset); + if (ret < 0) + goto out; + + ret = len; +out: + mutex_unlock(&qcow_data->dbgfs_qcow_offset_mutex); + return ret; +} + +static const struct file_operations qcow_file_fmt_dbgfs_ofs_fops = { + .open = simple_open, + .read = __qcow_file_fmt_dbgfs_ofs_read, + .write = __qcow_file_fmt_dbgfs_ofs_write +}; + +static int __qcow_file_fmt_dbgfs_init(struct loop_file_fmt *lo_fmt) +{ + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; + struct loop_device *lo = loop_file_fmt_get_lo(lo_fmt); + int ret = 0; + + qcow_data->dbgfs_dir = debugfs_create_dir("QCOW", lo->lo_dbgfs_dir); + if (IS_ERR_OR_NULL(qcow_data->dbgfs_dir)) { + ret = -ENODEV; + goto out; + } + + qcow_data->dbgfs_file_qcow_header = debugfs_create_file("header", + S_IRUGO, qcow_data->dbgfs_dir, lo_fmt, + &qcow_file_fmt_dbgfs_hdr_fops); + if (IS_ERR_OR_NULL(qcow_data->dbgfs_file_qcow_header)) { + ret = -ENODEV; + goto out_free_dbgfs_dir; + } + + qcow_data->dbgfs_file_qcow_offset = debugfs_create_file("offset", + S_IRUGO | S_IWUSR, qcow_data->dbgfs_dir, lo_fmt, + &qcow_file_fmt_dbgfs_ofs_fops); + if (IS_ERR_OR_NULL(qcow_data->dbgfs_file_qcow_offset)) { + qcow_data->dbgfs_file_qcow_offset = NULL; + ret = -ENODEV; + goto out_free_dbgfs_hdr; + } + + qcow_data->dbgfs_qcow_offset = 0; + mutex_init(&qcow_data->dbgfs_qcow_offset_mutex); + + return ret; + +out_free_dbgfs_hdr: + 
debugfs_remove(qcow_data->dbgfs_file_qcow_header); + qcow_data->dbgfs_file_qcow_header = NULL; +out_free_dbgfs_dir: + debugfs_remove(qcow_data->dbgfs_dir); + qcow_data->dbgfs_dir = NULL; +out: + return ret; +} + +static void __qcow_file_fmt_dbgfs_exit(struct loop_file_fmt *lo_fmt) +{ + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; + + if (qcow_data->dbgfs_file_qcow_offset) + debugfs_remove(qcow_data->dbgfs_file_qcow_offset); + + mutex_destroy(&qcow_data->dbgfs_qcow_offset_mutex); + + if (qcow_data->dbgfs_file_qcow_header) + debugfs_remove(qcow_data->dbgfs_file_qcow_header); + + if (qcow_data->dbgfs_dir) + debugfs_remove(qcow_data->dbgfs_dir); +} +#endif + +static int qcow_file_fmt_init(struct loop_file_fmt *lo_fmt) +{ + struct loop_file_fmt_qcow_data *qcow_data; + struct loop_device *lo = loop_file_fmt_get_lo(lo_fmt); + struct loop_file_fmt_qcow_header header; + u64 l1_vm_state_index; + u64 l2_cache_size; + u64 l2_cache_entry_size; + ssize_t len; + unsigned int i; + int ret = 0; + + /* allocate memory for saving QCOW file format data */ + qcow_data = kzalloc(sizeof(*qcow_data), GFP_KERNEL); + if (!qcow_data) + return -ENOMEM; + + lo_fmt->private_data = qcow_data; + + /* read the QCOW file header */ + ret = __qcow_file_fmt_header_read(lo_fmt, &header); + if (ret) + goto free_qcow_data; + + /* save information of the header fields in human readable format in + * a file buffer to access it with debugfs */ +#ifdef CONFIG_DEBUG_FS + __qcow_file_fmt_header_to_buf(lo_fmt, &header); +#endif + + qcow_data->qcow_version = header.version; + + /* Initialise cluster size */ + if (header.cluster_bits < QCOW_MIN_CLUSTER_BITS + || header.cluster_bits > QCOW_MAX_CLUSTER_BITS) { + printk(KERN_ERR "loop_file_fmt_qcow: unsupported cluster " + "size: 2^%d", header.cluster_bits); + ret = -EINVAL; + goto free_qcow_data; + } + + qcow_data->cluster_bits = header.cluster_bits; + qcow_data->cluster_size = 1 << qcow_data->cluster_bits; + qcow_data->cluster_sectors = 1 << + (qcow_data->cluster_bits - SECTOR_SHIFT); + + if (header.header_length > qcow_data->cluster_size) { + printk(KERN_ERR "loop_file_fmt_qcow: QCOW header exceeds " + "cluster size"); + ret = -EINVAL; + goto free_qcow_data; + } + + if (header.backing_file_offset > qcow_data->cluster_size) { + printk(KERN_ERR "loop_file_fmt_qcow: invalid backing file " + "offset"); + ret = -EINVAL; + goto free_qcow_data; + } + + if (header.backing_file_offset) { + printk(KERN_ERR "loop_file_fmt_qcow: backing file support not " + "available"); + ret = -ENOTSUPP; + goto free_qcow_data; + } + + /* handle feature bits */ + qcow_data->incompatible_features = header.incompatible_features; + qcow_data->compatible_features = header.compatible_features; + qcow_data->autoclear_features = header.autoclear_features; + + if (qcow_data->incompatible_features & QCOW_INCOMPAT_DIRTY) { + printk(KERN_ERR "loop_file_fmt_qcow: image contains " + "inconsistent refcounts"); + ret = -EACCES; + goto free_qcow_data; + } + + if (qcow_data->incompatible_features & QCOW_INCOMPAT_CORRUPT) { + printk(KERN_ERR "loop_file_fmt_qcow: image is corrupt; cannot " + "be opened read/write"); + ret = -EACCES; + goto free_qcow_data; + } + + if (qcow_data->incompatible_features & QCOW_INCOMPAT_DATA_FILE) { + printk(KERN_ERR "loop_file_fmt_qcow: clusters in the external " + "data file are not refcounted"); + ret = -EACCES; + goto free_qcow_data; + } + + /* Check support for various header values */ + if (header.refcount_order > 6) { + printk(KERN_ERR "loop_file_fmt_qcow: reference count entry 
" + "width too large; may not exceed 64 bits"); + ret = -EINVAL; + goto free_qcow_data; + } + qcow_data->refcount_order = header.refcount_order; + qcow_data->refcount_bits = 1 << qcow_data->refcount_order; + qcow_data->refcount_max = U64_C(1) << (qcow_data->refcount_bits - 1); + qcow_data->refcount_max += qcow_data->refcount_max - 1; + + qcow_data->crypt_method_header = header.crypt_method; + if (qcow_data->crypt_method_header) { + printk(KERN_ERR "loop_file_fmt_qcow: encryption support not " + "available"); + ret = -ENOTSUPP; + goto free_qcow_data; + } + + /* L2 is always one cluster */ + qcow_data->l2_bits = qcow_data->cluster_bits - 3; + qcow_data->l2_size = 1 << qcow_data->l2_bits; + /* 2^(qcow_data->refcount_order - 3) is the refcount width in bytes */ + qcow_data->refcount_block_bits = qcow_data->cluster_bits - + (qcow_data->refcount_order - 3); + qcow_data->refcount_block_size = 1 << qcow_data->refcount_block_bits; + qcow_data->size = header.size; + qcow_data->csize_shift = (62 - (qcow_data->cluster_bits - 8)); + qcow_data->csize_mask = (1 << (qcow_data->cluster_bits - 8)) - 1; + qcow_data->cluster_offset_mask = (1LL << qcow_data->csize_shift) - 1; + + qcow_data->refcount_table_offset = header.refcount_table_offset; + qcow_data->refcount_table_size = header.refcount_table_clusters << + (qcow_data->cluster_bits - 3); + + if (header.refcount_table_clusters == 0) { + printk(KERN_ERR "loop_file_fmt_qcow: image does not contain a " + "reference count table"); + ret = -EINVAL; + goto free_qcow_data; + } + + ret = __qcow_file_fmt_validate_table(lo_fmt, + qcow_data->refcount_table_offset, + header.refcount_table_clusters, qcow_data->cluster_size, + QCOW_MAX_REFTABLE_SIZE, "Reference count table"); + if (ret < 0) { + goto free_qcow_data; + } + + /* The total size in bytes of the snapshot table is checked in + * qcow2_read_snapshots() because the size of each snapshot is + * variable and we don't know it yet. + * Here we only check the offset and number of snapshots. 
+	/* The total size in bytes of the snapshot table is only checked when
+	 * the snapshots are read (in QEMU: qcow2_read_snapshots()) because
+	 * the size of each snapshot is variable and we don't know it yet.
+	 * Here we only check the offset and number of snapshots. */
+	ret = __qcow_file_fmt_validate_table(lo_fmt, header.snapshots_offset,
+		header.nb_snapshots,
+		sizeof(struct loop_file_fmt_qcow_snapshot_header),
+		sizeof(struct loop_file_fmt_qcow_snapshot_header) *
+		QCOW_MAX_SNAPSHOTS, "Snapshot table");
+	if (ret < 0) {
+		goto free_qcow_data;
+	}
+
+	/* read the level 1 table */
+	ret = __qcow_file_fmt_validate_table(lo_fmt, header.l1_table_offset,
+		header.l1_size, sizeof(u64), QCOW_MAX_L1_SIZE,
+		"Active L1 table");
+	if (ret < 0) {
+		goto free_qcow_data;
+	}
+	qcow_data->l1_size = header.l1_size;
+	qcow_data->l1_table_offset = header.l1_table_offset;
+
+	l1_vm_state_index = loop_file_fmt_qcow_size_to_l1(qcow_data,
+		header.size);
+	if (l1_vm_state_index > INT_MAX) {
+		printk(KERN_ERR "loop_file_fmt_qcow: image is too big");
+		ret = -EFBIG;
+		goto free_qcow_data;
+	}
+	qcow_data->l1_vm_state_index = l1_vm_state_index;
+
+	/* the L1 table must contain at least enough entries to cover
+	 * header.size bytes */
+	if (qcow_data->l1_size < qcow_data->l1_vm_state_index) {
+		printk(KERN_ERR "loop_file_fmt_qcow: L1 table is too small");
+		ret = -EINVAL;
+		goto free_qcow_data;
+	}
+
+	if (qcow_data->l1_size > 0) {
+		qcow_data->l1_table = vzalloc(round_up(qcow_data->l1_size *
+			sizeof(u64), 512));
+		if (qcow_data->l1_table == NULL) {
+			printk(KERN_ERR "loop_file_fmt_qcow: could not "
+				"allocate L1 table");
+			ret = -ENOMEM;
+			goto free_qcow_data;
+		}
+		len = kernel_read(lo->lo_backing_file, qcow_data->l1_table,
+			qcow_data->l1_size * sizeof(u64),
+			&qcow_data->l1_table_offset);
+		if (len < 0) {
+			printk(KERN_ERR "loop_file_fmt_qcow: could not read L1 "
+				"table");
+			ret = len;
+			goto free_l1_table;
+		}
+		/* the on-disk L1 table is big endian */
+		for (i = 0; i < qcow_data->l1_size; i++) {
+			qcow_data->l1_table[i] =
+				be64_to_cpu(qcow_data->l1_table[i]);
+		}
+	}
+
+	/* Internal snapshots */
+	qcow_data->snapshots_offset = header.snapshots_offset;
+	qcow_data->nb_snapshots = header.nb_snapshots;
+
+	if (qcow_data->nb_snapshots > 0) {
+		printk(KERN_ERR "loop_file_fmt_qcow: snapshots support not "
+			"available");
+		ret = -ENOTSUPP;
+		goto free_l1_table;
+	}
+
+	/* create the L2 cache: full coverage of an image of size bytes needs
+	 * size / (cluster_size / 8) bytes of L2 tables, since each 8-byte L2
+	 * entry maps one cluster (e.g. an 8 GiB image with 64 KiB clusters
+	 * needs 1 MiB of L2 tables) */
+	l2_cache_size = qcow_data->size / (qcow_data->cluster_size / 8);
+	l2_cache_entry_size = min(qcow_data->cluster_size, (int)4096);
+
+	/* limit the L2 cache size to QCOW_DEFAULT_L2_CACHE_MAX_SIZE bytes */
+	l2_cache_size = min(l2_cache_size, (u64)QCOW_DEFAULT_L2_CACHE_MAX_SIZE);
+
+	/* convert the cache size from bytes to a number of cache entries */
+	l2_cache_size /= l2_cache_entry_size;
+	if (l2_cache_size < QCOW_MIN_L2_CACHE_SIZE) {
+		l2_cache_size = QCOW_MIN_L2_CACHE_SIZE;
+	}
+
+	if (l2_cache_size > INT_MAX) {
+		printk(KERN_ERR "loop_file_fmt_qcow: L2 cache size too big");
+		ret = -EINVAL;
+		goto free_l1_table;
+	}
+
+	qcow_data->l2_slice_size = l2_cache_entry_size / sizeof(u64);
+
+	qcow_data->l2_table_cache = loop_file_fmt_qcow_cache_create(lo_fmt,
+		l2_cache_size, l2_cache_entry_size);
+	if (!qcow_data->l2_table_cache) {
+		ret = -ENOMEM;
+		goto free_l1_table;
+	}
+
+	/* initialize compression support */
+	ret = __qcow_file_fmt_compression_init(lo_fmt);
+	if (ret < 0)
+		goto free_l2_cache;
+
+	/* initialize debugfs entries */
+#ifdef CONFIG_DEBUG_FS
+	ret = __qcow_file_fmt_dbgfs_init(lo_fmt);
+	if (ret < 0)
+		goto free_l2_cache;
+#endif
+
+	return ret;
+
+free_l2_cache:
+	loop_file_fmt_qcow_cache_destroy(lo_fmt);
+free_l1_table:
+	vfree(qcow_data->l1_table);
+free_qcow_data:
+	kfree(qcow_data);
+	lo_fmt->private_data = NULL;
+	return ret;
+}
+
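+/* counterpart to qcow_file_fmt_init(): releases the debugfs entries, the
+ * compression state, the L1 table and the L2 table cache */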
+static void qcow_file_fmt_exit(struct loop_file_fmt *lo_fmt)
+{
+	struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data;
+
+#ifdef CONFIG_DEBUG_FS
+	__qcow_file_fmt_dbgfs_exit(lo_fmt);
+#endif
+
+	__qcow_file_fmt_compression_exit(lo_fmt);
+
+	if (qcow_data->l1_table) {
+		vfree(qcow_data->l1_table);
+	}
+
+	if (qcow_data->l2_table_cache) {
+		loop_file_fmt_qcow_cache_destroy(lo_fmt);
+	}
+
+	if (qcow_data) {
+		kfree(qcow_data);
+		lo_fmt->private_data = NULL;
+	}
+}
+
+static ssize_t __qcow_file_fmt_buffer_decompress(struct loop_file_fmt *lo_fmt,
+						 void *dest,
+						 size_t dest_size,
+						 const void *src,
+						 size_t src_size)
+{
+	struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data;
+	int ret = 0;
+
+	qcow_data->strm->avail_in = src_size;
+	qcow_data->strm->next_in = (void *) src;
+	qcow_data->strm->avail_out = dest_size;
+	qcow_data->strm->next_out = dest;
+
+	/* windowBits = -12 selects a raw deflate stream without zlib header,
+	 * which is how qcow2 stores compressed cluster data */
+	ret = zlib_inflateInit2(qcow_data->strm, -12);
+	if (ret != Z_OK) {
+		return -1;
+	}
+
+	ret = zlib_inflate(qcow_data->strm, Z_FINISH);
+	if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR)
+		|| qcow_data->strm->avail_out != 0) {
+		/* We accept Z_BUF_ERROR because the @dest buffer has to be
+		 * filled completely, while the @src buffer may only be
+		 * processed partially (in qcow2 the size of the compressed
+		 * data is only known with a precision of one sector) */
+		ret = -1;
+	}
+
+	zlib_inflateEnd(qcow_data->strm);
+
+	return ret;
+}
+
+static int __qcow_file_fmt_read_compressed(struct loop_file_fmt *lo_fmt,
+					   struct bio_vec *bvec,
+					   u64 file_cluster_offset,
+					   u64 offset,
+					   u64 bytes,
+					   u64 bytes_done)
+{
+	struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data;
+	struct loop_device *lo = loop_file_fmt_get_lo(lo_fmt);
+	int ret = 0, csize, nb_csectors;
+	u64 coffset;
+	u8 *in_buf, *out_buf;
+	ssize_t len;
+	void *data;
+	unsigned long irq_flags;
+	int offset_in_cluster = loop_file_fmt_qcow_offset_into_cluster(
+		qcow_data, offset);
+
+	/* decode the compressed cluster descriptor: the low bits hold the
+	 * host offset, the bits above csize_shift the number of additional
+	 * 512-byte sectors; the compressed data may start at any byte within
+	 * its first sector, so read all sectors it touches */
+	coffset = file_cluster_offset & qcow_data->cluster_offset_mask;
+	nb_csectors = ((file_cluster_offset >> qcow_data->csize_shift) &
+		qcow_data->csize_mask) + 1;
+	csize = nb_csectors * QCOW_COMPRESSED_SECTOR_SIZE -
+		(coffset & ~QCOW_COMPRESSED_SECTOR_MASK);
+
+	in_buf = vmalloc(csize);
+	if (!in_buf) {
+		return -ENOMEM;
+	}
+
+	out_buf = vmalloc(qcow_data->cluster_size);
+	if (!out_buf) {
+		ret = -ENOMEM;
+		goto out_free_in_buf;
+	}
+
+	len = kernel_read(lo->lo_backing_file, in_buf, csize, &coffset);
+	if (len < 0) {
+		ret = len;
+		goto out_free_out_buf;
+	}
+
+	if (__qcow_file_fmt_buffer_decompress(lo_fmt, out_buf,
+		qcow_data->cluster_size, in_buf, csize) < 0) {
+		ret = -EIO;
+		goto out_free_out_buf;
+	}
+
+	ASSERT(bytes <= bvec->bv_len);
+	data = bvec_kmap_irq(bvec, &irq_flags) + bytes_done;
+	memcpy(data, out_buf + offset_in_cluster, bytes);
+	flush_dcache_page(bvec->bv_page);
+	bvec_kunmap_irq(data, &irq_flags);
+
+out_free_out_buf:
+	vfree(out_buf);
+out_free_in_buf:
+	vfree(in_buf);
+
+	return ret;
+}
+
+static int __qcow_file_fmt_read_bvec(struct loop_file_fmt *lo_fmt,
+				     struct bio_vec *bvec,
+				     loff_t *ppos)
+{
+	struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data;
+	struct loop_device *lo = loop_file_fmt_get_lo(lo_fmt);
+	int offset_in_cluster;
+	int ret;
+	unsigned int cur_bytes; /* number of bytes in current iteration */
+	u64 bytes;
+	u64 cluster_offset = 0;
+	u64 bytes_done = 0;
+	void *data;
+	unsigned long irq_flags;
+	ssize_t len;
+	loff_t pos_read;
+
+	bytes = bvec->bv_len;
+
+	while (bytes != 0) {
+
+		/* prepare next request */
+		cur_bytes = bytes;
+
+		ret = loop_file_fmt_qcow_cluster_get_offset(lo_fmt, *ppos,
+			&cur_bytes, &cluster_offset);
+		if (ret < 0) {
+			goto fail;
+		}
+
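+		/* compute the position within the current cluster and handle
+		 * the cluster according to the type determined by
+		 * loop_file_fmt_qcow_cluster_get_offset(): unallocated and
+		 * zero clusters read back as zeros, compressed clusters are
+		 * inflated, and normal clusters are read directly from the
+		 * backing file */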
+ offset_in_cluster = loop_file_fmt_qcow_offset_into_cluster( + qcow_data, *ppos); + + switch (ret) { + case QCOW_CLUSTER_UNALLOCATED: + case QCOW_CLUSTER_ZERO_PLAIN: + case QCOW_CLUSTER_ZERO_ALLOC: + data = bvec_kmap_irq(bvec, &irq_flags) + bytes_done; + memset(data, 0, cur_bytes); + flush_dcache_page(bvec->bv_page); + bvec_kunmap_irq(data, &irq_flags); + break; + + case QCOW_CLUSTER_COMPRESSED: + ret = __qcow_file_fmt_read_compressed(lo_fmt, bvec, + cluster_offset, *ppos, cur_bytes, bytes_done); + if (ret < 0) { + goto fail; + } + + break; + + case QCOW_CLUSTER_NORMAL: + if ((cluster_offset & 511) != 0) { + ret = -EIO; + goto fail; + } + + pos_read = cluster_offset + offset_in_cluster; + + data = bvec_kmap_irq(bvec, &irq_flags) + bytes_done; + len = kernel_read(lo->lo_backing_file, data, cur_bytes, + &pos_read); + flush_dcache_page(bvec->bv_page); + bvec_kunmap_irq(data, &irq_flags); + + if (len < 0) + return len; + + break; + + default: + ret = -EIO; + goto fail; + } + + bytes -= cur_bytes; + *ppos += cur_bytes; + bytes_done += cur_bytes; + } + + ret = 0; + +fail: + return ret; +} + +static int qcow_file_fmt_read(struct loop_file_fmt *lo_fmt, + struct request *rq) +{ + struct bio_vec bvec; + struct req_iterator iter; + loff_t pos; + int ret = 0; + + pos = __qcow_file_fmt_rq_get_pos(lo_fmt, rq); + + rq_for_each_segment(bvec, rq, iter) { + ret = __qcow_file_fmt_read_bvec(lo_fmt, &bvec, &pos); + if (ret) + return ret; + + cond_resched(); + } + + return ret; +} + +static loff_t qcow_file_fmt_sector_size(struct loop_file_fmt *lo_fmt) +{ + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; + struct loop_device *lo = loop_file_fmt_get_lo(lo_fmt); + loff_t loopsize; + + if (qcow_data->size > 0) + loopsize = qcow_data->size; + else + return 0; + + if (lo->lo_offset > 0) + loopsize -= lo->lo_offset; + + if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize) + loopsize = lo->lo_sizelimit; + + /* + * Unfortunately, if we want to do I/O on the device, + * the number of 512-byte sectors has to fit into a sector_t. + */ + return loopsize >> 9; +} + +static struct loop_file_fmt_ops qcow_file_fmt_ops = { + .init = qcow_file_fmt_init, + .exit = qcow_file_fmt_exit, + .read = qcow_file_fmt_read, + .write = NULL, + .read_aio = NULL, + .write_aio = NULL, + .discard = NULL, + .flush = NULL, + .sector_size = qcow_file_fmt_sector_size +}; + +static struct loop_file_fmt_driver qcow_file_fmt_driver = { + .name = "QCOW", + .file_fmt_type = LO_FILE_FMT_QCOW, + .ops = &qcow_file_fmt_ops, + .owner = THIS_MODULE +}; + +static int __init loop_file_fmt_qcow_init(void) +{ + printk(KERN_INFO "loop_file_fmt_qcow: init loop device QCOW file " + "format driver"); + return loop_file_fmt_register_driver(&qcow_file_fmt_driver); +} + +static void __exit loop_file_fmt_qcow_exit(void) +{ + printk(KERN_INFO "loop_file_fmt_qcow: exit loop device QCOW file " + "format driver"); + loop_file_fmt_unregister_driver(&qcow_file_fmt_driver); +} + +module_init(loop_file_fmt_qcow_init); +module_exit(loop_file_fmt_qcow_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Manuel Bentele "); +MODULE_DESCRIPTION("Loop device QCOW file format driver"); +MODULE_SOFTDEP("pre: loop"); diff --git a/loop_file_fmt_qcow_main.h b/loop_file_fmt_qcow_main.h new file mode 100644 index 0000000..9e4951f --- /dev/null +++ b/loop_file_fmt_qcow_main.h @@ -0,0 +1,417 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * loop_file_fmt_qcow.h + * + * QCOW file format driver for the loop device module. 
+ * + * Ported QCOW2 implementation of the QEMU project (GPL-2.0): + * Declarations for the QCOW2 file format. + * + * The copyright (C) 2004-2006 of the original code is owned by Fabrice Bellard. + * + * Copyright (C) 2019 Manuel Bentele + */ + +#ifndef _LINUX_LOOP_FILE_FMT_QCOW_H +#define _LINUX_LOOP_FILE_FMT_QCOW_H + +#include +#include +#include +#include + +#ifdef CONFIG_DEBUG_FS +#include +#endif + +#include "loop_file_fmt.h" + +#ifdef CONFIG_DEBUG_DRIVER +#define ASSERT(x) \ +do { \ + if (!(x)) { \ + printk(KERN_EMERG "assertion failed %s: %d: %s\n", \ + __FILE__, __LINE__, #x); \ + BUG(); \ + } \ +} while (0) +#else +#define ASSERT(x) do { } while (0) +#endif + +#define KiB (1024) +#define MiB (1024 * 1024) + +#define QCOW_MAGIC (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb) + +#define QCOW_CRYPT_NONE 0 +#define QCOW_CRYPT_AES 1 +#define QCOW_CRYPT_LUKS 2 + +#define QCOW_MAX_CRYPT_CLUSTERS 32 +#define QCOW_MAX_SNAPSHOTS 65536 + +/* Field widths in QCOW mean normal cluster offsets cannot reach + * 64PB; depending on cluster size, compressed clusters can have a + * smaller limit (64PB for up to 16k clusters, then ramps down to + * 512TB for 2M clusters). */ +#define QCOW_MAX_CLUSTER_OFFSET ((1ULL << 56) - 1) + +/* 8 MB refcount table is enough for 2 PB images at 64k cluster size + * (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */ +#define QCOW_MAX_REFTABLE_SIZE (8 * MiB) + +/* 32 MB L1 table is enough for 2 PB images at 64k cluster size + * (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */ +#define QCOW_MAX_L1_SIZE (32 * MiB) + +/* Allow for an average of 1k per snapshot table entry, should be plenty of + * space for snapshot names and IDs */ +#define QCOW_MAX_SNAPSHOTS_SIZE (1024 * QCOW_MAX_SNAPSHOTS) + +/* Bitmap header extension constraints */ +#define QCOW_MAX_BITMAPS 65535 +#define QCOW_MAX_BITMAP_DIRECTORY_SIZE (1024 * QCOW_MAX_BITMAPS) + +/* indicate that the refcount of the referenced cluster is exactly one. 
*/ +#define QCOW_OFLAG_COPIED (1ULL << 63) +/* indicate that the cluster is compressed (they never have the copied flag) */ +#define QCOW_OFLAG_COMPRESSED (1ULL << 62) +/* The cluster reads as all zeros */ +#define QCOW_OFLAG_ZERO (1ULL << 0) + +#define QCOW_MIN_CLUSTER_BITS 9 +#define QCOW_MAX_CLUSTER_BITS 21 + +/* Defined in the qcow2 spec (compressed cluster descriptor) */ +#define QCOW_COMPRESSED_SECTOR_SIZE 512U +#define QCOW_COMPRESSED_SECTOR_MASK (~(QCOW_COMPRESSED_SECTOR_SIZE - 1)) + +/* Must be at least 2 to cover COW */ +#define QCOW_MIN_L2_CACHE_SIZE 2 /* cache entries */ + +/* Must be at least 4 to cover all cases of refcount table growth */ +#define QCOW_MIN_REFCOUNT_CACHE_SIZE 4 /* clusters */ + +#define QCOW_DEFAULT_L2_CACHE_MAX_SIZE (32 * MiB) +#define QCOW_DEFAULT_CACHE_CLEAN_INTERVAL 600 /* seconds */ + +#define QCOW_DEFAULT_CLUSTER_SIZE 65536 + +/* Buffer size for debugfs file buffer to display QCOW header information */ +#define QCOW_HEADER_BUF_LEN 1024 + +/* Buffer size for debugfs file buffer to receive and display offset and + * cluster offset information */ +#define QCOW_OFFSET_BUF_LEN 32 +#define QCOW_CLUSTER_BUF_LEN 128 + +struct loop_file_fmt_qcow_header { + u32 magic; + u32 version; + u64 backing_file_offset; + u32 backing_file_size; + u32 cluster_bits; + u64 size; /* in bytes */ + u32 crypt_method; + u32 l1_size; + u64 l1_table_offset; + u64 refcount_table_offset; + u32 refcount_table_clusters; + u32 nb_snapshots; + u64 snapshots_offset; + + /* The following fields are only valid for version >= 3 */ + u64 incompatible_features; + u64 compatible_features; + u64 autoclear_features; + + u32 refcount_order; + u32 header_length; +} __attribute__((packed)); + +struct loop_file_fmt_qcow_snapshot_header { + /* header is 8 byte aligned */ + u64 l1_table_offset; + + u32 l1_size; + u16 id_str_size; + u16 name_size; + + u32 date_sec; + u32 date_nsec; + + u64 vm_clock_nsec; + + u32 vm_state_size; + /* for extension */ + u32 extra_data_size; + /* extra data follows */ + /* id_str follows */ + /* name follows */ +} __attribute__((packed)); + +enum { + QCOW_FEAT_TYPE_INCOMPATIBLE = 0, + QCOW_FEAT_TYPE_COMPATIBLE = 1, + QCOW_FEAT_TYPE_AUTOCLEAR = 2, +}; + +/* incompatible feature bits */ +enum { + QCOW_INCOMPAT_DIRTY_BITNR = 0, + QCOW_INCOMPAT_CORRUPT_BITNR = 1, + QCOW_INCOMPAT_DATA_FILE_BITNR = 2, + QCOW_INCOMPAT_DIRTY = 1 << QCOW_INCOMPAT_DIRTY_BITNR, + QCOW_INCOMPAT_CORRUPT = 1 << QCOW_INCOMPAT_CORRUPT_BITNR, + QCOW_INCOMPAT_DATA_FILE = 1 << QCOW_INCOMPAT_DATA_FILE_BITNR, + + QCOW_INCOMPAT_MASK = QCOW_INCOMPAT_DIRTY + | QCOW_INCOMPAT_CORRUPT + | QCOW_INCOMPAT_DATA_FILE, +}; + +/* compatible feature bits */ +enum { + QCOW_COMPAT_LAZY_REFCOUNTS_BITNR = 0, + QCOW_COMPAT_LAZY_REFCOUNTS = 1 << QCOW_COMPAT_LAZY_REFCOUNTS_BITNR, + + QCOW_COMPAT_FEAT_MASK = QCOW_COMPAT_LAZY_REFCOUNTS, +}; + +/* autoclear feature bits */ +enum { + QCOW_AUTOCLEAR_BITMAPS_BITNR = 0, + QCOW_AUTOCLEAR_DATA_FILE_RAW_BITNR = 1, + QCOW_AUTOCLEAR_BITMAPS = 1 << QCOW_AUTOCLEAR_BITMAPS_BITNR, + QCOW_AUTOCLEAR_DATA_FILE_RAW = 1 << QCOW_AUTOCLEAR_DATA_FILE_RAW_BITNR, + + QCOW_AUTOCLEAR_MASK = QCOW_AUTOCLEAR_BITMAPS | + QCOW_AUTOCLEAR_DATA_FILE_RAW, +}; + +struct loop_file_fmt_qcow_data { + u64 size; + int cluster_bits; + int cluster_size; + int cluster_sectors; + int l2_slice_size; + int l2_bits; + int l2_size; + int l1_size; + int l1_vm_state_index; + int refcount_block_bits; + int refcount_block_size; + int csize_shift; + int csize_mask; + u64 cluster_offset_mask; + u64 l1_table_offset; + u64 *l1_table; + + 
/* in-memory caches for L2 table slices and refcount blocks */
+	struct loop_file_fmt_qcow_cache *l2_table_cache;
+	struct loop_file_fmt_qcow_cache *refcount_block_cache;
+
+	u64 *refcount_table;
+	u64 refcount_table_offset;
+	u32 refcount_table_size;
+	u32 max_refcount_table_index; /* Last used entry in refcount_table */
+	u64 free_cluster_index;
+	u64 free_byte_offset;
+
+	u32 crypt_method_header;
+	u64 snapshots_offset;
+	int snapshots_size;
+	unsigned int nb_snapshots;
+
+	u32 nb_bitmaps;
+	u64 bitmap_directory_size;
+	u64 bitmap_directory_offset;
+
+	int qcow_version;
+	bool use_lazy_refcounts;
+	int refcount_order;
+	int refcount_bits;
+	u64 refcount_max;
+
+	u64 incompatible_features;
+	u64 compatible_features;
+	u64 autoclear_features;
+
+	struct z_stream_s *strm;
+
+	/* debugfs entries */
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dbgfs_dir;
+	struct dentry *dbgfs_file_qcow_header;
+	char dbgfs_file_qcow_header_buf[QCOW_HEADER_BUF_LEN];
+	struct dentry *dbgfs_file_qcow_offset;
+	char dbgfs_file_qcow_offset_buf[QCOW_OFFSET_BUF_LEN];
+	char dbgfs_file_qcow_cluster_buf[QCOW_CLUSTER_BUF_LEN];
+	u64 dbgfs_qcow_offset;
+	struct mutex dbgfs_qcow_offset_mutex;
+#endif
+};
+
+struct loop_file_fmt_qcow_cow_region {
+	/**
+	 * Offset of the COW region in bytes from the start of the first
+	 * cluster touched by the request.
+	 */
+	unsigned offset;
+
+	/** Number of bytes to copy */
+	unsigned nb_bytes;
+};
+
+enum loop_file_fmt_qcow_cluster_type {
+	QCOW_CLUSTER_UNALLOCATED,
+	QCOW_CLUSTER_ZERO_PLAIN,
+	QCOW_CLUSTER_ZERO_ALLOC,
+	QCOW_CLUSTER_NORMAL,
+	QCOW_CLUSTER_COMPRESSED,
+};
+
+enum loop_file_fmt_qcow_metadata_overlap {
+	QCOW_OL_MAIN_HEADER_BITNR = 0,
+	QCOW_OL_ACTIVE_L1_BITNR = 1,
+	QCOW_OL_ACTIVE_L2_BITNR = 2,
+	QCOW_OL_REFCOUNT_TABLE_BITNR = 3,
+	QCOW_OL_REFCOUNT_BLOCK_BITNR = 4,
+	QCOW_OL_SNAPSHOT_TABLE_BITNR = 5,
+	QCOW_OL_INACTIVE_L1_BITNR = 6,
+	QCOW_OL_INACTIVE_L2_BITNR = 7,
+	QCOW_OL_BITMAP_DIRECTORY_BITNR = 8,
+
+	QCOW_OL_MAX_BITNR = 9,
+
+	QCOW_OL_NONE = 0,
+	QCOW_OL_MAIN_HEADER = (1 << QCOW_OL_MAIN_HEADER_BITNR),
+	QCOW_OL_ACTIVE_L1 = (1 << QCOW_OL_ACTIVE_L1_BITNR),
+	QCOW_OL_ACTIVE_L2 = (1 << QCOW_OL_ACTIVE_L2_BITNR),
+	QCOW_OL_REFCOUNT_TABLE = (1 << QCOW_OL_REFCOUNT_TABLE_BITNR),
+	QCOW_OL_REFCOUNT_BLOCK = (1 << QCOW_OL_REFCOUNT_BLOCK_BITNR),
+	QCOW_OL_SNAPSHOT_TABLE = (1 << QCOW_OL_SNAPSHOT_TABLE_BITNR),
+	QCOW_OL_INACTIVE_L1 = (1 << QCOW_OL_INACTIVE_L1_BITNR),
+	/* NOTE: Checking overlaps with inactive L2 tables will result in
+	 * reads from the image file (bdrv reads in the original QEMU
+	 * code). */
+	QCOW_OL_INACTIVE_L2 = (1 << QCOW_OL_INACTIVE_L2_BITNR),
+	QCOW_OL_BITMAP_DIRECTORY = (1 << QCOW_OL_BITMAP_DIRECTORY_BITNR),
+};
+
+/* Perform all overlap checks which can be done in constant time */
+#define QCOW_OL_CONSTANT \
+	(QCOW_OL_MAIN_HEADER | QCOW_OL_ACTIVE_L1 | QCOW_OL_REFCOUNT_TABLE | \
+	QCOW_OL_SNAPSHOT_TABLE | QCOW_OL_BITMAP_DIRECTORY)
+
+/* Perform all overlap checks which don't require disk access */
+#define QCOW_OL_CACHED \
+	(QCOW_OL_CONSTANT | QCOW_OL_ACTIVE_L2 | QCOW_OL_REFCOUNT_BLOCK | \
+	QCOW_OL_INACTIVE_L1)
+
+/* Perform all overlap checks */
+#define QCOW_OL_ALL \
+	(QCOW_OL_CACHED | QCOW_OL_INACTIVE_L2)
+
+#define L1E_OFFSET_MASK 0x00fffffffffffe00ULL
+#define L2E_OFFSET_MASK 0x00fffffffffffe00ULL
+#define L2E_COMPRESSED_OFFSET_SIZE_MASK 0x3fffffffffffffffULL
+
+#define REFT_OFFSET_MASK 0xfffffffffffffe00ULL
+
+#define INV_OFFSET (-1ULL)
+
+static inline bool loop_file_fmt_qcow_has_data_file(
+	struct loop_file_fmt *lo_fmt)
+{
+	/* external data files are not supported by this driver, so the
+	 * image never has a separate data file
*/ + return false; +} + +static inline bool loop_file_fmt_qcow_data_file_is_raw( + struct loop_file_fmt *lo_fmt) +{ + struct loop_file_fmt_qcow_data *qcow_data = lo_fmt->private_data; + return !!(qcow_data->autoclear_features & + QCOW_AUTOCLEAR_DATA_FILE_RAW); +} + +static inline s64 loop_file_fmt_qcow_start_of_cluster( + struct loop_file_fmt_qcow_data *qcow_data, s64 offset) +{ + return offset & ~(qcow_data->cluster_size - 1); +} + +static inline s64 loop_file_fmt_qcow_offset_into_cluster( + struct loop_file_fmt_qcow_data *qcow_data, s64 offset) +{ + return offset & (qcow_data->cluster_size - 1); +} + +static inline s64 loop_file_fmt_qcow_size_to_clusters( + struct loop_file_fmt_qcow_data *qcow_data, u64 size) +{ + return (size + (qcow_data->cluster_size - 1)) >> + qcow_data->cluster_bits; +} + +static inline s64 loop_file_fmt_qcow_size_to_l1( + struct loop_file_fmt_qcow_data *qcow_data, s64 size) +{ + int shift = qcow_data->cluster_bits + qcow_data->l2_bits; + return (size + (1ULL << shift) - 1) >> shift; +} + +static inline int loop_file_fmt_qcow_offset_to_l1_index( + struct loop_file_fmt_qcow_data *qcow_data, u64 offset) +{ + return offset >> (qcow_data->l2_bits + qcow_data->cluster_bits); +} + +static inline int loop_file_fmt_qcow_offset_to_l2_index( + struct loop_file_fmt_qcow_data *qcow_data, s64 offset) +{ + return (offset >> qcow_data->cluster_bits) & (qcow_data->l2_size - 1); +} + +static inline int loop_file_fmt_qcow_offset_to_l2_slice_index( + struct loop_file_fmt_qcow_data *qcow_data, s64 offset) +{ + return (offset >> qcow_data->cluster_bits) & + (qcow_data->l2_slice_size - 1); +} + +static inline s64 loop_file_fmt_qcow_vm_state_offset( + struct loop_file_fmt_qcow_data *qcow_data) +{ + return (s64)qcow_data->l1_vm_state_index << + (qcow_data->cluster_bits + qcow_data->l2_bits); +} + +static inline enum loop_file_fmt_qcow_cluster_type +loop_file_fmt_qcow_get_cluster_type(struct loop_file_fmt *lo_fmt, u64 l2_entry) +{ + if (l2_entry & QCOW_OFLAG_COMPRESSED) { + return QCOW_CLUSTER_COMPRESSED; + } else if (l2_entry & QCOW_OFLAG_ZERO) { + if (l2_entry & L2E_OFFSET_MASK) { + return QCOW_CLUSTER_ZERO_ALLOC; + } + return QCOW_CLUSTER_ZERO_PLAIN; + } else if (!(l2_entry & L2E_OFFSET_MASK)) { + /* Offset 0 generally means unallocated, but it is ambiguous + * with external data files because 0 is a valid offset there. + * However, all clusters in external data files always have + * refcount 1, so we can rely on QCOW_OFLAG_COPIED to + * disambiguate. */ + if (loop_file_fmt_qcow_has_data_file(lo_fmt) && + (l2_entry & QCOW_OFLAG_COPIED)) { + return QCOW_CLUSTER_NORMAL; + } else { + return QCOW_CLUSTER_UNALLOCATED; + } + } else { + return QCOW_CLUSTER_NORMAL; + } +} + +#endif diff --git a/loop_file_fmt_raw.c b/loop_file_fmt_raw.c new file mode 100644 index 0000000..134a794 --- /dev/null +++ b/loop_file_fmt_raw.c @@ -0,0 +1,450 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * loop_file_fmt_raw.c + * + * RAW file format driver for the loop device module. 
+ * + * Copyright (C) 2019 Manuel Bentele + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "loop_file_fmt.h" + +static inline loff_t __raw_file_fmt_rq_get_pos(struct loop_file_fmt *lo_fmt, + struct request *rq) +{ + struct loop_device *lo = loop_file_fmt_get_lo(lo_fmt); + return ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset; +} + +/* transfer function for DEPRECATED cryptoloop support */ +static inline int __raw_file_fmt_do_transfer(struct loop_file_fmt *lo_fmt, + int cmd, + struct page *rpage, + unsigned roffs, + struct page *lpage, + unsigned loffs, + int size, + sector_t rblock) +{ + struct loop_device *lo = loop_file_fmt_get_lo(lo_fmt); + int ret; + + ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock); + if (likely(!ret)) + return 0; + + printk_ratelimited(KERN_ERR + "loop: Transfer error at byte offset %llu, length %i.\n", + (unsigned long long)rblock << 9, size); + return ret; +} + +static int raw_file_fmt_read_transfer(struct loop_file_fmt *lo_fmt, + struct request *rq) +{ + struct bio_vec bvec, b; + struct req_iterator iter; + struct iov_iter i; + struct page *page; + struct loop_device *lo; + ssize_t len; + int ret = 0; + loff_t pos; + + page = alloc_page(GFP_NOIO); + if (unlikely(!page)) + return -ENOMEM; + + lo = loop_file_fmt_get_lo(lo_fmt); + pos = __raw_file_fmt_rq_get_pos(lo_fmt, rq); + + rq_for_each_segment(bvec, rq, iter) { + loff_t offset = pos; + + b.bv_page = page; + b.bv_offset = 0; + b.bv_len = bvec.bv_len; + + iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len); + len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); + if (len < 0) { + ret = len; + goto out_free_page; + } + + ret = __raw_file_fmt_do_transfer(lo_fmt, READ, page, 0, + bvec.bv_page, bvec.bv_offset, len, offset >> 9); + if (ret) + goto out_free_page; + + flush_dcache_page(bvec.bv_page); + + if (len != bvec.bv_len) { + struct bio *bio; + + __rq_for_each_bio(bio, rq) + zero_fill_bio(bio); + break; + } + } + + ret = 0; +out_free_page: + __free_page(page); + return ret; +} + +static int raw_file_fmt_read(struct loop_file_fmt *lo_fmt, + struct request *rq) +{ + struct bio_vec bvec; + struct req_iterator iter; + struct iov_iter i; + struct loop_device *lo; + ssize_t len; + loff_t pos; + + lo = loop_file_fmt_get_lo(lo_fmt); + + if (lo->transfer) + return raw_file_fmt_read_transfer(lo_fmt, rq); + + pos = __raw_file_fmt_rq_get_pos(lo_fmt, rq); + + rq_for_each_segment(bvec, rq, iter) { + iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len); + len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); + if (len < 0) + return len; + + flush_dcache_page(bvec.bv_page); + + if (len != bvec.bv_len) { + struct bio *bio; + + __rq_for_each_bio(bio, rq) + zero_fill_bio(bio); + break; + } + cond_resched(); + } + + return 0; +} + +static void __raw_file_fmt_rw_aio_do_completion(struct loop_cmd *cmd) +{ + struct request *rq = blk_mq_rq_from_pdu(cmd); + + if (!atomic_dec_and_test(&cmd->ref)) + return; + kfree(cmd->bvec); + cmd->bvec = NULL; + blk_mq_complete_request(rq); +} + +static void __raw_file_fmt_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) +{ + struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb); + + if (cmd->css) + css_put(cmd->css); + cmd->ret = ret; + __raw_file_fmt_rw_aio_do_completion(cmd); +} + +static int __raw_file_fmt_rw_aio(struct loop_file_fmt *lo_fmt, + struct request *rq, + bool rw) +{ + struct iov_iter iter; + struct bio_vec *bvec; + struct bio *bio = rq->bio; + struct file *file; + 
struct loop_device *lo; + struct loop_cmd *cmd; + unsigned int offset; + int segments = 0; + int ret; + loff_t pos; + + lo = loop_file_fmt_get_lo(lo_fmt); + file = lo->lo_backing_file; + cmd = blk_mq_rq_to_pdu(rq); + pos = __raw_file_fmt_rq_get_pos(lo_fmt, rq); + + if (rq->bio != rq->biotail) { + struct req_iterator iter; + struct bio_vec tmp; + + __rq_for_each_bio(bio, rq) + segments += bio_segments(bio); + bvec = kmalloc_array(segments, sizeof(struct bio_vec), + GFP_NOIO); + if (!bvec) + return -EIO; + cmd->bvec = bvec; + + /* + * The bios of the request may be started from the middle of + * the 'bvec' because of bio splitting, so we can't directly + * copy bio->bi_iov_vec to new bvec. The rq_for_each_segment + * API will take care of all details for us. + */ + rq_for_each_segment(tmp, rq, iter) { + *bvec = tmp; + bvec++; + } + bvec = cmd->bvec; + offset = 0; + } else { + /* + * Same here, this bio may be started from the middle of the + * 'bvec' because of bio splitting, so offset from the bvec + * must be passed to iov iterator + */ + offset = bio->bi_iter.bi_bvec_done; + bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); + segments = bio_segments(bio); + } + atomic_set(&cmd->ref, 2); + + iov_iter_bvec(&iter, ITER_BVEC | rw, bvec, + segments, blk_rq_bytes(rq)); + iter.iov_offset = offset; + + cmd->iocb.ki_pos = pos; + cmd->iocb.ki_filp = file; + cmd->iocb.ki_complete = __raw_file_fmt_rw_aio_complete; + cmd->iocb.ki_flags = IOCB_DIRECT; + cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); + if (cmd->css) + kthread_associate_blkcg(cmd->css); + + if (rw == WRITE) + ret = call_write_iter(file, &cmd->iocb, &iter); + else + ret = call_read_iter(file, &cmd->iocb, &iter); + + __raw_file_fmt_rw_aio_do_completion(cmd); + kthread_associate_blkcg(NULL); + + if (ret != -EIOCBQUEUED) + cmd->iocb.ki_complete(&cmd->iocb, ret, 0); + return 0; +} + +static int raw_file_fmt_read_aio(struct loop_file_fmt *lo_fmt, + struct request *rq) +{ + return __raw_file_fmt_rw_aio(lo_fmt, rq, READ); +} + +static int __raw_file_fmt_write_bvec(struct file *file, + struct bio_vec *bvec, + loff_t *ppos) +{ + struct iov_iter i; + ssize_t bw; + + iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len); + + file_start_write(file); + bw = vfs_iter_write(file, &i, ppos, 0); + file_end_write(file); + + if (likely(bw == bvec->bv_len)) + return 0; + + printk_ratelimited(KERN_ERR + "loop_file_fmt_raw: Write error at byte offset %llu, length " + "%i.\n", (unsigned long long)*ppos, bvec->bv_len); + if (bw >= 0) + bw = -EIO; + return bw; +} + +static int raw_file_fmt_write_transfer(struct loop_file_fmt *lo_fmt, + struct request *rq) +{ + struct bio_vec bvec, b; + struct req_iterator iter; + struct page *page; + struct loop_device *lo; + int ret = 0; + loff_t pos; + + lo = loop_file_fmt_get_lo(lo_fmt); + pos = __raw_file_fmt_rq_get_pos(lo_fmt, rq); + + page = alloc_page(GFP_NOIO); + if (unlikely(!page)) + return -ENOMEM; + + rq_for_each_segment(bvec, rq, iter) { + ret = __raw_file_fmt_do_transfer(lo_fmt, WRITE, page, 0, + bvec.bv_page, bvec.bv_offset, bvec.bv_len, pos >> 9); + if (unlikely(ret)) + break; + + b.bv_page = page; + b.bv_offset = 0; + b.bv_len = bvec.bv_len; + ret = __raw_file_fmt_write_bvec(lo->lo_backing_file, &b, + &pos); + if (ret < 0) + break; + } + + __free_page(page); + return ret; +} + +static int raw_file_fmt_write(struct loop_file_fmt *lo_fmt, + struct request *rq) +{ + struct bio_vec bvec; + struct req_iterator iter; + struct loop_device *lo; + int ret = 0; + loff_t pos; + + lo = 
loop_file_fmt_get_lo(lo_fmt); + + if (lo->transfer) + return raw_file_fmt_write_transfer(lo_fmt, rq); + + pos = __raw_file_fmt_rq_get_pos(lo_fmt, rq); + + rq_for_each_segment(bvec, rq, iter) { + ret = __raw_file_fmt_write_bvec(lo->lo_backing_file, &bvec, + &pos); + if (ret < 0) + break; + cond_resched(); + } + + return ret; +} + +static int raw_file_fmt_write_aio(struct loop_file_fmt *lo_fmt, + struct request *rq) +{ + return __raw_file_fmt_rw_aio(lo_fmt, rq, WRITE); +} + +static int raw_file_fmt_discard(struct loop_file_fmt *lo_fmt, + struct request *rq) +{ + loff_t pos = __raw_file_fmt_rq_get_pos(lo_fmt, rq); + struct loop_device *lo = loop_file_fmt_get_lo(lo_fmt); + + /* + * We use punch hole to reclaim the free space used by the + * image a.k.a. discard. However we do not support discard if + * encryption is enabled, because it may give an attacker + * useful information. + */ + struct file *file = lo->lo_backing_file; + int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE; + int ret; + + if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) { + ret = -EOPNOTSUPP; + goto out; + } + + ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq)); + if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP)) + ret = -EIO; + out: + return ret; +} + +static int raw_file_fmt_flush(struct loop_file_fmt *lo_fmt) +{ + struct loop_device *lo = loop_file_fmt_get_lo(lo_fmt); + struct file *file = lo->lo_backing_file; + int ret = vfs_fsync(file, 0); + if (unlikely(ret && ret != -EINVAL)) + ret = -EIO; + + return ret; +} + +static loff_t raw_file_fmt_sector_size(struct loop_file_fmt *lo_fmt) +{ + struct loop_device *lo = loop_file_fmt_get_lo(lo_fmt); + loff_t loopsize; + + /* Compute loopsize in bytes */ + loopsize = i_size_read(lo->lo_backing_file->f_mapping->host); + if (lo->lo_offset > 0) + loopsize -= lo->lo_offset; + /* offset is beyond i_size, weird but possible */ + if (loopsize < 0) + return 0; + + if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize) + loopsize = lo->lo_sizelimit; + + /* + * Unfortunately, if we want to do I/O on the device, + * the number of 512-byte sectors has to fit into a sector_t. 
+ */ + return loopsize >> 9; +} + +static struct loop_file_fmt_ops raw_file_fmt_ops = { + .init = NULL, + .exit = NULL, + .read = raw_file_fmt_read, + .write = raw_file_fmt_write, + .read_aio = raw_file_fmt_read_aio, + .write_aio = raw_file_fmt_write_aio, + .discard = raw_file_fmt_discard, + .flush = raw_file_fmt_flush, + .sector_size = raw_file_fmt_sector_size +}; + +static struct loop_file_fmt_driver raw_file_fmt_driver = { + .name = "RAW", + .file_fmt_type = LO_FILE_FMT_RAW, + .ops = &raw_file_fmt_ops, + .owner = THIS_MODULE +}; + +static int __init loop_file_fmt_raw_init(void) +{ + printk(KERN_INFO "loop_file_fmt_raw: init loop device RAW file format " + "driver"); + return loop_file_fmt_register_driver(&raw_file_fmt_driver); +} + +static void __exit loop_file_fmt_raw_exit(void) +{ + printk(KERN_INFO "loop_file_fmt_raw: exit loop device RAW file format " + "driver"); + loop_file_fmt_unregister_driver(&raw_file_fmt_driver); +} + +module_init(loop_file_fmt_raw_init); +module_exit(loop_file_fmt_raw_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Manuel Bentele "); +MODULE_DESCRIPTION("Loop device RAW file format driver"); +MODULE_SOFTDEP("pre: loop"); diff --git a/loop_main.c b/loop_main.c new file mode 100644 index 0000000..7c1bb19 --- /dev/null +++ b/loop_main.c @@ -0,0 +1,2173 @@ +/* + * loop_main.c + * + * Written by Theodore Ts'o, 3/29/93 + * + * Copyright 1993 by Theodore Ts'o. Redistribution of this file is + * permitted under the GNU General Public License. + * + * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993 + * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996 + * + * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994 + * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996 + * + * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997 + * + * Added devfs support - Richard Gooch 16-Jan-1998 + * + * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998 + * + * Loadable modules and other fixes by AK, 1998 + * + * Make real block number available to downstream transfer functions, enables + * CBC (and relatives) mode encryption requiring unique IVs per data block. + * Reed H. Petty, rhp@draper.net + * + * Maximum number of loop devices now dynamic via max_loop module parameter. + * Russell Kroll 19990701 + * + * Maximum number of loop devices when compiled-in now selectable by passing + * max_loop=<1-255> to the kernel on boot. + * Erik I. Bolsø, , Oct 31, 1999 + * + * Completely rewrite request handling to be make_request_fn style and + * non blocking, pushing work to a helper thread. Lots of fixes from + * Al Viro too. + * Jens Axboe , Nov 2000 + * + * Support up to 256 loop devices + * Heinz Mauelshagen , Feb 2002 + * + * Support for falling back on the write file operation when the address space + * operations write_begin is not available on the backing filesystem. + * Anton Altaparmakov, 16 Feb 2005 + * + * Support for using file formats. + * Manuel Bentele , 2019 + * + * Still To Fix: + * - Advisory locking is ignored here. 
+ * - Should use an own CAP_* category instead of CAP_SYS_ADMIN + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "loop_file_fmt.h" +#include "loop_main.h" + +#include + +static DEFINE_IDR(loop_index_idr); +static DEFINE_MUTEX(loop_ctl_mutex); + +static int max_part; +static int part_shift; + +static int transfer_xor(struct loop_device *lo, int cmd, + struct page *raw_page, unsigned raw_off, + struct page *loop_page, unsigned loop_off, + int size, sector_t real_block) +{ + char *raw_buf = kmap_atomic(raw_page) + raw_off; + char *loop_buf = kmap_atomic(loop_page) + loop_off; + char *in, *out, *key; + int i, keysize; + + if (cmd == READ) { + in = raw_buf; + out = loop_buf; + } else { + in = loop_buf; + out = raw_buf; + } + + key = lo->lo_encrypt_key; + keysize = lo->lo_encrypt_key_size; + for (i = 0; i < size; i++) + *out++ = *in++ ^ key[(i & 511) % keysize]; + + kunmap_atomic(loop_buf); + kunmap_atomic(raw_buf); + cond_resched(); + return 0; +} + +static int xor_init(struct loop_device *lo, const struct loop_info64 *info) +{ + if (unlikely(info->lo_encrypt_key_size <= 0)) + return -EINVAL; + return 0; +} + +static struct loop_func_table none_funcs = { + .number = LO_CRYPT_NONE, +}; + +static struct loop_func_table xor_funcs = { + .number = LO_CRYPT_XOR, + .transfer = transfer_xor, + .init = xor_init +}; + +/* xfer_funcs[0] is special - its release function is never called */ +static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = { + &none_funcs, + &xor_funcs +}; + +static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file) +{ + loff_t loopsize; + + /* Compute loopsize in bytes */ + loopsize = i_size_read(file->f_mapping->host); + if (offset > 0) + loopsize -= offset; + /* offset is beyond i_size, weird but possible */ + if (loopsize < 0) + return 0; + + if (sizelimit > 0 && sizelimit < loopsize) + loopsize = sizelimit; + /* + * Unfortunately, if we want to do I/O on the device, + * the number of 512-byte sectors has to fit into a sector_t. + */ + return loopsize >> 9; +} + +static loff_t get_loop_size(struct loop_device *lo, struct file *file) +{ + return get_size(lo->lo_offset, lo->lo_sizelimit, file); +} + +static void __loop_update_dio(struct loop_device *lo, bool dio) +{ + struct file *file = lo->lo_backing_file; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + unsigned short sb_bsize = 0; + unsigned dio_align = 0; + bool use_dio; + + if (inode->i_sb->s_bdev) { + sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev); + dio_align = sb_bsize - 1; + } + + /* + * We support direct I/O only if lo_offset is aligned with the + * logical I/O size of backing device, and the logical block + * size of loop is bigger than the backing device's and the loop + * needn't transform transfer. 
+ * + * TODO: the above condition may be loosed in the future, and + * direct I/O may be switched runtime at that time because most + * of requests in sane applications should be PAGE_SIZE aligned + */ + if (dio) { + if (queue_logical_block_size(lo->lo_queue) >= sb_bsize && + !(lo->lo_offset & dio_align) && + mapping->a_ops->direct_IO && + !lo->transfer) + use_dio = true; + else + use_dio = false; + } else { + use_dio = false; + } + + if (lo->use_dio == use_dio) + return; + + /* flush dirty pages before changing direct IO */ + loop_file_fmt_flush(lo->lo_fmt); + + /* + * The flag of LO_FLAGS_DIRECT_IO is handled similarly with + * LO_FLAGS_READ_ONLY, both are set from kernel, and losetup + * will get updated by ioctl(LOOP_GET_STATUS) + */ + blk_mq_freeze_queue(lo->lo_queue); + lo->use_dio = use_dio; + if (use_dio) { + blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue); + lo->lo_flags |= LO_FLAGS_DIRECT_IO; + } else { + blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue); + lo->lo_flags &= ~LO_FLAGS_DIRECT_IO; + } + blk_mq_unfreeze_queue(lo->lo_queue); +} + +static int +figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit) +{ + loff_t size = loop_file_fmt_sector_size(lo->lo_fmt); + sector_t x = (sector_t)size; + struct block_device *bdev = lo->lo_device; + + if (unlikely((loff_t)x != size)) + return -EFBIG; + if (lo->lo_offset != offset) + lo->lo_offset = offset; + if (lo->lo_sizelimit != sizelimit) + lo->lo_sizelimit = sizelimit; + + set_capacity(lo->lo_disk, x); + bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9); + /* let user-space know about the new size */ + kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); + return 0; +} + +static void lo_complete_rq(struct request *rq) +{ + struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); + blk_status_t ret = BLK_STS_OK; + + if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) || + req_op(rq) != REQ_OP_READ) { + if (cmd->ret < 0) + ret = BLK_STS_IOERR; + goto end_io; + } + + /* + * Short READ - if we got some data, advance our request and + * retry it. If we got no data, end the rest with EIO. 
+ */ + if (cmd->ret) { + blk_update_request(rq, BLK_STS_OK, cmd->ret); + cmd->ret = 0; + blk_mq_requeue_request(rq, true); + } else { + if (cmd->use_aio) { + struct bio *bio = rq->bio; + + while (bio) { + zero_fill_bio(bio); + bio = bio->bi_next; + } + } + ret = BLK_STS_IOERR; +end_io: + blk_mq_end_request(rq, ret); + } +} + +static int do_req_filebacked(struct loop_device *lo, struct request *rq) +{ + struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); + + switch (req_op(rq)) { + case REQ_OP_FLUSH: + return loop_file_fmt_flush(lo->lo_fmt); + case REQ_OP_DISCARD: + case REQ_OP_WRITE_ZEROES: + return loop_file_fmt_discard(lo->lo_fmt, rq); + case REQ_OP_WRITE: + if (cmd->use_aio) + return loop_file_fmt_write_aio(lo->lo_fmt, rq); + else + return loop_file_fmt_write(lo->lo_fmt, rq); + case REQ_OP_READ: + if (cmd->use_aio) + return loop_file_fmt_read_aio(lo->lo_fmt, rq); + else + return loop_file_fmt_read(lo->lo_fmt, rq); + default: + WARN_ON_ONCE(1); + return -EIO; + break; + } +} + +static inline void loop_update_dio(struct loop_device *lo) +{ + __loop_update_dio(lo, io_is_direct(lo->lo_backing_file) | + lo->use_dio); +} + +static void loop_reread_partitions(struct loop_device *lo, + struct block_device *bdev) +{ + int rc; + + rc = blkdev_reread_part(bdev); + if (rc) + pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n", + __func__, lo->lo_number, lo->lo_file_name, rc); +} + +static inline int is_loop_device(struct file *file) +{ + struct inode *i = file->f_mapping->host; + + return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; +} + +static int loop_validate_file(struct file *file, struct block_device *bdev) +{ + struct inode *inode = file->f_mapping->host; + struct file *f = file; + + /* Avoid recursion */ + while (is_loop_device(f)) { + struct loop_device *l; + + if (f->f_mapping->host->i_bdev == bdev) + return -EBADF; + + l = f->f_mapping->host->i_bdev->bd_disk->private_data; + if (l->lo_state != Lo_bound) { + return -EINVAL; + } + f = l->lo_backing_file; + } + if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) + return -EINVAL; + return 0; +} + +/* + * loop_change_fd switched the backing store of a loopback device to + * a new file. This is useful for operating system installers to free up + * the original file and in High Availability environments to switch to + * an alternative location for the content in case of server meltdown. + * This can only work if the loop device is used read-only, and if the + * new backing store is the same size and type as the old backing store. + */ +static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, + unsigned int arg) +{ + struct file *file = NULL, *old_file; + int error; + bool partscan; + + error = mutex_lock_killable(&loop_ctl_mutex); + if (error) + return error; + error = -ENXIO; + if (lo->lo_state != Lo_bound) + goto out_err; + + /* the loop device has to be read-only */ + error = -EINVAL; + if (!(lo->lo_flags & LO_FLAGS_READ_ONLY)) + goto out_err; + + error = -EBADF; + file = fget(arg); + if (!file) + goto out_err; + + error = loop_validate_file(file, bdev); + if (error) + goto out_err; + + old_file = lo->lo_backing_file; + + error = -EINVAL; + + /* size of the new backing store needs to be the same */ + if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) + goto out_err; + + /* and ... 
switch */ + blk_mq_freeze_queue(lo->lo_queue); + mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask); + lo->lo_backing_file = file; + lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping); + mapping_set_gfp_mask(file->f_mapping, + lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); + loop_update_dio(lo); + blk_mq_unfreeze_queue(lo->lo_queue); + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; + mutex_unlock(&loop_ctl_mutex); + /* + * We must drop file reference outside of loop_ctl_mutex as dropping + * the file ref can take bd_mutex which creates circular locking + * dependency. + */ + fput(old_file); + if (partscan) + loop_reread_partitions(lo, bdev); + return 0; + +out_err: + mutex_unlock(&loop_ctl_mutex); + if (file) + fput(file); + return error; +} + +/* loop sysfs attributes */ + +static ssize_t loop_attr_show(struct device *dev, char *page, + ssize_t (*callback)(struct loop_device *, char *)) +{ + struct gendisk *disk = dev_to_disk(dev); + struct loop_device *lo = disk->private_data; + + return callback(lo, page); +} + +#define LOOP_ATTR_RO(_name) \ +static ssize_t loop_attr_##_name##_show(struct loop_device *, char *); \ +static ssize_t loop_attr_do_show_##_name(struct device *d, \ + struct device_attribute *attr, char *b) \ +{ \ + return loop_attr_show(d, b, loop_attr_##_name##_show); \ +} \ +static struct device_attribute loop_attr_##_name = \ + __ATTR(_name, 0444, loop_attr_do_show_##_name, NULL); + +static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) +{ + ssize_t ret; + char *p = NULL; + + spin_lock_irq(&lo->lo_lock); + if (lo->lo_backing_file) + p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1); + spin_unlock_irq(&lo->lo_lock); + + if (IS_ERR_OR_NULL(p)) + ret = PTR_ERR(p); + else { + ret = strlen(p); + memmove(buf, p, ret); + buf[ret++] = '\n'; + buf[ret] = 0; + } + + return ret; +} + +static ssize_t __print_file_fmt_type(__u32 file_fmt_type, char* buf) { + switch(file_fmt_type) { + case LO_FILE_FMT_RAW: + sprintf(buf, "%s\n", "RAW"); + break; + case LO_FILE_FMT_QCOW: + sprintf(buf, "%s\n", "QCOW"); + break; + case LO_FILE_FMT_VDI: + sprintf(buf, "%s\n", "VDI"); + break; + case LO_FILE_FMT_VMDK: + sprintf(buf, "%s\n", "VMDK"); + break; + default: + sprintf(buf, "%s\n", "ERROR: Unsupported loop file format!"); + break; + } + + return strlen(buf); +} + +static ssize_t loop_attr_file_fmt_type_show(struct loop_device *lo, char *buf) +{ + return __print_file_fmt_type(lo->lo_fmt->file_fmt_type, buf); +} + +static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf) +{ + return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset); +} + +static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf) +{ + return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); +} + +static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf) +{ + int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR); + + return sprintf(buf, "%s\n", autoclear ? "1" : "0"); +} + +static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf) +{ + int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN); + + return sprintf(buf, "%s\n", partscan ? "1" : "0"); +} + +static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf) +{ + int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO); + + return sprintf(buf, "%s\n", dio ? 
"1" : "0"); +} + +LOOP_ATTR_RO(backing_file); +LOOP_ATTR_RO(file_fmt_type); +LOOP_ATTR_RO(offset); +LOOP_ATTR_RO(sizelimit); +LOOP_ATTR_RO(autoclear); +LOOP_ATTR_RO(partscan); +LOOP_ATTR_RO(dio); + +static struct attribute *loop_attrs[] = { + &loop_attr_backing_file.attr, + &loop_attr_file_fmt_type.attr, + &loop_attr_offset.attr, + &loop_attr_sizelimit.attr, + &loop_attr_autoclear.attr, + &loop_attr_partscan.attr, + &loop_attr_dio.attr, + NULL, +}; + +static struct attribute_group loop_attribute_group = { + .name = "loop", + .attrs= loop_attrs, +}; + +static void loop_sysfs_init(struct loop_device *lo) +{ + lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, + &loop_attribute_group); +} + +static void loop_sysfs_exit(struct loop_device *lo) +{ + if (lo->sysfs_inited) + sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, + &loop_attribute_group); +} + +static void loop_config_discard(struct loop_device *lo) +{ + struct file *file = lo->lo_backing_file; + struct inode *inode = file->f_mapping->host; + struct request_queue *q = lo->lo_queue; + + /* + * We use punch hole to reclaim the free space used by the + * image a.k.a. discard. However we do not support discard if + * encryption is enabled, because it may give an attacker + * useful information. + */ + if ((!file->f_op->fallocate) || + lo->lo_encrypt_key_size) { + q->limits.discard_granularity = 0; + q->limits.discard_alignment = 0; + blk_queue_max_discard_sectors(q, 0); + blk_queue_max_write_zeroes_sectors(q, 0); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); + return; + } + + q->limits.discard_granularity = inode->i_sb->s_blocksize; + q->limits.discard_alignment = 0; + + blk_queue_max_discard_sectors(q, UINT_MAX >> 9); + blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); +} + +static void loop_unprepare_queue(struct loop_device *lo) +{ + kthread_flush_worker(&lo->worker); + kthread_stop(lo->worker_task); +} + +static int loop_kthread_worker_fn(void *worker_ptr) +{ + current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO; + return kthread_worker_fn(worker_ptr); +} + +static int loop_prepare_queue(struct loop_device *lo) +{ + kthread_init_worker(&lo->worker); + lo->worker_task = kthread_run(loop_kthread_worker_fn, + &lo->worker, "loop%d", lo->lo_number); + if (IS_ERR(lo->worker_task)) + return -ENOMEM; + set_user_nice(lo->worker_task, MIN_NICE); + return 0; +} + +static int loop_set_fd(struct loop_device *lo, fmode_t mode, + struct block_device *bdev, unsigned int arg) +{ + struct file *file; + struct inode *inode; + struct address_space *mapping; + int lo_flags = 0; + int error; + loff_t size; + bool partscan; + + /* This is safe, since we have a reference from open(). 
*/ + __module_get(THIS_MODULE); + + error = -EBADF; + file = fget(arg); + if (!file) + goto out; + + error = mutex_lock_killable(&loop_ctl_mutex); + if (error) + goto out_putf; + + error = -EBUSY; + if (lo->lo_state != Lo_unbound) + goto out_unlock; + + error = loop_validate_file(file, bdev); + if (error) + goto out_unlock; + + mapping = file->f_mapping; + inode = mapping->host; + + if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) || + !file->f_op->write_iter) + lo_flags |= LO_FLAGS_READ_ONLY; + + set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); + + lo->use_dio = false; + lo->lo_device = bdev; + lo->lo_flags = lo_flags; + lo->lo_backing_file = file; + lo->transfer = NULL; + lo->ioctl = NULL; + lo->lo_sizelimit = 0; + lo->old_gfp_mask = mapping_gfp_mask(mapping); + mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); + + if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) + blk_queue_write_cache(lo->lo_queue, true, false); + + loop_update_dio(lo); + + error = loop_file_fmt_init(lo->lo_fmt, LO_FILE_FMT_RAW); + if (error) + goto out_unlock; + + size = loop_file_fmt_sector_size(lo->lo_fmt); + + error = -EFBIG; + if ((loff_t)(sector_t)size != size) + goto out_unlock; + error = loop_prepare_queue(lo); + if (error) + goto out_unlock; + + set_capacity(lo->lo_disk, size); + bd_set_size(bdev, size << 9); + loop_sysfs_init(lo); + /* let user-space know about the new size */ + kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); + + set_blocksize(bdev, S_ISBLK(inode->i_mode) ? + block_size(inode->i_bdev) : PAGE_SIZE); + + lo->lo_state = Lo_bound; + if (part_shift) + lo->lo_flags |= LO_FLAGS_PARTSCAN; + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; + + /* Grab the block_device to prevent its destruction after we + * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev). + */ + bdgrab(bdev); + mutex_unlock(&loop_ctl_mutex); + if (partscan) + loop_reread_partitions(lo, bdev); + return 0; + +out_unlock: + mutex_unlock(&loop_ctl_mutex); +out_putf: + fput(file); +out: + /* This is safe: open() is still holding a reference. 
*/ + module_put(THIS_MODULE); + return error; +} + +static int +loop_release_xfer(struct loop_device *lo) +{ + int err = 0; + struct loop_func_table *xfer = lo->lo_encryption; + + if (xfer) { + if (xfer->release) + err = xfer->release(lo); + lo->transfer = NULL; + lo->lo_encryption = NULL; + module_put(xfer->owner); + } + return err; +} + +static int +loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, + const struct loop_info64 *i) +{ + int err = 0; + + if (xfer) { + struct module *owner = xfer->owner; + + if (!try_module_get(owner)) + return -EINVAL; + if (xfer->init) + err = xfer->init(lo, i); + if (err) + module_put(owner); + else + lo->lo_encryption = xfer; + } + return err; +} + +static int __loop_clr_fd(struct loop_device *lo, bool release) +{ + struct file *filp = NULL; + gfp_t gfp = lo->old_gfp_mask; + struct block_device *bdev = lo->lo_device; + int err = 0; + bool partscan = false; + int lo_number; + + mutex_lock(&loop_ctl_mutex); + if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) { + err = -ENXIO; + goto out_unlock; + } + + filp = lo->lo_backing_file; + if (filp == NULL) { + err = -EINVAL; + goto out_unlock; + } + + /* freeze request queue during the transition */ + blk_mq_freeze_queue(lo->lo_queue); + + loop_file_fmt_exit(lo->lo_fmt); + + spin_lock_irq(&lo->lo_lock); + lo->lo_backing_file = NULL; + spin_unlock_irq(&lo->lo_lock); + + loop_release_xfer(lo); + lo->transfer = NULL; + lo->ioctl = NULL; + lo->lo_device = NULL; + lo->lo_encryption = NULL; + lo->lo_offset = 0; + lo->lo_sizelimit = 0; + lo->lo_encrypt_key_size = 0; + memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); + memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); + memset(lo->lo_file_name, 0, LO_NAME_SIZE); + blk_queue_logical_block_size(lo->lo_queue, 512); + blk_queue_physical_block_size(lo->lo_queue, 512); + blk_queue_io_min(lo->lo_queue, 512); + if (bdev) { + bdput(bdev); + invalidate_bdev(bdev); + bdev->bd_inode->i_mapping->wb_err = 0; + } + set_capacity(lo->lo_disk, 0); + loop_sysfs_exit(lo); + if (bdev) { + bd_set_size(bdev, 0); + /* let user-space know about this change */ + kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); + } + mapping_set_gfp_mask(filp->f_mapping, gfp); + /* This is safe: open() is still holding a reference. */ + module_put(THIS_MODULE); + blk_mq_unfreeze_queue(lo->lo_queue); + + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev; + lo_number = lo->lo_number; + loop_unprepare_queue(lo); +out_unlock: + mutex_unlock(&loop_ctl_mutex); + if (partscan) { + /* + * bd_mutex has been held already in release path, so don't + * acquire it if this function is called in such case. + * + * If the reread partition isn't from release path, lo_refcnt + * must be at least one and it can only become zero when the + * current holder is released. + */ + if (release) + err = __blkdev_reread_part(bdev); + else + err = blkdev_reread_part(bdev); + if (err) + pr_warn("%s: partition scan of loop%d failed (rc=%d)\n", + __func__, lo_number, err); + /* Device is gone, no point in returning error */ + err = 0; + } + + /* + * lo->lo_state is set to Lo_unbound here after above partscan has + * finished. + * + * There cannot be anybody else entering __loop_clr_fd() as + * lo->lo_backing_file is already cleared and Lo_rundown state + * protects us from all the other places trying to change the 'lo' + * device. 
+	 */
+	mutex_lock(&loop_ctl_mutex);
+	lo->lo_flags = 0;
+	if (!part_shift)
+		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
+	lo->lo_state = Lo_unbound;
+	mutex_unlock(&loop_ctl_mutex);
+
+	/*
+	 * Need not hold loop_ctl_mutex to fput backing file.
+	 * Calling fput holding loop_ctl_mutex triggers a circular
+	 * lock dependency possibility warning as fput can take
+	 * bd_mutex which is usually taken before loop_ctl_mutex.
+	 */
+	if (filp)
+		fput(filp);
+	return err;
+}
+
+static int loop_clr_fd(struct loop_device *lo)
+{
+	int err;
+
+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
+	if (lo->lo_state != Lo_bound) {
+		mutex_unlock(&loop_ctl_mutex);
+		return -ENXIO;
+	}
+	/*
+	 * If we've explicitly asked to tear down the loop device,
+	 * and it has an elevated reference count, set it for auto-teardown when
+	 * the last reference goes away. This stops $!~#$@ udev from
+	 * preventing teardown because it decided that it needs to run blkid on
+	 * the loopback device whenever they appear. xfstests is notorious for
+	 * failing tests because blkid via udev races with a losetup
+	 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
+	 * command to fail with EBUSY.
+	 */
+	if (atomic_read(&lo->lo_refcnt) > 1) {
+		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+		mutex_unlock(&loop_ctl_mutex);
+		return 0;
+	}
+	lo->lo_state = Lo_rundown;
+	mutex_unlock(&loop_ctl_mutex);
+
+	return __loop_clr_fd(lo, false);
+}
+
+static int
+loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+{
+	int err;
+	struct loop_func_table *xfer;
+	kuid_t uid = current_uid();
+	struct block_device *bdev;
+	bool partscan = false;
+
+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
+	if (lo->lo_encrypt_key_size &&
+	    !uid_eq(lo->lo_key_owner, uid) &&
+	    !capable(CAP_SYS_ADMIN)) {
+		err = -EPERM;
+		goto out_unlock;
+	}
+	if (lo->lo_state != Lo_bound) {
+		err = -ENXIO;
+		goto out_unlock;
+	}
+	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) {
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (lo->lo_offset != info->lo_offset ||
+	    lo->lo_sizelimit != info->lo_sizelimit) {
+		sync_blockdev(lo->lo_device);
+		kill_bdev(lo->lo_device);
+	}
+
+	/* I/O needs to be drained during transfer transition */
+	blk_mq_freeze_queue(lo->lo_queue);
+
+	err = loop_release_xfer(lo);
+	if (err)
+		goto out_unfreeze;
+
+	if (info->lo_encrypt_type) {
+		unsigned int type = info->lo_encrypt_type;
+
+		if (type >= MAX_LO_CRYPT) {
+			err = -EINVAL;
+			goto out_unfreeze;
+		}
+		xfer = xfer_funcs[type];
+		if (xfer == NULL) {
+			err = -EINVAL;
+			goto out_unfreeze;
+		}
+	} else
+		xfer = NULL;
+
+	err = loop_init_xfer(lo, xfer, info);
+	if (err)
+		goto out_unfreeze;
+
+	if (lo->lo_offset != info->lo_offset ||
+	    lo->lo_sizelimit != info->lo_sizelimit) {
+		/* kill_bdev should have truncated all the pages */
+		if (lo->lo_device->bd_inode->i_mapping->nrpages) {
+			err = -EAGAIN;
+			pr_warn("%s: loop%d (%s) still has dirty pages (nrpages=%lu)\n",
+				__func__, lo->lo_number, lo->lo_file_name,
+				lo->lo_device->bd_inode->i_mapping->nrpages);
+			goto out_unfreeze;
+		}
+		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
+			err = -EFBIG;
+			goto out_unfreeze;
+		}
+	}
+
+	if (lo->lo_fmt->file_fmt_type != info->lo_file_fmt_type) {
+		err = loop_file_fmt_change(lo->lo_fmt, info->lo_file_fmt_type);
+		if (err)
+			goto out_unfreeze;
+
+		/* After change of the file format, recalculate the capacity of
+		 * the loop device.
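+		 * A QCOW image's virtual disk size usually differs from the
+		 * size of its backing file, so the previous capacity is stale.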
figure_loop_size() automatically calls the + * sector_size function of the corresponding loop file format + * driver to determine the new capacity. */ + if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { + err = -EFBIG; + goto out_unfreeze; + } + } + + loop_config_discard(lo); + + memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); + memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); + lo->lo_file_name[LO_NAME_SIZE-1] = 0; + lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; + + if (!xfer) + xfer = &none_funcs; + lo->transfer = xfer->transfer; + lo->ioctl = xfer->ioctl; + + if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) != + (info->lo_flags & LO_FLAGS_AUTOCLEAR)) + lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; + + lo->lo_encrypt_key_size = info->lo_encrypt_key_size; + lo->lo_init[0] = info->lo_init[0]; + lo->lo_init[1] = info->lo_init[1]; + lo->lo_fmt->file_fmt_type = info->lo_file_fmt_type; + if (info->lo_encrypt_key_size) { + memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, + info->lo_encrypt_key_size); + lo->lo_key_owner = uid; + } + + /* update dio if lo_offset or transfer is changed */ + __loop_update_dio(lo, lo->use_dio); + +out_unfreeze: + blk_mq_unfreeze_queue(lo->lo_queue); + + if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) && + !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { + lo->lo_flags |= LO_FLAGS_PARTSCAN; + lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; + bdev = lo->lo_device; + partscan = true; + } +out_unlock: + mutex_unlock(&loop_ctl_mutex); + if (partscan) + loop_reread_partitions(lo, bdev); + + return err; +} + +static int +loop_get_status(struct loop_device *lo, struct loop_info64 *info) +{ + struct path path; + struct kstat stat; + int ret; + + ret = mutex_lock_killable(&loop_ctl_mutex); + if (ret) + return ret; + if (lo->lo_state != Lo_bound) { + mutex_unlock(&loop_ctl_mutex); + return -ENXIO; + } + + memset(info, 0, sizeof(*info)); + info->lo_number = lo->lo_number; + info->lo_offset = lo->lo_offset; + info->lo_sizelimit = lo->lo_sizelimit; + info->lo_flags = lo->lo_flags; + memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE); + memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE); + info->lo_encrypt_type = + lo->lo_encryption ? lo->lo_encryption->number : 0; + if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) { + info->lo_encrypt_key_size = lo->lo_encrypt_key_size; + memcpy(info->lo_encrypt_key, lo->lo_encrypt_key, + lo->lo_encrypt_key_size); + } + info->lo_file_fmt_type = lo->lo_fmt->file_fmt_type; + + /* Drop loop_ctl_mutex while we call into the filesystem. 
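+	 * vfs_getattr() can block for a long time on a remote or congested
+	 * backing filesystem; path_get() keeps the path pinned while the
+	 * mutex is dropped so other loop ioctls are not stalled meanwhile.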
+	 */
+	path = lo->lo_backing_file->f_path;
+	path_get(&path);
+	mutex_unlock(&loop_ctl_mutex);
+	ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
+	if (!ret) {
+		info->lo_device = huge_encode_dev(stat.dev);
+		info->lo_inode = stat.ino;
+		info->lo_rdevice = huge_encode_dev(stat.rdev);
+	}
+	path_put(&path);
+	return ret;
+}
+
+static void
+loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
+{
+	memset(info64, 0, sizeof(*info64));
+	info64->lo_number = info->lo_number;
+	info64->lo_device = info->lo_device;
+	info64->lo_inode = info->lo_inode;
+	info64->lo_rdevice = info->lo_rdevice;
+	info64->lo_offset = info->lo_offset;
+	info64->lo_sizelimit = 0;
+	info64->lo_encrypt_type = info->lo_encrypt_type;
+	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
+	info64->lo_flags = info->lo_flags;
+	info64->lo_init[0] = info->lo_init[0];
+	info64->lo_init[1] = info->lo_init[1];
+	info64->lo_file_fmt_type = info->lo_file_fmt_type;
+	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
+		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
+	else
+		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
+	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
+}
+
+static int
+loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
+{
+	memset(info, 0, sizeof(*info));
+	info->lo_number = info64->lo_number;
+	info->lo_device = info64->lo_device;
+	info->lo_inode = info64->lo_inode;
+	info->lo_rdevice = info64->lo_rdevice;
+	info->lo_offset = info64->lo_offset;
+	info->lo_encrypt_type = info64->lo_encrypt_type;
+	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
+	info->lo_flags = info64->lo_flags;
+	info->lo_init[0] = info64->lo_init[0];
+	info->lo_init[1] = info64->lo_init[1];
+	info->lo_file_fmt_type = info64->lo_file_fmt_type;
+	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
+		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
+	else
+		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
+	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
+
+	/* error in case values were truncated */
+	if (info->lo_device != info64->lo_device ||
+	    info->lo_rdevice != info64->lo_rdevice ||
+	    info->lo_inode != info64->lo_inode ||
+	    info->lo_offset != info64->lo_offset)
+		return -EOVERFLOW;
+
+	return 0;
+}
+
+static int
+loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
+{
+	struct loop_info info;
+	struct loop_info64 info64;
+	int err;
+
+	/* backward compatibility: copy everything except the file format type
+	 * field */
+	err = copy_from_user(&info, arg,
+			     sizeof(info) - sizeof(info.lo_file_fmt_type));
+	if (err)
+		return -EFAULT;
+
+	if (info.lo_flags & LO_FLAGS_FILE_FMT) {
+		/* copy everything from the user space */
+		err = copy_from_user(&info, arg, sizeof(info));
+		if (err)
+			return -EFAULT;
+	} else {
+		/* default the field that was not copied from user space so
+		 * that loop_info64_from_old() does not propagate stack
+		 * garbage */
+		info.lo_file_fmt_type = LO_FILE_FMT_RAW;
+	}
+
+	loop_info64_from_old(&info, &info64);
+	return loop_set_status(lo, &info64);
+}
+
+static int
+loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
+{
+	struct loop_info64 info64;
+	int err;
+
+	/* backward compatibility: copy everything except the file format type
+	 * field */
+	err = copy_from_user(&info64, arg,
+			     sizeof(info64) - sizeof(info64.lo_file_fmt_type));
+	if (err)
+		return -EFAULT;
+
+	if (info64.lo_flags & LO_FLAGS_FILE_FMT) {
+		/* copy everything from the user space */
+		err = copy_from_user(&info64, arg, sizeof(info64));
+		if (err)
+			return -EFAULT;
+	} else {
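+		/*
+		 * Legacy callers never set LO_FLAGS_FILE_FMT, so fall back
+		 * to the RAW format driver.  A format-aware caller would
+		 * request another driver roughly like this (hypothetical
+		 * userspace sketch):
+		 *
+		 *	info64.lo_flags |= LO_FLAGS_FILE_FMT;
+		 *	info64.lo_file_fmt_type = LO_FILE_FMT_QCOW;
+		 *	ioctl(loop_fd, LOOP_SET_STATUS64, &info64);
+		 */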
+		info64.lo_file_fmt_type = LO_FILE_FMT_RAW;
+	}
+
+	return loop_set_status(lo, &info64);
+}
+
+static int
+loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
+	struct loop_info info;
+	struct loop_info64 info64;
+	int lo_flags;
+	int err;
+
+	if (!arg)
+		return -EINVAL;
+
+	/* backward compatibility: copy everything except the file format type
+	 * field */
+	err = copy_from_user(&info, arg,
+			     sizeof(info) - sizeof(info.lo_file_fmt_type));
+	if (err)
+		return -EFAULT;
+
+	lo_flags = info.lo_flags;
+
+	err = loop_get_status(lo, &info64);
+	if (!err)
+		err = loop_info64_to_old(&info64, &info);
+	if (err)
+		return err;
+
+	if (lo_flags & LO_FLAGS_FILE_FMT) {
+		/* copy entire structure to user space because file format
+		 * support is available */
+		err = copy_to_user(arg, &info, sizeof(info));
+	} else {
+		/* copy normal structure to user space */
+		err = copy_to_user(arg, &info,
+				   sizeof(info) - sizeof(info.lo_file_fmt_type));
+	}
+
+	if (err)
+		return -EFAULT;
+
+	return 0;
+}
+
+static int
+loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
+	struct loop_info64 info64;
+	u32 lo_flags;
+	int err;
+
+	if (!arg)
+		return -EINVAL;
+
+	/* backward compatibility: copy everything except the file format type
+	 * field */
+	err = copy_from_user(&info64, arg,
+			     sizeof(info64) - sizeof(info64.lo_file_fmt_type));
+	if (err)
+		return -EFAULT;
+
+	lo_flags = info64.lo_flags;
+
+	err = loop_get_status(lo, &info64);
+	if (err)
+		return err;
+
+	if (lo_flags & LO_FLAGS_FILE_FMT) {
+		/* copy entire structure to user space because file format
+		 * support is available */
+		err = copy_to_user(arg, &info64, sizeof(info64));
+	} else {
+		/* copy normal structure to user space */
+		err = copy_to_user(arg, &info64,
+				   sizeof(info64) - sizeof(info64.lo_file_fmt_type));
+	}
+
+	if (err)
+		return -EFAULT;
+
+	return 0;
+}
+
+static int loop_set_capacity(struct loop_device *lo)
+{
+	if (unlikely(lo->lo_state != Lo_bound))
+		return -ENXIO;
+
+	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
+}
+
+static int loop_set_dio(struct loop_device *lo, unsigned long arg)
+{
+	int error = -ENXIO;
+	if (lo->lo_state != Lo_bound)
+		goto out;
+
+	__loop_update_dio(lo, !!arg);
+	if (lo->use_dio == !!arg)
+		return 0;
+	error = -EINVAL;
+ out:
+	return error;
+}
+
+static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+{
+	int err = 0;
+
+	if (lo->lo_state != Lo_bound)
+		return -ENXIO;
+
+	if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
+		return -EINVAL;
+
+	if (lo->lo_queue->limits.logical_block_size != arg) {
+		sync_blockdev(lo->lo_device);
+		kill_bdev(lo->lo_device);
+	}
+
+	blk_mq_freeze_queue(lo->lo_queue);
+
+	/* kill_bdev should have truncated all the pages */
+	if (lo->lo_queue->limits.logical_block_size != arg &&
+	    lo->lo_device->bd_inode->i_mapping->nrpages) {
+		err = -EAGAIN;
+		pr_warn("%s: loop%d (%s) still has dirty pages (nrpages=%lu)\n",
+			__func__, lo->lo_number, lo->lo_file_name,
+			lo->lo_device->bd_inode->i_mapping->nrpages);
+		goto out_unfreeze;
+	}
+
+	blk_queue_logical_block_size(lo->lo_queue, arg);
+	blk_queue_physical_block_size(lo->lo_queue, arg);
+	blk_queue_io_min(lo->lo_queue, arg);
+	loop_update_dio(lo);
+out_unfreeze:
+	blk_mq_unfreeze_queue(lo->lo_queue);
+
+	return err;
+}
+
+static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
+			   unsigned long arg)
+{
+	int err;
+
+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
+	switch (cmd) {
+	case LOOP_SET_CAPACITY:
+		err = loop_set_capacity(lo);
+		break;
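+	/*
+	 * LOOP_SET_DIRECT_IO treats arg as a boolean, so a (hypothetical)
+	 * userspace caller enables direct I/O with
+	 *
+	 *	ioctl(loop_fd, LOOP_SET_DIRECT_IO, 1UL);
+	 *
+	 * and passes 0 to switch back to buffered I/O.
+	 */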
case LOOP_SET_DIRECT_IO: + err = loop_set_dio(lo, arg); + break; + case LOOP_SET_BLOCK_SIZE: + err = loop_set_block_size(lo, arg); + break; + default: + err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL; + } + mutex_unlock(&loop_ctl_mutex); + return err; +} + +static int lo_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct loop_device *lo = bdev->bd_disk->private_data; + int err; + + switch (cmd) { + case LOOP_SET_FD: + return loop_set_fd(lo, mode, bdev, arg); + case LOOP_CHANGE_FD: + return loop_change_fd(lo, bdev, arg); + case LOOP_CLR_FD: + return loop_clr_fd(lo); + case LOOP_SET_STATUS: + err = -EPERM; + if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) { + err = loop_set_status_old(lo, + (struct loop_info __user *)arg); + } + break; + case LOOP_GET_STATUS: + return loop_get_status_old(lo, (struct loop_info __user *) arg); + case LOOP_SET_STATUS64: + err = -EPERM; + if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) { + err = loop_set_status64(lo, + (struct loop_info64 __user *) arg); + } + break; + case LOOP_GET_STATUS64: + return loop_get_status64(lo, (struct loop_info64 __user *) arg); + case LOOP_SET_CAPACITY: + case LOOP_SET_DIRECT_IO: + case LOOP_SET_BLOCK_SIZE: + if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN)) + return -EPERM; + /* Fall through */ + default: + err = lo_simple_ioctl(lo, cmd, arg); + break; + } + + return err; +} + +#ifdef CONFIG_COMPAT +struct compat_loop_info { + compat_int_t lo_number; /* ioctl r/o */ + compat_dev_t lo_device; /* ioctl r/o */ + compat_ulong_t lo_inode; /* ioctl r/o */ + compat_dev_t lo_rdevice; /* ioctl r/o */ + compat_int_t lo_offset; + compat_int_t lo_encrypt_type; + compat_int_t lo_encrypt_key_size; /* ioctl w/o */ + compat_int_t lo_flags; /* ioctl r/o */ + char lo_name[LO_NAME_SIZE]; + unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ + compat_ulong_t lo_init[2]; + char reserved[4]; + compat_int_t lo_file_fmt_type; +} __attribute__((packed)); + +/* + * Transfer 32-bit compatibility structure in userspace to 64-bit loop info + * - noinlined to reduce stack space usage in main part of driver + */ +static noinline int +loop_info64_from_compat(const struct compat_loop_info __user *arg, + struct loop_info64 *info64) +{ + struct compat_loop_info info; + int err; + + /* backward compatibility: copy everything except the file format type + * field */ + err = copy_from_user(&info, arg, + sizeof(info) - sizeof(info.lo_file_fmt_type)); + if (err) + return -EFAULT; + + if (info.lo_flags & LO_FLAGS_FILE_FMT) { + /* copy everything from the user space */ + err = copy_from_user(&info, arg, sizeof(info)); + if (err) + return -EFAULT; + } else { + info.lo_file_fmt_type = LO_FILE_FMT_RAW; + } + + memset(info64, 0, sizeof(*info64)); + info64->lo_number = info.lo_number; + info64->lo_device = info.lo_device; + info64->lo_inode = info.lo_inode; + info64->lo_rdevice = info.lo_rdevice; + info64->lo_offset = info.lo_offset; + info64->lo_sizelimit = 0; + info64->lo_encrypt_type = info.lo_encrypt_type; + info64->lo_encrypt_key_size = info.lo_encrypt_key_size; + info64->lo_flags = info.lo_flags; + info64->lo_init[0] = info.lo_init[0]; + info64->lo_init[1] = info.lo_init[1]; + info64->lo_file_fmt_type = info.lo_file_fmt_type; + if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI) + memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE); + else + memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE); + memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE); + return 0; +} + +/* + * Transfer 
64-bit loop info to 32-bit compatibility structure in userspace + * - noinlined to reduce stack space usage in main part of driver + */ +static noinline int +loop_info64_to_compat(const struct loop_info64 *info64, + struct compat_loop_info __user *arg) +{ + struct compat_loop_info info; + compat_int_t lo_flags; + int err; + + /* backward compatibility: copy everything except the file format type + * field */ + err = copy_from_user(&info, arg, + sizeof(info) - sizeof(info.lo_file_fmt_type)); + if (err) + return -EFAULT; + + lo_flags = info.lo_flags; + + memset(&info, 0, sizeof(info)); + info.lo_number = info64->lo_number; + info.lo_device = info64->lo_device; + info.lo_inode = info64->lo_inode; + info.lo_rdevice = info64->lo_rdevice; + info.lo_offset = info64->lo_offset; + info.lo_encrypt_type = info64->lo_encrypt_type; + info.lo_encrypt_key_size = info64->lo_encrypt_key_size; + info.lo_flags = info64->lo_flags; + info.lo_init[0] = info64->lo_init[0]; + info.lo_init[1] = info64->lo_init[1]; + info.lo_file_fmt_type = info64->lo_file_fmt_type; + if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI) + memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE); + else + memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE); + memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE); + + /* error in case values were truncated */ + if (info.lo_device != info64->lo_device || + info.lo_rdevice != info64->lo_rdevice || + info.lo_inode != info64->lo_inode || + info.lo_offset != info64->lo_offset || + info.lo_init[0] != info64->lo_init[0] || + info.lo_init[1] != info64->lo_init[1] || + info.lo_file_fmt_type != info64->lo_file_fmt_type) + return -EOVERFLOW; + + if (lo_flags & LO_FLAGS_FILE_FMT) { + /* copy entire structure to user space because file format + * support is available */ + err = copy_to_user(arg, &info, sizeof(info)); + } else { + /* copy normal structure to user space */ + err = copy_to_user(arg, &info, + sizeof(info) - sizeof(info.lo_file_fmt_type)); + } + + if (err) + return -EFAULT; + + return 0; +} + +static int +loop_set_status_compat(struct loop_device *lo, + const struct compat_loop_info __user *arg) +{ + struct loop_info64 info64; + int ret; + + ret = loop_info64_from_compat(arg, &info64); + if (ret < 0) + return ret; + return loop_set_status(lo, &info64); +} + +static int +loop_get_status_compat(struct loop_device *lo, + struct compat_loop_info __user *arg) +{ + struct loop_info64 info64; + int err; + + if (!arg) + return -EINVAL; + err = loop_get_status(lo, &info64); + if (!err) + err = loop_info64_to_compat(&info64, arg); + return err; +} + +static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct loop_device *lo = bdev->bd_disk->private_data; + int err; + + switch(cmd) { + case LOOP_SET_STATUS: + err = loop_set_status_compat(lo, + (const struct compat_loop_info __user *)arg); + break; + case LOOP_GET_STATUS: + err = loop_get_status_compat(lo, + (struct compat_loop_info __user *)arg); + break; + case LOOP_SET_CAPACITY: + case LOOP_CLR_FD: + case LOOP_GET_STATUS64: + case LOOP_SET_STATUS64: + arg = (unsigned long) compat_ptr(arg); + /* fall through */ + case LOOP_SET_FD: + case LOOP_CHANGE_FD: + case LOOP_SET_BLOCK_SIZE: + err = lo_ioctl(bdev, mode, cmd, arg); + break; + default: + err = -ENOIOCTLCMD; + break; + } + return err; +} +#endif + +static int lo_open(struct block_device *bdev, fmode_t mode) +{ + struct loop_device *lo; + int err; + + err = mutex_lock_killable(&loop_ctl_mutex); + if (err) + return err; + 
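+	/*
+	 * private_data is cleared under loop_ctl_mutex by LOOP_CTL_REMOVE
+	 * before the device is torn down, so a racing open must re-check
+	 * it here.
+	 */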
lo = bdev->bd_disk->private_data; + if (!lo) { + err = -ENXIO; + goto out; + } + + atomic_inc(&lo->lo_refcnt); +out: + mutex_unlock(&loop_ctl_mutex); + return err; +} + +static void lo_release(struct gendisk *disk, fmode_t mode) +{ + struct loop_device *lo; + + mutex_lock(&loop_ctl_mutex); + lo = disk->private_data; + if (atomic_dec_return(&lo->lo_refcnt)) + goto out_unlock; + + if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) { + if (lo->lo_state != Lo_bound) + goto out_unlock; + lo->lo_state = Lo_rundown; + mutex_unlock(&loop_ctl_mutex); + /* + * In autoclear mode, stop the loop thread + * and remove configuration after last close. + */ + __loop_clr_fd(lo, true); + return; + } else if (lo->lo_state == Lo_bound) { + /* + * Otherwise keep thread (if running) and config, + * but flush possible ongoing bios in thread. + */ + blk_mq_freeze_queue(lo->lo_queue); + blk_mq_unfreeze_queue(lo->lo_queue); + } + +out_unlock: + mutex_unlock(&loop_ctl_mutex); +} + +static const struct block_device_operations lo_fops = { + .owner = THIS_MODULE, + .open = lo_open, + .release = lo_release, + .ioctl = lo_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = lo_compat_ioctl, +#endif +}; + +/* + * And now the modules code and kernel interface. + */ +static int max_loop; +module_param(max_loop, int, 0444); +MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); +module_param(max_part, int, 0444); +MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); + +int loop_register_transfer(struct loop_func_table *funcs) +{ + unsigned int n = funcs->number; + + if (n >= MAX_LO_CRYPT || xfer_funcs[n]) + return -EINVAL; + xfer_funcs[n] = funcs; + return 0; +} + +static int unregister_transfer_cb(int id, void *ptr, void *data) +{ + struct loop_device *lo = ptr; + struct loop_func_table *xfer = data; + + mutex_lock(&loop_ctl_mutex); + if (lo->lo_encryption == xfer) + loop_release_xfer(lo); + mutex_unlock(&loop_ctl_mutex); + return 0; +} + +int loop_unregister_transfer(int number) +{ + unsigned int n = number; + struct loop_func_table *xfer; + + if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) + return -EINVAL; + + xfer_funcs[n] = NULL; + idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer); + return 0; +} + +EXPORT_SYMBOL(loop_register_transfer); +EXPORT_SYMBOL(loop_unregister_transfer); + +static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct request *rq = bd->rq; + struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); + struct loop_device *lo = rq->q->queuedata; + + blk_mq_start_request(rq); + + if (lo->lo_state != Lo_bound) + return BLK_STS_IOERR; + + switch (req_op(rq)) { + case REQ_OP_FLUSH: + case REQ_OP_DISCARD: + case REQ_OP_WRITE_ZEROES: + cmd->use_aio = false; + break; + default: + cmd->use_aio = lo->use_dio; + break; + } + + /* always use the first bio's css */ +#ifdef CONFIG_BLK_CGROUP + if (cmd->use_aio && rq->bio && rq->bio->bi_css) { + cmd->css = rq->bio->bi_css; + css_get(cmd->css); + } else +#endif + cmd->css = NULL; + kthread_queue_work(&lo->worker, &cmd->work); + + return BLK_STS_OK; +} + +static void loop_handle_cmd(struct loop_cmd *cmd) +{ + struct request *rq = blk_mq_rq_from_pdu(cmd); + const bool write = op_is_write(req_op(rq)); + struct loop_device *lo = rq->q->queuedata; + int ret = 0; + + if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { + ret = -EIO; + goto failed; + } + + ret = do_req_filebacked(lo, rq); + failed: + /* complete non-aio request 
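+	 * here; aio requests are completed from the kiocb completion
+	 * callback once the backing file I/O finishes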
+	 */
+	if (!cmd->use_aio || ret) {
+		cmd->ret = ret ? -EIO : 0;
+		blk_mq_complete_request(rq);
+	}
+}
+
+static void loop_queue_work(struct kthread_work *work)
+{
+	struct loop_cmd *cmd =
+		container_of(work, struct loop_cmd, work);
+
+	loop_handle_cmd(cmd);
+}
+
+static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx, unsigned int numa_node)
+{
+	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
+	kthread_init_work(&cmd->work, loop_queue_work);
+	return 0;
+}
+
+static const struct blk_mq_ops loop_mq_ops = {
+	.queue_rq       = loop_queue_rq,
+	.init_request	= loop_init_request,
+	.complete	= lo_complete_rq,
+};
+
+static struct dentry *loop_dbgfs_dir;
+
+static int loop_add(struct loop_device **l, int i)
+{
+	struct loop_device *lo;
+	struct gendisk *disk;
+	int err;
+
+	err = -ENOMEM;
+	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
+	if (!lo)
+		goto out;
+
+	lo->lo_state = Lo_unbound;
+
+	/* allocate id, if @id >= 0, we're requesting that specific id */
+	if (i >= 0) {
+		err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
+		if (err == -ENOSPC)
+			err = -EEXIST;
+	} else {
+		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
+	}
+	if (err < 0)
+		goto out_free_dev;
+	i = err;
+
+	err = -ENOMEM;
+	lo->tag_set.ops = &loop_mq_ops;
+	lo->tag_set.nr_hw_queues = 1;
+	lo->tag_set.queue_depth = 128;
+	lo->tag_set.numa_node = NUMA_NO_NODE;
+	lo->tag_set.cmd_size = sizeof(struct loop_cmd);
+	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+	lo->tag_set.driver_data = lo;
+
+	err = blk_mq_alloc_tag_set(&lo->tag_set);
+	if (err)
+		goto out_free_idr;
+
+	lo->lo_queue = blk_mq_init_queue(&lo->tag_set);
+	if (IS_ERR_OR_NULL(lo->lo_queue)) {
+		err = lo->lo_queue ? PTR_ERR(lo->lo_queue) : -ENOMEM;
+		goto out_cleanup_tags;
+	}
+	lo->lo_queue->queuedata = lo;
+
+	blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
+
+	/*
+	 * By default, we do buffer IO, so it doesn't make sense to enable
+	 * merge because the I/O submitted to backing file is handled page by
+	 * page. For directio mode, merge does help to dispatch bigger request
+	 * to the underlying disk. We will enable merge once directio is enabled.
+	 */
+	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+
+	err = -ENOMEM;
+	lo->lo_fmt = loop_file_fmt_alloc();
+	if (!lo->lo_fmt)
+		goto out_free_queue;
+
+	loop_file_fmt_set_lo(lo->lo_fmt, lo);
+
+	err = -ENOMEM;
+	disk = lo->lo_disk = alloc_disk(1 << part_shift);
+	if (!disk)
+		goto out_free_file_fmt;
+
+	/*
+	 * Disable partition scanning by default. The in-kernel partition
+	 * scanning can be requested individually per-device during its
+	 * setup. Userspace can always add and remove partitions from all
+	 * devices. The needed partition minors are allocated from the
+	 * extended minor space, the main loop device numbers will continue
+	 * to match the loop minors, regardless of the number of partitions
+	 * used.
+	 *
+	 * If max_part is given, partition scanning is globally enabled for
+	 * all loop devices. The minors for the main loop devices will be
+	 * multiples of max_part.
+	 *
+	 * Note: Global-for-all-devices, set-only-at-init, read-only module
+	 * parameters like 'max_loop' and 'max_part' make things needlessly
+	 * complicated, are too static, inflexible and may surprise
+	 * userspace tools. Parameters like this in general should be avoided.
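+	 *
+	 * Example: with max_part=15, loop_init() computes
+	 * part_shift = fls(15) = 4, so loop1 gets first_minor = 1 << 4 = 16
+	 * and minors 17 through 31 are left for its partitions.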
+ */ + if (!part_shift) + disk->flags |= GENHD_FL_NO_PART_SCAN; + disk->flags |= GENHD_FL_EXT_DEVT; + atomic_set(&lo->lo_refcnt, 0); + lo->lo_number = i; + spin_lock_init(&lo->lo_lock); + disk->major = LOOP_MAJOR; + disk->first_minor = i << part_shift; + disk->fops = &lo_fops; + disk->private_data = lo; + disk->queue = lo->lo_queue; + sprintf(disk->disk_name, "loop%d", i); + add_disk(disk); + *l = lo; + + /* initialize debugfs entries */ + /* create for each loop device a debugfs directory under 'loop' if + * the 'block' directory exists, otherwise create the loop directory in + * the root directory */ +#ifdef CONFIG_DEBUG_FS + lo->lo_dbgfs_dir = debugfs_create_dir(disk->disk_name, loop_dbgfs_dir); + + if (IS_ERR_OR_NULL(lo->lo_dbgfs_dir)) { + err = -ENODEV; + lo->lo_dbgfs_dir = NULL; + goto out_free_file_fmt; + } +#endif + + return lo->lo_number; + +out_free_file_fmt: + loop_file_fmt_free(lo->lo_fmt); +out_free_queue: + blk_cleanup_queue(lo->lo_queue); +out_cleanup_tags: + blk_mq_free_tag_set(&lo->tag_set); +out_free_idr: + idr_remove(&loop_index_idr, i); +out_free_dev: + kfree(lo); +out: + return err; +} + +static void loop_remove(struct loop_device *lo) +{ + loop_file_fmt_free(lo->lo_fmt); + debugfs_remove(lo->lo_dbgfs_dir); + del_gendisk(lo->lo_disk); + blk_cleanup_queue(lo->lo_queue); + blk_mq_free_tag_set(&lo->tag_set); + put_disk(lo->lo_disk); + kfree(lo); +} + +static int find_free_cb(int id, void *ptr, void *data) +{ + struct loop_device *lo = ptr; + struct loop_device **l = data; + + if (lo->lo_state == Lo_unbound) { + *l = lo; + return 1; + } + return 0; +} + +static int loop_lookup(struct loop_device **l, int i) +{ + struct loop_device *lo; + int ret = -ENODEV; + + if (i < 0) { + int err; + + err = idr_for_each(&loop_index_idr, &find_free_cb, &lo); + if (err == 1) { + *l = lo; + ret = lo->lo_number; + } + goto out; + } + + /* lookup and return a specific i */ + lo = idr_find(&loop_index_idr, i); + if (lo) { + *l = lo; + ret = lo->lo_number; + } +out: + return ret; +} + +static struct kobject *loop_probe(dev_t dev, int *part, void *data) +{ + struct loop_device *lo; + struct kobject *kobj; + int err; + + mutex_lock(&loop_ctl_mutex); + err = loop_lookup(&lo, MINOR(dev) >> part_shift); + if (err < 0) + err = loop_add(&lo, MINOR(dev) >> part_shift); + if (err < 0) + kobj = NULL; + else + kobj = get_disk_and_module(lo->lo_disk); + mutex_unlock(&loop_ctl_mutex); + + *part = 0; + return kobj; +} + +static long loop_control_ioctl(struct file *file, unsigned int cmd, + unsigned long parm) +{ + struct loop_device *lo; + int ret; + + ret = mutex_lock_killable(&loop_ctl_mutex); + if (ret) + return ret; + + ret = -ENOSYS; + switch (cmd) { + case LOOP_CTL_ADD: + ret = loop_lookup(&lo, parm); + if (ret >= 0) { + ret = -EEXIST; + break; + } + ret = loop_add(&lo, parm); + break; + case LOOP_CTL_REMOVE: + ret = loop_lookup(&lo, parm); + if (ret < 0) + break; + if (lo->lo_state != Lo_unbound) { + ret = -EBUSY; + break; + } + if (atomic_read(&lo->lo_refcnt) > 0) { + ret = -EBUSY; + break; + } + lo->lo_disk->private_data = NULL; + idr_remove(&loop_index_idr, lo->lo_number); + loop_remove(lo); + break; + case LOOP_CTL_GET_FREE: + ret = loop_lookup(&lo, -1); + if (ret >= 0) + break; + ret = loop_add(&lo, -1); + } + mutex_unlock(&loop_ctl_mutex); + + return ret; +} + +static const struct file_operations loop_ctl_fops = { + .open = nonseekable_open, + .unlocked_ioctl = loop_control_ioctl, + .compat_ioctl = loop_control_ioctl, + .owner = THIS_MODULE, + .llseek = noop_llseek, +}; + +static struct 
miscdevice loop_misc = {
+	.minor		= LOOP_CTRL_MINOR,
+	.name		= "loop-control",
+	.fops		= &loop_ctl_fops,
+};
+
+MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
+MODULE_ALIAS("devname:loop-control");
+
+static int __init loop_init(void)
+{
+	int i, nr;
+	unsigned long range;
+	struct loop_device *lo;
+	int err;
+
+	part_shift = 0;
+	if (max_part > 0) {
+		part_shift = fls(max_part);
+
+		/*
+		 * Adjust max_part according to part_shift as it is exported
+		 * to user space so that user can decide correct minor number
+		 * if [s]he wants to create more devices.
+		 *
+		 * Note that -1 is required because partition 0 is reserved
+		 * for the whole disk.
+		 */
+		max_part = (1UL << part_shift) - 1;
+	}
+
+	if ((1UL << part_shift) > DISK_MAX_PARTS) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	if (max_loop > 1UL << (MINORBITS - part_shift)) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	/*
+	 * If max_loop is specified, create that many devices upfront.
+	 * This also becomes a hard limit. If max_loop is not specified,
+	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
+	 * init time. Loop devices can be requested on-demand with the
+	 * /dev/loop-control interface, or be instantiated by accessing
+	 * a 'dead' device node.
+	 */
+	if (max_loop) {
+		nr = max_loop;
+		range = max_loop << part_shift;
+	} else {
+		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
+		range = 1UL << MINORBITS;
+	}
+
+	err = misc_register(&loop_misc);
+	if (err < 0)
+		goto err_out;
+
+	if (register_blkdev(LOOP_MAJOR, "loop")) {
+		err = -EIO;
+		goto misc_out;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	loop_dbgfs_dir = debugfs_create_dir("loop", NULL);
+	if (IS_ERR_OR_NULL(loop_dbgfs_dir)) {
+		err = -ENODEV;
+		goto blkdev_out;
+	}
+#endif
+
+	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
+				  THIS_MODULE, loop_probe, NULL, NULL);
+
+	/* pre-create number of devices given by config or max_loop */
+	mutex_lock(&loop_ctl_mutex);
+	for (i = 0; i < nr; i++)
+		loop_add(&lo, i);
+	mutex_unlock(&loop_ctl_mutex);
+
+	printk(KERN_INFO "loop: module loaded\n");
+	return 0;
+
+blkdev_out:
+	unregister_blkdev(LOOP_MAJOR, "loop");
+misc_out:
+	misc_deregister(&loop_misc);
+err_out:
+	return err;
+}
+
+static int loop_exit_cb(int id, void *ptr, void *data)
+{
+	struct loop_device *lo = ptr;
+
+	loop_remove(lo);
+	return 0;
+}
+
+static void __exit loop_exit(void)
+{
+	unsigned long range;
+
+	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
+
+	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
+	idr_destroy(&loop_index_idr);
+
+	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
+	unregister_blkdev(LOOP_MAJOR, "loop");
+
+#ifdef CONFIG_DEBUG_FS
+	debugfs_remove(loop_dbgfs_dir);
+#endif
+
+	misc_deregister(&loop_misc);
+}
+
+module_init(loop_init);
+module_exit(loop_exit);
+
+#ifndef MODULE
+static int __init max_loop_setup(char *str)
+{
+	max_loop = simple_strtol(str, NULL, 0);
+	return 1;
+}
+
+__setup("max_loop=", max_loop_setup);
+#endif
diff --git a/loop_main.h b/loop_main.h
new file mode 100644
index 0000000..33f6578
--- /dev/null
+++ b/loop_main.h
@@ -0,0 +1,106 @@
+/*
+ * loop_main.h
+ *
+ * Written by Theodore Ts'o, 3/29/93.
+ *
+ * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
+ * permitted under the GNU General Public License.
+ */
+#ifndef _LINUX_LOOP_H
+#define _LINUX_LOOP_H
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <uapi/linux/loop.h>
+#include <linux/kthread.h>
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+
+#include "loop_file_fmt.h"
+
+/* Possible states of device */
+enum {
+	Lo_unbound,
+	Lo_bound,
+	Lo_rundown,
+};
+
+struct loop_func_table;
+
+struct loop_device {
+	int		lo_number;
+	atomic_t	lo_refcnt;
+	loff_t		lo_offset;
+	loff_t		lo_sizelimit;
+	int		lo_flags;
+	int		(*transfer)(struct loop_device *, int cmd,
+				    struct page *raw_page, unsigned raw_off,
+				    struct page *loop_page, unsigned loop_off,
+				    int size, sector_t real_block);
+	char		lo_file_name[LO_NAME_SIZE];
+	char		lo_crypt_name[LO_NAME_SIZE];
+	char		lo_encrypt_key[LO_KEY_SIZE];
+	int		lo_encrypt_key_size;
+	struct loop_func_table *lo_encryption;
+	__u32		lo_init[2];
+	kuid_t		lo_key_owner;	/* Who set the key */
+	int		(*ioctl)(struct loop_device *, int cmd,
+				 unsigned long arg);
+
+	struct loop_file_fmt *lo_fmt;
+
+	struct file *	lo_backing_file;
+	struct block_device *lo_device;
+	void		*key_data;
+
+	gfp_t		old_gfp_mask;
+
+	spinlock_t		lo_lock;
+	int			lo_state;
+	struct kthread_worker	worker;
+	struct task_struct	*worker_task;
+	bool			use_dio;
+	bool			sysfs_inited;
+
+	struct request_queue	*lo_queue;
+	struct blk_mq_tag_set	tag_set;
+	struct gendisk		*lo_disk;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry		*lo_dbgfs_dir;
+#endif
+};
+
+struct loop_cmd {
+	struct kthread_work work;
+	bool use_aio;	/* use AIO interface to handle I/O */
+	atomic_t ref;	/* only for aio */
+	long ret;
+	struct kiocb iocb;
+	struct bio_vec *bvec;
+	struct cgroup_subsys_state *css;
+};
+
+/* Support for loadable transfer modules */
+struct loop_func_table {
+	int number;	/* filter type */
+	int (*transfer)(struct loop_device *lo, int cmd,
+			struct page *raw_page, unsigned raw_off,
+			struct page *loop_page, unsigned loop_off,
+			int size, sector_t real_block);
+	int (*init)(struct loop_device *, const struct loop_info64 *);
+	/* release is called from loop_unregister_transfer or clr_fd */
+	int (*release)(struct loop_device *);
+	int (*ioctl)(struct loop_device *, int cmd, unsigned long arg);
+	struct module *owner;
+};
+
+int loop_register_transfer(struct loop_func_table *funcs);
+int loop_unregister_transfer(int number);
+
+#endif
-- 
cgit v1.2.3-55-g7522