Diffstat (limited to 'fs')
 fs/Makefile                               |    2
 fs/adfs/adfs.h                            |   70
 fs/adfs/dir.c                             |   25
 fs/adfs/dir_f.c                           |   38
 fs/adfs/dir_fplus.c                       |   21
 fs/adfs/inode.c                           |   12
 fs/adfs/map.c                             |   15
 fs/adfs/super.c                           |  121
 fs/aio.c                                  |   18
 fs/anon_inodes.c                          |   13
 fs/binfmt_misc.c                          |   20
 fs/block_dev.c                            |   17
 fs/btrfs/tests/btrfs-tests.c              |   15
 fs/ceph/dir.c                             |    2
 fs/configfs/mount.c                       |   20
 fs/d_path.c                               |    1
 fs/dax.c                                  |    1
 fs/dcache.c                               |  100
 fs/efivarfs/super.c                       |   25
 fs/eventpoll.c                            |    4
 fs/f2fs/data.c                            |    2
 fs/fs_parser.c                            |    1
 fs/fs_pin.c                               |   10
 fs/fsopen.c                               |    2
 fs/fuse/control.c                         |    2
 fs/hugetlbfs/inode.c                      |    2
 fs/internal.h                             |   15
 fs/iomap.c                                | 2205
 fs/iomap/Makefile                         |   15
 fs/iomap/apply.c                          |   74
 fs/iomap/buffered-io.c                    | 1073
 fs/iomap/direct-io.c                      |  562
 fs/iomap/fiemap.c                         |  144
 fs/iomap/seek.c                           |  212
 fs/iomap/swapfile.c                       |  178
 fs/libfs.c                                |   82
 fs/mount.h                                |    8
 fs/namespace.c                            |  174
 fs/nfs/Makefile                           |    3
 fs/nfs/callback_proc.c                    |   28
 fs/nfs/client.c                           |   24
 fs/nfs/dir.c                              |   94
 fs/nfs/flexfilelayout/flexfilelayout.c    |   26
 fs/nfs/flexfilelayout/flexfilelayoutdev.c |    2
 fs/nfs/inode.c                            |   30
 fs/nfs/internal.h                         |    7
 fs/nfs/netns.h                            |    3
 fs/nfs/nfs2xdr.c                          |    2
 fs/nfs/nfs3client.c                       |    3
 fs/nfs/nfs3xdr.c                          |    2
 fs/nfs/nfs4_fs.h                          |    4
 fs/nfs/nfs4client.c                       |   14
 fs/nfs/nfs4file.c                         |    8
 fs/nfs/nfs4proc.c                         |   80
 fs/nfs/nfs4state.c                        |   49
 fs/nfs/nfs4trace.c                        |    8
 fs/nfs/nfs4trace.h                        |  283
 fs/nfs/nfs4xdr.c                          |   16
 fs/nfs/nfstrace.h                         |  233
 fs/nfs/pagelist.c                         |    6
 fs/nfs/pnfs.c                             |   20
 fs/nfs/super.c                            |   63
 fs/nfs/sysfs.c                            |  187
 fs/nfs/sysfs.h                            |   25
 fs/nfs/write.c                            |    7
 fs/nfsd/nfsctl.c                          |   32
 fs/notify/inotify/inotify_user.c          |    8
 fs/nsfs.c                                 |   16
 fs/openpromfs/inode.c                     |   20
 fs/pipe.c                                 |   15
 fs/proc/proc_sysctl.c                     |    4
 fs/proc/root.c                            |    7
 fs/proc/task_mmu.c                        |    3
 fs/ramfs/inode.c                          |    6
 fs/super.c                                |  148
 fs/sysfs/mount.c                          |    3
 fs/ubifs/file.c                           |    2
 fs/xfs/Makefile                           |    4
 fs/xfs/libxfs/xfs_trans_inode.c (renamed from fs/xfs/xfs_trans_inode.c) | 4
 79 files changed, 3749 insertions, 3051 deletions
diff --git a/fs/Makefile b/fs/Makefile
index c9aea23aba56..d60089fd689b 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_COREDUMP) += coredump.o
obj-$(CONFIG_SYSCTL) += drop_caches.o
obj-$(CONFIG_FHANDLE) += fhandle.o
-obj-$(CONFIG_FS_IOMAP) += iomap.o
+obj-y += iomap/
obj-y += quota/
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 804c6a77c5db..b7e844d2f321 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/adfs_fs.h>
@@ -8,6 +9,15 @@
#define ADFS_BAD_FRAG 1
#define ADFS_ROOT_FRAG 2
+#define ADFS_FILETYPE_NONE ((u16)~0)
+
+/* RISC OS 12-bit filetype is stored in load_address[19:8] */
+static inline u16 adfs_filetype(u32 loadaddr)
+{
+ return (loadaddr & 0xfff00000) == 0xfff00000 ?
+ (loadaddr >> 8) & 0xfff : ADFS_FILETYPE_NONE;
+}
+
#define ADFS_NDA_OWNER_READ (1 << 0)
#define ADFS_NDA_OWNER_WRITE (1 << 1)
#define ADFS_NDA_LOCKED (1 << 2)
@@ -18,22 +28,28 @@
#include "dir_f.h"
-struct buffer_head;
-
/*
* adfs file system inode data in memory
*/
struct adfs_inode_info {
loff_t mmu_private;
- unsigned long parent_id; /* object id of parent */
+ __u32 parent_id; /* parent indirect disc address */
__u32 loadaddr; /* RISC OS load address */
__u32 execaddr; /* RISC OS exec address */
- unsigned int filetype; /* RISC OS file type */
unsigned int attr; /* RISC OS permissions */
- unsigned int stamped:1; /* RISC OS file has date/time */
struct inode vfs_inode;
};
+static inline struct adfs_inode_info *ADFS_I(struct inode *inode)
+{
+ return container_of(inode, struct adfs_inode_info, vfs_inode);
+}
+
+static inline bool adfs_inode_is_stamped(struct inode *inode)
+{
+ return (ADFS_I(inode)->loadaddr & 0xfff00000) == 0xfff00000;
+}
+
/*
* Forward-declare this
*/
@@ -59,10 +75,8 @@ struct adfs_sb_info {
__u32 s_ids_per_zone; /* max. no ids in one zone */
__u32 s_idlen; /* length of ID in map */
__u32 s_map_size; /* sector size of a map */
- unsigned long s_size; /* total size (in blocks) of this fs */
signed int s_map2blk; /* shift left by this for map->sector*/
unsigned int s_log2sharesize;/* log2 share size */
- __le32 s_version; /* disc format version */
unsigned int s_namelen; /* maximum number of characters in name */
};
@@ -71,11 +85,6 @@ static inline struct adfs_sb_info *ADFS_SB(struct super_block *sb)
return sb->s_fs_info;
}
-static inline struct adfs_inode_info *ADFS_I(struct inode *inode)
-{
- return container_of(inode, struct adfs_inode_info, vfs_inode);
-}
-
/*
* Directory handling
*/
@@ -89,7 +98,7 @@ struct adfs_dir {
struct buffer_head **bh_fplus;
unsigned int pos;
- unsigned int parent_id;
+ __u32 parent_id;
struct adfs_dirheader dirhead;
union adfs_dirtail dirtail;
@@ -101,20 +110,18 @@ struct adfs_dir {
#define ADFS_MAX_NAME_LEN (256 + 4) /* +4 for ,xyz hex filetype suffix */
struct object_info {
__u32 parent_id; /* parent object id */
- __u32 file_id; /* object id */
+ __u32 indaddr; /* indirect disc addr */
__u32 loadaddr; /* load address */
__u32 execaddr; /* execution address */
__u32 size; /* size */
__u8 attr; /* RISC OS attributes */
unsigned int name_len; /* name length */
char name[ADFS_MAX_NAME_LEN];/* file name */
-
- /* RISC OS file type (12-bit: derived from loadaddr) */
- __u16 filetype;
};
struct adfs_dir_ops {
- int (*read)(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir);
+ int (*read)(struct super_block *sb, unsigned int indaddr,
+ unsigned int size, struct adfs_dir *dir);
int (*setpos)(struct adfs_dir *dir, unsigned int fpos);
int (*getnext)(struct adfs_dir *dir, struct object_info *obj);
int (*update)(struct adfs_dir *dir, struct object_info *obj);
@@ -137,7 +144,7 @@ int adfs_write_inode(struct inode *inode, struct writeback_control *wbc);
int adfs_notify_change(struct dentry *dentry, struct iattr *attr);
/* map.c */
-extern int adfs_map_lookup(struct super_block *sb, unsigned int frag_id, unsigned int offset);
+int adfs_map_lookup(struct super_block *sb, u32 frag_id, unsigned int offset);
extern unsigned int adfs_map_free(struct super_block *sb);
/* Misc */
@@ -145,6 +152,7 @@ __printf(3, 4)
void __adfs_error(struct super_block *sb, const char *function,
const char *fmt, ...);
#define adfs_error(sb, fmt...) __adfs_error(sb, __func__, fmt)
+void adfs_msg(struct super_block *sb, const char *pfx, const char *fmt, ...);
/* super.c */
@@ -182,16 +190,28 @@ static inline __u32 signed_asl(__u32 val, signed int shift)
*
* The root directory ID should always be looked up in the map [3.4]
*/
-static inline int
-__adfs_block_map(struct super_block *sb, unsigned int object_id,
- unsigned int block)
+static inline int __adfs_block_map(struct super_block *sb, u32 indaddr,
+ unsigned int block)
{
- if (object_id & 255) {
+ if (indaddr & 255) {
unsigned int off;
- off = (object_id & 255) - 1;
+ off = (indaddr & 255) - 1;
block += off << ADFS_SB(sb)->s_log2sharesize;
}
- return adfs_map_lookup(sb, object_id >> 8, block);
+ return adfs_map_lookup(sb, indaddr >> 8, block);
+}
+
+/* Return the disc record from the map */
+static inline
+struct adfs_discrecord *adfs_map_discrecord(struct adfs_discmap *dm)
+{
+ return (void *)(dm[0].dm_bh->b_data + 4);
+}
+
+static inline u64 adfs_disc_size(const struct adfs_discrecord *dr)
+{
+ return (u64)le32_to_cpu(dr->disc_size_high) << 32 |
+ le32_to_cpu(dr->disc_size);
}
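
The new adfs_filetype() helper captures the RISC OS convention that a load address of the form 0xFFFtttdd marks a filetyped, timestamped object, with the 12-bit type in bits 19:8; adfs_disc_size() likewise folds the split disc_size_high/disc_size fields into a single 64-bit byte count. A minimal standalone sketch of the filetype decode (plain C, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    #define FILETYPE_NONE ((uint16_t)~0)

    /* Bits 31:20 all set => filetyped and stamped; type is bits 19:8. */
    static uint16_t filetype(uint32_t loadaddr)
    {
        if ((loadaddr & 0xfff00000) == 0xfff00000)
            return (loadaddr >> 8) & 0xfff;
        return FILETYPE_NONE;
    }

    int main(void)
    {
        printf("%03x\n", filetype(0xfffff4b2)); /* ff4: stamped file */
        printf("%x\n", filetype(0x03fa1200));   /* ffff: not stamped */
        return 0;
    }
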
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index 35a4d9f4c3ae..a54c53244992 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -35,20 +35,14 @@ void adfs_object_fixup(struct adfs_dir *dir, struct object_info *obj)
if (obj->name_len <= 2 && dots == obj->name_len)
obj->name[0] = '^';
- obj->filetype = -1;
-
/*
- * object is a file and is filetyped and timestamped?
- * RISC OS 12-bit filetype is stored in load_address[19:8]
+ * If the object is a file, and the user requested the ,xyz hex
+ * filetype suffix to the name, check the filetype and append.
*/
- if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) &&
- (0xfff00000 == (0xfff00000 & obj->loadaddr))) {
- obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8);
-
- /* optionally append the ,xyz hex filetype suffix */
- if (ADFS_SB(dir->sb)->s_ftsuffix) {
- __u16 filetype = obj->filetype;
+ if (!(obj->attr & ADFS_NDA_DIRECTORY) && ADFS_SB(dir->sb)->s_ftsuffix) {
+ u16 filetype = adfs_filetype(obj->loadaddr);
+ if (filetype != ADFS_FILETYPE_NONE) {
obj->name[obj->name_len++] = ',';
obj->name[obj->name_len++] = hex_asc_lo(filetype >> 8);
obj->name[obj->name_len++] = hex_asc_lo(filetype >> 4);
@@ -92,7 +86,7 @@ adfs_readdir(struct file *file, struct dir_context *ctx)
goto unlock_out;
while (ops->getnext(&dir, &obj) == 0) {
if (!dir_emit(ctx, obj.name, obj.name_len,
- obj.file_id, DT_UNKNOWN))
+ obj.indaddr, DT_UNKNOWN))
break;
ctx->pos++;
}
@@ -113,8 +107,8 @@ adfs_dir_update(struct super_block *sb, struct object_info *obj, int wait)
const struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
struct adfs_dir dir;
- printk(KERN_INFO "adfs_dir_update: object %06X in dir %06X\n",
- obj->file_id, obj->parent_id);
+ printk(KERN_INFO "adfs_dir_update: object %06x in dir %06x\n",
+ obj->indaddr, obj->parent_id);
if (!ops->update) {
ret = -EINVAL;
@@ -178,7 +172,8 @@ static int adfs_dir_lookup_byname(struct inode *inode, const struct qstr *qstr,
goto out;
if (ADFS_I(inode)->parent_id != dir.parent_id) {
- adfs_error(sb, "parent directory changed under me! (%lx but got %x)\n",
+ adfs_error(sb,
+ "parent directory changed under me! (%06x but got %06x)\n",
ADFS_I(inode)->parent_id, dir.parent_id);
ret = -EIO;
goto free_out;
diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c
index 7557378e58b3..c1a950c7400a 100644
--- a/fs/adfs/dir_f.c
+++ b/fs/adfs/dir_f.c
@@ -6,7 +6,6 @@
*
* E and F format directory handling
*/
-#include <linux/buffer_head.h>
#include "adfs.h"
#include "dir_f.h"
@@ -124,12 +123,9 @@ adfs_dir_checkbyte(const struct adfs_dir *dir)
return (dircheck ^ (dircheck >> 8) ^ (dircheck >> 16) ^ (dircheck >> 24)) & 0xff;
}
-/*
- * Read and check that a directory is valid
- */
-static int
-adfs_dir_read(struct super_block *sb, unsigned long object_id,
- unsigned int size, struct adfs_dir *dir)
+/* Read and check that a directory is valid */
+static int adfs_dir_read(struct super_block *sb, u32 indaddr,
+ unsigned int size, struct adfs_dir *dir)
{
const unsigned int blocksize_bits = sb->s_blocksize_bits;
int blk = 0;
@@ -149,10 +145,10 @@ adfs_dir_read(struct super_block *sb, unsigned long object_id,
for (blk = 0; blk < size; blk++) {
int phys;
- phys = __adfs_block_map(sb, object_id, blk);
+ phys = __adfs_block_map(sb, indaddr, blk);
if (!phys) {
- adfs_error(sb, "dir object %lX has a hole at offset %d",
- object_id, blk);
+ adfs_error(sb, "dir %06x has a hole at offset %d",
+ indaddr, blk);
goto release_buffers;
}
@@ -180,8 +176,7 @@ adfs_dir_read(struct super_block *sb, unsigned long object_id,
return 0;
bad_dir:
- adfs_error(sb, "corrupted directory fragment %lX",
- object_id);
+ adfs_error(sb, "dir %06x is corrupted", indaddr);
release_buffers:
for (blk -= 1; blk >= 0; blk -= 1)
brelse(dir->bh[blk]);
@@ -208,7 +203,7 @@ adfs_dir2obj(struct adfs_dir *dir, struct object_info *obj,
}
obj->name_len = name_len;
- obj->file_id = adfs_readval(de->dirinddiscadd, 3);
+ obj->indaddr = adfs_readval(de->dirinddiscadd, 3);
obj->loadaddr = adfs_readval(de->dirload, 4);
obj->execaddr = adfs_readval(de->direxec, 4);
obj->size = adfs_readval(de->dirlen, 4);
@@ -223,7 +218,7 @@ adfs_dir2obj(struct adfs_dir *dir, struct object_info *obj,
static inline void
adfs_obj2dir(struct adfs_direntry *de, struct object_info *obj)
{
- adfs_writeval(de->dirinddiscadd, 3, obj->file_id);
+ adfs_writeval(de->dirinddiscadd, 3, obj->indaddr);
adfs_writeval(de->dirload, 4, obj->loadaddr);
adfs_writeval(de->direxec, 4, obj->execaddr);
adfs_writeval(de->dirlen, 4, obj->size);
@@ -309,8 +304,7 @@ __adfs_dir_put(struct adfs_dir *dir, int pos, struct object_info *obj)
* the caller is responsible for holding the necessary
* locks.
*/
-static int
-adfs_dir_find_entry(struct adfs_dir *dir, unsigned long object_id)
+static int adfs_dir_find_entry(struct adfs_dir *dir, u32 indaddr)
{
int pos, ret;
@@ -322,7 +316,7 @@ adfs_dir_find_entry(struct adfs_dir *dir, unsigned long object_id)
if (!__adfs_dir_get(dir, pos, &obj))
break;
- if (obj.file_id == object_id) {
+ if (obj.indaddr == indaddr) {
ret = pos;
break;
}
@@ -331,15 +325,15 @@ adfs_dir_find_entry(struct adfs_dir *dir, unsigned long object_id)
return ret;
}
-static int
-adfs_f_read(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir)
+static int adfs_f_read(struct super_block *sb, u32 indaddr, unsigned int size,
+ struct adfs_dir *dir)
{
int ret;
- if (sz != ADFS_NEWDIR_SIZE)
+ if (size != ADFS_NEWDIR_SIZE)
return -EIO;
- ret = adfs_dir_read(sb, id, sz, dir);
+ ret = adfs_dir_read(sb, indaddr, size, dir);
if (ret)
adfs_error(sb, "unable to read directory");
else
@@ -376,7 +370,7 @@ adfs_f_update(struct adfs_dir *dir, struct object_info *obj)
struct super_block *sb = dir->sb;
int ret, i;
- ret = adfs_dir_find_entry(dir, obj->file_id);
+ ret = adfs_dir_find_entry(dir, obj->indaddr);
if (ret < 0) {
adfs_error(dir->sb, "unable to locate entry to update");
goto out;
diff --git a/fs/adfs/dir_fplus.c b/fs/adfs/dir_fplus.c
index 6c5fbb0259c9..d56924c11b17 100644
--- a/fs/adfs/dir_fplus.c
+++ b/fs/adfs/dir_fplus.c
@@ -4,7 +4,6 @@
*
* Copyright (C) 1997-1999 Russell King
*/
-#include <linux/buffer_head.h>
#include <linux/slab.h>
#include "adfs.h"
#include "dir_fplus.h"
@@ -37,17 +36,15 @@ adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct
h = (struct adfs_bigdirheader *)dir->bh_fplus[0]->b_data;
size = le32_to_cpu(h->bigdirsize);
if (size != sz) {
- printk(KERN_WARNING "adfs: adfs_fplus_read:"
- " directory header size %X\n"
- " does not match directory size %X\n",
- size, sz);
+ adfs_msg(sb, KERN_WARNING,
+ "directory header size %X does not match directory size %X",
+ size, sz);
}
if (h->bigdirversion[0] != 0 || h->bigdirversion[1] != 0 ||
h->bigdirversion[2] != 0 || size & 2047 ||
h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME)) {
- printk(KERN_WARNING "adfs: dir object %X has"
- " malformed dir header\n", id);
+ adfs_error(sb, "dir %06x has malformed header", id);
goto out;
}
@@ -58,9 +55,10 @@ adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct
kcalloc(size, sizeof(struct buffer_head *),
GFP_KERNEL);
if (!bh_fplus) {
+ adfs_msg(sb, KERN_ERR,
+ "not enough memory for dir object %X (%d blocks)",
+ id, size);
ret = -ENOMEM;
- adfs_error(sb, "not enough memory for"
- " dir object %X (%d blocks)", id, size);
goto out;
}
dir->bh_fplus = bh_fplus;
@@ -91,8 +89,7 @@ adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct
if (t->bigdirendname != cpu_to_le32(BIGDIRENDNAME) ||
t->bigdirendmasseq != h->startmasseq ||
t->reserved[0] != 0 || t->reserved[1] != 0) {
- printk(KERN_WARNING "adfs: dir object %X has "
- "malformed dir end\n", id);
+ adfs_error(sb, "dir %06x has malformed tail", id);
goto out;
}
@@ -180,7 +177,7 @@ adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
obj->loadaddr = le32_to_cpu(bde.bigdirload);
obj->execaddr = le32_to_cpu(bde.bigdirexec);
obj->size = le32_to_cpu(bde.bigdirlen);
- obj->file_id = le32_to_cpu(bde.bigdirindaddr);
+ obj->indaddr = le32_to_cpu(bde.bigdirindaddr);
obj->attr = le32_to_cpu(bde.bigdirattr);
obj->name_len = le32_to_cpu(bde.bigdirobnamelen);
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 904d624541ad..124de75413a5 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -94,7 +94,7 @@ adfs_atts2mode(struct super_block *sb, struct inode *inode)
return S_IFDIR | S_IXUGO | mode;
}
- switch (ADFS_I(inode)->filetype) {
+ switch (adfs_filetype(ADFS_I(inode)->loadaddr)) {
case 0xfc0: /* LinkFS */
return S_IFLNK|S_IRWXUGO;
@@ -174,7 +174,7 @@ adfs_adfs2unix_time(struct timespec64 *tv, struct inode *inode)
2208988800000000000LL;
s64 nsec;
- if (ADFS_I(inode)->stamped == 0)
+ if (!adfs_inode_is_stamped(inode))
goto cur_time;
high = ADFS_I(inode)->loadaddr & 0xFF; /* top 8 bits of timestamp */
@@ -213,7 +213,7 @@ adfs_unix2adfs_time(struct inode *inode, unsigned int secs)
{
unsigned int high, low;
- if (ADFS_I(inode)->stamped) {
+ if (adfs_inode_is_stamped(inode)) {
/* convert 32-bit seconds to 40-bit centi-seconds */
low = (secs & 255) * 100;
high = (secs / 256) * 100 + (low >> 8) + 0x336e996a;
@@ -247,7 +247,7 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
inode->i_uid = ADFS_SB(sb)->s_uid;
inode->i_gid = ADFS_SB(sb)->s_gid;
- inode->i_ino = obj->file_id;
+ inode->i_ino = obj->indaddr;
inode->i_size = obj->size;
set_nlink(inode, 2);
inode->i_blocks = (inode->i_size + sb->s_blocksize - 1) >>
@@ -263,8 +263,6 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
ADFS_I(inode)->loadaddr = obj->loadaddr;
ADFS_I(inode)->execaddr = obj->execaddr;
ADFS_I(inode)->attr = obj->attr;
- ADFS_I(inode)->filetype = obj->filetype;
- ADFS_I(inode)->stamped = ((obj->loadaddr & 0xfff00000) == 0xfff00000);
inode->i_mode = adfs_atts2mode(sb, inode);
adfs_adfs2unix_time(&inode->i_mtime, inode);
@@ -355,7 +353,7 @@ int adfs_write_inode(struct inode *inode, struct writeback_control *wbc)
struct object_info obj;
int ret;
- obj.file_id = inode->i_ino;
+ obj.indaddr = inode->i_ino;
obj.name_len = 0;
obj.parent_id = ADFS_I(inode)->parent_id;
obj.loadaddr = ADFS_I(inode)->loadaddr;
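
The stamped-time handling above relies on the RISC OS timestamp being a 40-bit count of centiseconds since 1900-01-01: high carries bits 39:8 and the low byte of low carries bits 7:0, so the magic 0x336e996a in adfs_unix2adfs_time() is just the 1900-to-1970 offset (the same 2208988800 seconds used in adfs_adfs2unix_time()) times 100, pre-shifted right by 8. A standalone check of that arithmetic (plain C sketch, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t secs = 1000000000;     /* an arbitrary Unix time */
        uint32_t low  = (secs & 255) * 100;
        uint32_t high = (secs / 256) * 100 + (low >> 8) + 0x336e996a;

        /* Reassemble the 40-bit centisecond count since 1900 ... */
        uint64_t cs = ((uint64_t)high << 8) | (low & 255);

        /* ... and confirm it equals secs*100 plus the epoch offset. */
        printf("%d\n", cs == (uint64_t)secs * 100 + 2208988800ULL * 100);
        return 0;
    }
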
diff --git a/fs/adfs/map.c b/fs/adfs/map.c
index 4d34338c6176..f44d12cef5be 100644
--- a/fs/adfs/map.c
+++ b/fs/adfs/map.c
@@ -4,7 +4,6 @@
*
* Copyright (C) 1997-2002 Russell King
*/
-#include <linux/buffer_head.h>
#include <asm/unaligned.h>
#include "adfs.h"
@@ -64,9 +63,8 @@ static DEFINE_RWLOCK(adfs_map_lock);
* output of:
* gcc -D__KERNEL__ -O2 -I../../include -o - -S map.c
*/
-static int
-lookup_zone(const struct adfs_discmap *dm, const unsigned int idlen,
- const unsigned int frag_id, unsigned int *offset)
+static int lookup_zone(const struct adfs_discmap *dm, const unsigned int idlen,
+ const u32 frag_id, unsigned int *offset)
{
const unsigned int mapsize = dm->dm_endbit;
const u32 idmask = (1 << idlen) - 1;
@@ -185,9 +183,8 @@ error:
return 0;
}
-static int
-scan_map(struct adfs_sb_info *asb, unsigned int zone,
- const unsigned int frag_id, unsigned int mapoff)
+static int scan_map(struct adfs_sb_info *asb, unsigned int zone,
+ const u32 frag_id, unsigned int mapoff)
{
const unsigned int idlen = asb->s_idlen;
struct adfs_discmap *dm, *dm_end;
@@ -241,9 +238,7 @@ adfs_map_free(struct super_block *sb)
return signed_asl(total, asb->s_map2blk);
}
-int
-adfs_map_lookup(struct super_block *sb, unsigned int frag_id,
- unsigned int offset)
+int adfs_map_lookup(struct super_block *sb, u32 frag_id, unsigned int offset)
{
struct adfs_sb_info *asb = ADFS_SB(sb);
unsigned int zone, mapoff;
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index ffb669f9bba7..65b04ebb51c3 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -6,7 +6,6 @@
*/
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/buffer_head.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
@@ -17,25 +16,42 @@
#include "dir_f.h"
#include "dir_fplus.h"
+#define ADFS_SB_FLAGS SB_NOATIME
+
#define ADFS_DEFAULT_OWNER_MASK S_IRWXU
#define ADFS_DEFAULT_OTHER_MASK (S_IRWXG | S_IRWXO)
void __adfs_error(struct super_block *sb, const char *function, const char *fmt, ...)
{
- char error_buf[128];
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
- vsnprintf(error_buf, sizeof(error_buf), fmt, args);
- va_end(args);
+ vaf.fmt = fmt;
+ vaf.va = &args;
- printk(KERN_CRIT "ADFS-fs error (device %s)%s%s: %s\n",
+ printk(KERN_CRIT "ADFS-fs error (device %s)%s%s: %pV\n",
sb->s_id, function ? ": " : "",
- function ? function : "", error_buf);
+ function ? function : "", &vaf);
+
+ va_end(args);
+}
+
+void adfs_msg(struct super_block *sb, const char *pfx, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk("%sADFS-fs (%s): %pV\n", pfx, sb->s_id, &vaf);
+ va_end(args);
}
static int adfs_checkdiscrecord(struct adfs_discrecord *dr)
{
+ unsigned int max_idlen;
int i;
/* sector size must be 256, 512 or 1024 bytes */
@@ -55,8 +71,13 @@ static int adfs_checkdiscrecord(struct adfs_discrecord *dr)
if (le32_to_cpu(dr->disc_size_high) >> dr->log2secsize)
return 1;
- /* idlen must be no greater than 19 v2 [1.0] */
- if (dr->idlen > 19)
+ /*
+ * Maximum idlen is limited to 16 bits for new directories by
+ * the three-byte storage of an indirect disc address. For
+ * big directories, idlen must be no greater than 19 v2 [1.0]
+ */
+ max_idlen = dr->format_version ? 19 : 16;
+ if (dr->idlen > max_idlen)
return 1;
/* reserved bytes should be zero */
@@ -152,10 +173,10 @@ static const match_table_t tokens = {
{Opt_err, NULL}
};
-static int parse_options(struct super_block *sb, char *options)
+static int parse_options(struct super_block *sb, struct adfs_sb_info *asb,
+ char *options)
{
char *p;
- struct adfs_sb_info *asb = ADFS_SB(sb);
int option;
if (!options)
@@ -199,8 +220,9 @@ static int parse_options(struct super_block *sb, char *options)
asb->s_ftsuffix = option;
break;
default:
- printk("ADFS-fs: unrecognised mount option \"%s\" "
- "or missing value\n", p);
+ adfs_msg(sb, KERN_ERR,
+ "unrecognised mount option \"%s\" or missing value",
+ p);
return -EINVAL;
}
}
@@ -209,21 +231,31 @@ static int parse_options(struct super_block *sb, char *options)
static int adfs_remount(struct super_block *sb, int *flags, char *data)
{
+ struct adfs_sb_info temp_asb;
+ int ret;
+
sync_filesystem(sb);
- *flags |= SB_NODIRATIME;
- return parse_options(sb, data);
+ *flags |= ADFS_SB_FLAGS;
+
+ temp_asb = *ADFS_SB(sb);
+ ret = parse_options(sb, &temp_asb, data);
+ if (ret == 0)
+ *ADFS_SB(sb) = temp_asb;
+
+ return ret;
}
static int adfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct adfs_sb_info *sbi = ADFS_SB(sb);
+ struct adfs_discrecord *dr = adfs_map_discrecord(sbi->s_map);
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
buf->f_type = ADFS_SUPER_MAGIC;
buf->f_namelen = sbi->s_namelen;
buf->f_bsize = sb->s_blocksize;
- buf->f_blocks = sbi->s_size;
+ buf->f_blocks = adfs_disc_size(dr) >> sb->s_blocksize_bits;
buf->f_files = sbi->s_ids_per_zone * sbi->s_map_size;
buf->f_bavail =
buf->f_bfree = adfs_map_free(sb);
@@ -327,8 +359,7 @@ static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_di
i = zone - 1;
dm[0].dm_startblk = 0;
dm[0].dm_startbit = ADFS_DR_SIZE_BITS;
- dm[i].dm_endbit = (le32_to_cpu(dr->disc_size_high) << (32 - dr->log2bpmb)) +
- (le32_to_cpu(dr->disc_size) >> dr->log2bpmb) +
+ dm[i].dm_endbit = (adfs_disc_size(dr) >> dr->log2bpmb) +
(ADFS_DR_SIZE_BITS - i * zone_size);
if (adfs_checkmap(sb, dm))
@@ -344,27 +375,18 @@ error_free:
return ERR_PTR(-EIO);
}
-static inline unsigned long adfs_discsize(struct adfs_discrecord *dr, int block_bits)
-{
- unsigned long discsize;
-
- discsize = le32_to_cpu(dr->disc_size_high) << (32 - block_bits);
- discsize |= le32_to_cpu(dr->disc_size) >> block_bits;
-
- return discsize;
-}
-
static int adfs_fill_super(struct super_block *sb, void *data, int silent)
{
struct adfs_discrecord *dr;
struct buffer_head *bh;
struct object_info root_obj;
unsigned char *b_data;
+ unsigned int blocksize;
struct adfs_sb_info *asb;
struct inode *root;
int ret = -EINVAL;
- sb->s_flags |= SB_NODIRATIME;
+ sb->s_flags |= ADFS_SB_FLAGS;
asb = kzalloc(sizeof(*asb), GFP_KERNEL);
if (!asb)
@@ -378,12 +400,12 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
asb->s_other_mask = ADFS_DEFAULT_OTHER_MASK;
asb->s_ftsuffix = 0;
- if (parse_options(sb, data))
+ if (parse_options(sb, asb, data))
goto error;
sb_set_blocksize(sb, BLOCK_SIZE);
if (!(bh = sb_bread(sb, ADFS_DISCRECORD / BLOCK_SIZE))) {
- adfs_error(sb, "unable to read superblock");
+ adfs_msg(sb, KERN_ERR, "error: unable to read superblock");
ret = -EIO;
goto error;
}
@@ -391,11 +413,8 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
b_data = bh->b_data + (ADFS_DISCRECORD % BLOCK_SIZE);
if (adfs_checkbblk(b_data)) {
- if (!silent)
- printk("VFS: Can't find an adfs filesystem on dev "
- "%s.\n", sb->s_id);
ret = -EINVAL;
- goto error_free_bh;
+ goto error_badfs;
}
dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET);
@@ -404,33 +423,33 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
* Do some sanity checks on the ADFS disc record
*/
if (adfs_checkdiscrecord(dr)) {
- if (!silent)
- printk("VPS: Can't find an adfs filesystem on dev "
- "%s.\n", sb->s_id);
ret = -EINVAL;
- goto error_free_bh;
+ goto error_badfs;
}
+ blocksize = 1 << dr->log2secsize;
brelse(bh);
- if (sb_set_blocksize(sb, 1 << dr->log2secsize)) {
+
+ if (sb_set_blocksize(sb, blocksize)) {
bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);
if (!bh) {
- adfs_error(sb, "couldn't read superblock on "
- "2nd try.");
+ adfs_msg(sb, KERN_ERR,
+ "error: couldn't read superblock on 2nd try.");
ret = -EIO;
goto error;
}
b_data = bh->b_data + (ADFS_DISCRECORD % sb->s_blocksize);
if (adfs_checkbblk(b_data)) {
- adfs_error(sb, "disc record mismatch, very weird!");
+ adfs_msg(sb, KERN_ERR,
+ "error: disc record mismatch, very weird!");
ret = -EINVAL;
goto error_free_bh;
}
dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET);
} else {
if (!silent)
- printk(KERN_ERR "VFS: Unsupported blocksize on dev "
- "%s.\n", sb->s_id);
+ adfs_msg(sb, KERN_ERR,
+ "error: unsupported blocksize");
ret = -EINVAL;
goto error;
}
@@ -443,8 +462,6 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
asb->s_idlen = dr->idlen;
asb->s_map_size = dr->nzones | (dr->nzones_high << 8);
asb->s_map2blk = dr->log2bpmb - dr->log2secsize;
- asb->s_size = adfs_discsize(dr, sb->s_blocksize_bits);
- asb->s_version = dr->format_version;
asb->s_log2sharesize = dr->log2sharesize;
asb->s_map = adfs_read_map(sb, dr);
@@ -460,9 +477,9 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
*/
sb->s_op = &adfs_sops;
- dr = (struct adfs_discrecord *)(asb->s_map[0].dm_bh->b_data + 4);
+ dr = adfs_map_discrecord(asb->s_map);
- root_obj.parent_id = root_obj.file_id = le32_to_cpu(dr->root);
+ root_obj.parent_id = root_obj.indaddr = le32_to_cpu(dr->root);
root_obj.name_len = 0;
/* Set root object date as 01 Jan 1987 00:00:00 */
root_obj.loadaddr = 0xfff0003f;
@@ -470,13 +487,12 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
root_obj.size = ADFS_NEWDIR_SIZE;
root_obj.attr = ADFS_NDA_DIRECTORY | ADFS_NDA_OWNER_READ |
ADFS_NDA_OWNER_WRITE | ADFS_NDA_PUBLIC_READ;
- root_obj.filetype = -1;
/*
* If this is a F+ disk with variable length directories,
* get the root_size from the disc record.
*/
- if (asb->s_version) {
+ if (dr->format_version) {
root_obj.size = le32_to_cpu(dr->root_size);
asb->s_dir = &adfs_fplus_dir_ops;
asb->s_namelen = ADFS_FPLUS_NAME_LEN;
@@ -505,6 +521,11 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
}
return 0;
+error_badfs:
+ if (!silent)
+ adfs_msg(sb, KERN_ERR,
+ "error: can't find an ADFS filesystem on dev %s.",
+ sb->s_id);
error_free_bh:
brelse(bh);
error:
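
One behavioural fix buried in the super.c conversion: adfs_remount() now parses the new options into a scratch copy of adfs_sb_info and commits the copy only when parsing succeeds, so a rejected remount no longer leaves half-applied options behind. The shape of that pattern, reduced to a hypothetical sketch (struct opts and the parse callback are placeholders, not kernel API):

    /* Copy-then-commit, as adfs_remount() now does with adfs_sb_info. */
    struct opts { int uid, gid; };

    static int remount_opts(struct opts *live, char *data,
                            int (*parse)(struct opts *, char *))
    {
        struct opts tmp = *live;        /* parse into a scratch copy */
        int ret = parse(&tmp, data);    /* may fail partway through */

        if (ret == 0)
            *live = tmp;                /* commit only on success */
        return ret;                     /* on failure *live is untouched */
    }
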
diff --git a/fs/aio.c b/fs/aio.c
index 8327db0c8e08..01e0fb9ae45a 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -42,6 +42,7 @@
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
#include <asm/kmap_types.h>
#include <linux/uaccess.h>
@@ -249,15 +250,12 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
return file;
}
-static struct dentry *aio_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int aio_init_fs_context(struct fs_context *fc)
{
- struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, NULL,
- AIO_RING_MAGIC);
-
- if (!IS_ERR(root))
- root->d_sb->s_iflags |= SB_I_NOEXEC;
- return root;
+ if (!init_pseudo(fc, AIO_RING_MAGIC))
+ return -ENOMEM;
+ fc->s_iflags |= SB_I_NOEXEC;
+ return 0;
}
/* aio_setup
@@ -268,7 +266,7 @@ static int __init aio_setup(void)
{
static struct file_system_type aio_fs = {
.name = "aio",
- .mount = aio_mount,
+ .init_fs_context = aio_init_fs_context,
.kill_sb = kill_anon_super,
};
aio_mnt = kern_mount(&aio_fs);
@@ -425,7 +423,7 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
BUG_ON(PageWriteback(old));
get_page(new);
- rc = migrate_page_move_mapping(mapping, new, old, mode, 1);
+ rc = migrate_page_move_mapping(mapping, new, old, 1);
if (rc != MIGRATEPAGE_SUCCESS) {
put_page(new);
goto out_unlock;
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index c2b8663f5b00..89714308c25b 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/magic.h>
#include <linux/anon_inodes.h>
+#include <linux/pseudo_fs.h>
#include <linux/uaccess.h>
@@ -39,16 +40,18 @@ static const struct dentry_operations anon_inodefs_dentry_operations = {
.d_dname = anon_inodefs_dname,
};
-static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int anon_inodefs_init_fs_context(struct fs_context *fc)
{
- return mount_pseudo(fs_type, "anon_inode:", NULL,
- &anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
+ struct pseudo_fs_context *ctx = init_pseudo(fc, ANON_INODE_FS_MAGIC);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->dops = &anon_inodefs_dentry_operations;
+ return 0;
}
static struct file_system_type anon_inode_fs_type = {
.name = "anon_inodefs",
- .mount = anon_inodefs_mount,
+ .init_fs_context = anon_inodefs_init_fs_context,
.kill_sb = kill_anon_super,
};
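
aio, anon_inodes, block_dev and the btrfs test harness all follow the same conversion from mount_pseudo() to the new <linux/pseudo_fs.h> interface: init_pseudo() seeds a pseudo_fs_context with the filesystem's magic number, and the init_fs_context hook then customises it. The template distilled from those hunks (EXAMPLE_MAGIC and the example_* ops are placeholders):

    static int example_init_fs_context(struct fs_context *fc)
    {
        struct pseudo_fs_context *ctx = init_pseudo(fc, EXAMPLE_MAGIC);

        if (!ctx)
            return -ENOMEM;
        ctx->ops = &example_super_ops;      /* optional s_op override */
        ctx->dops = &example_dentry_ops;    /* optional d_op override */
        fc->s_iflags |= SB_I_NOEXEC;        /* optional superblock iflags */
        return 0;
    }
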
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index b8e145552ec7..cdb45829354d 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -23,6 +23,7 @@
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/mount.h>
+#include <linux/fs_context.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
@@ -821,7 +822,7 @@ static const struct super_operations s_ops = {
.evict_inode = bm_evict_inode,
};
-static int bm_fill_super(struct super_block *sb, void *data, int silent)
+static int bm_fill_super(struct super_block *sb, struct fs_context *fc)
{
int err;
static const struct tree_descr bm_files[] = {
@@ -836,10 +837,19 @@ static int bm_fill_super(struct super_block *sb, void *data, int silent)
return err;
}
-static struct dentry *bm_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int bm_get_tree(struct fs_context *fc)
{
- return mount_single(fs_type, flags, data, bm_fill_super);
+ return get_tree_single(fc, bm_fill_super);
+}
+
+static const struct fs_context_operations bm_context_ops = {
+ .get_tree = bm_get_tree,
+};
+
+static int bm_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &bm_context_ops;
+ return 0;
}
static struct linux_binfmt misc_format = {
@@ -850,7 +860,7 @@ static struct linux_binfmt misc_format = {
static struct file_system_type bm_fs_type = {
.owner = THIS_MODULE,
.name = "binfmt_misc",
- .mount = bm_mount,
+ .init_fs_context = bm_init_fs_context,
.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("binfmt_misc");
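
binfmt_misc, configfs and efivarfs share the other conversion pattern in this series: fill_super()'s (void *data, int silent) arguments become a struct fs_context *, and mount_single() is replaced by a minimal fs_context_operations whose ->get_tree calls get_tree_single(). Distilled (the example_* names are placeholders):

    static int example_fill_super(struct super_block *sb, struct fs_context *fc)
    {
        /* ... populate sb exactly as the old fill_super did ... */
        return 0;
    }

    static int example_get_tree(struct fs_context *fc)
    {
        return get_tree_single(fc, example_fill_super);
    }

    static const struct fs_context_operations example_context_ops = {
        .get_tree   = example_get_tree,
    };

    static int example_init_fs_context(struct fs_context *fc)
    {
        fc->ops = &example_context_ops;
        return 0;
    }
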
diff --git a/fs/block_dev.c b/fs/block_dev.c
index f00b569a9f89..4707dfff991b 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -26,6 +26,7 @@
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
@@ -821,19 +822,19 @@ static const struct super_operations bdev_sops = {
.evict_inode = bdev_evict_inode,
};
-static struct dentry *bd_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int bd_init_fs_context(struct fs_context *fc)
{
- struct dentry *dent;
- dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
- if (!IS_ERR(dent))
- dent->d_sb->s_iflags |= SB_I_CGROUPWB;
- return dent;
+ struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
+ if (!ctx)
+ return -ENOMEM;
+ fc->s_iflags |= SB_I_CGROUPWB;
+ ctx->ops = &bdev_sops;
+ return 0;
}
static struct file_system_type bd_type = {
.name = "bdev",
- .mount = bd_mount,
+ .init_fs_context = bd_init_fs_context,
.kill_sb = kill_anon_super,
};
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index 9238fd4f1734..1e3ba4949399 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -5,6 +5,7 @@
#include <linux/fs.h>
#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include "btrfs-tests.h"
#include "../ctree.h"
@@ -32,17 +33,19 @@ static const struct super_operations btrfs_test_super_ops = {
.destroy_inode = btrfs_test_destroy_inode,
};
-static struct dentry *btrfs_test_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+
+static int btrfs_test_init_fs_context(struct fs_context *fc)
{
- return mount_pseudo(fs_type, "btrfs_test:", &btrfs_test_super_ops,
- NULL, BTRFS_TEST_MAGIC);
+ struct pseudo_fs_context *ctx = init_pseudo(fc, BTRFS_TEST_MAGIC);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->ops = &btrfs_test_super_ops;
+ return 0;
}
static struct file_system_type test_type = {
.name = "btrfs_test_fs",
- .mount = btrfs_test_mount,
+ .init_fs_context = btrfs_test_init_fs_context,
.kill_sb = kill_anon_super,
};
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index aab29f48c62d..4ca0b8ff9a72 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1267,7 +1267,7 @@ __dentry_leases_walk(struct ceph_mds_client *mdsc,
if (!spin_trylock(&dentry->d_lock))
continue;
- if (dentry->d_lockref.count < 0) {
+ if (__lockref_is_dead(&dentry->d_lockref)) {
list_del_init(&di->lease_list);
goto next;
}
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index 791304fdde9d..55438dd58189 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -13,6 +13,7 @@
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mount.h>
+#include <linux/fs_context.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -52,7 +53,7 @@ static struct configfs_dirent configfs_root = {
.s_iattr = NULL,
};
-static int configfs_fill_super(struct super_block *sb, void *data, int silent)
+static int configfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
struct dentry *root;
@@ -88,16 +89,25 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static struct dentry *configfs_do_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int configfs_get_tree(struct fs_context *fc)
{
- return mount_single(fs_type, flags, data, configfs_fill_super);
+ return get_tree_single(fc, configfs_fill_super);
+}
+
+static const struct fs_context_operations configfs_context_ops = {
+ .get_tree = configfs_get_tree,
+};
+
+static int configfs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &configfs_context_ops;
+ return 0;
}
static struct file_system_type configfs_fs_type = {
.owner = THIS_MODULE,
.name = "configfs",
- .mount = configfs_do_mount,
+ .init_fs_context = configfs_init_fs_context,
.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("configfs");
diff --git a/fs/d_path.c b/fs/d_path.c
index e8fce6b1174f..a7d0a96b35ce 100644
--- a/fs/d_path.c
+++ b/fs/d_path.c
@@ -316,7 +316,6 @@ char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
end = ERR_PTR(-ENAMETOOLONG);
return end;
}
-EXPORT_SYMBOL(simple_dname);
/*
* Write full pathname from the root of the filesystem into the buffer.
diff --git a/fs/dax.c b/fs/dax.c
index e99e5f373c88..a237141d8787 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -26,7 +26,6 @@
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <asm/pgalloc.h>
-#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>
diff --git a/fs/dcache.c b/fs/dcache.c
index f41121e5d1ec..e88cf0554e65 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -861,6 +861,32 @@ void dput(struct dentry *dentry)
}
EXPORT_SYMBOL(dput);
+static void __dput_to_list(struct dentry *dentry, struct list_head *list)
+__must_hold(&dentry->d_lock)
+{
+ if (dentry->d_flags & DCACHE_SHRINK_LIST) {
+ /* let the owner of the list it's on deal with it */
+ --dentry->d_lockref.count;
+ } else {
+ if (dentry->d_flags & DCACHE_LRU_LIST)
+ d_lru_del(dentry);
+ if (!--dentry->d_lockref.count)
+ d_shrink_add(dentry, list);
+ }
+}
+
+void dput_to_list(struct dentry *dentry, struct list_head *list)
+{
+ rcu_read_lock();
+ if (likely(fast_dput(dentry))) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+ if (!retain_dentry(dentry))
+ __dput_to_list(dentry, list);
+ spin_unlock(&dentry->d_lock);
+}
/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
@@ -1067,7 +1093,7 @@ out:
return false;
}
-static void shrink_dentry_list(struct list_head *list)
+void shrink_dentry_list(struct list_head *list)
{
while (!list_empty(list)) {
struct dentry *dentry, *parent;
@@ -1089,18 +1115,9 @@ static void shrink_dentry_list(struct list_head *list)
rcu_read_unlock();
d_shrink_del(dentry);
parent = dentry->d_parent;
+ if (parent != dentry)
+ __dput_to_list(parent, list);
__dentry_kill(dentry);
- if (parent == dentry)
- continue;
- /*
- * We need to prune ancestors too. This is necessary to prevent
- * quadratic behavior of shrink_dcache_parent(), but is also
- * expected to be beneficial in reducing dentry cache
- * fragmentation.
- */
- dentry = parent;
- while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
- dentry = dentry_kill(dentry);
}
}
@@ -1445,8 +1462,11 @@ out:
struct select_data {
struct dentry *start;
+ union {
+ long found;
+ struct dentry *victim;
+ };
struct list_head dispose;
- int found;
};
static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
@@ -1478,6 +1498,37 @@ out:
return ret;
}
+static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
+{
+ struct select_data *data = _data;
+ enum d_walk_ret ret = D_WALK_CONTINUE;
+
+ if (data->start == dentry)
+ goto out;
+
+ if (dentry->d_flags & DCACHE_SHRINK_LIST) {
+ if (!dentry->d_lockref.count) {
+ rcu_read_lock();
+ data->victim = dentry;
+ return D_WALK_QUIT;
+ }
+ } else {
+ if (dentry->d_flags & DCACHE_LRU_LIST)
+ d_lru_del(dentry);
+ if (!dentry->d_lockref.count)
+ d_shrink_add(dentry, &data->dispose);
+ }
+ /*
+ * We can return to the caller if we have found some (this
+ * ensures forward progress). We'll be coming back to find
+ * the rest.
+ */
+ if (!list_empty(&data->dispose))
+ ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
+out:
+ return ret;
+}
+
/**
* shrink_dcache_parent - prune dcache
* @parent: parent of entries to prune
@@ -1487,12 +1538,9 @@ out:
void shrink_dcache_parent(struct dentry *parent)
{
for (;;) {
- struct select_data data;
+ struct select_data data = {.start = parent};
INIT_LIST_HEAD(&data.dispose);
- data.start = parent;
- data.found = 0;
-
d_walk(parent, &data, select_collect);
if (!list_empty(&data.dispose)) {
@@ -1503,6 +1551,24 @@ void shrink_dcache_parent(struct dentry *parent)
cond_resched();
if (!data.found)
break;
+ data.victim = NULL;
+ d_walk(parent, &data, select_collect2);
+ if (data.victim) {
+ struct dentry *parent;
+ spin_lock(&data.victim->d_lock);
+ if (!shrink_lock_dentry(data.victim)) {
+ spin_unlock(&data.victim->d_lock);
+ rcu_read_unlock();
+ } else {
+ rcu_read_unlock();
+ parent = data.victim->d_parent;
+ if (parent != data.victim)
+ __dput_to_list(parent, &data.dispose);
+ __dentry_kill(data.victim);
+ }
+ }
+ if (!list_empty(&data.dispose))
+ shrink_dentry_list(&data.dispose);
}
}
EXPORT_SYMBOL(shrink_dcache_parent);
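
The newly exposed dput_to_list()/shrink_dentry_list() pair lets a caller defer the final kill of a batch of dentries rather than recursing through dentry_kill() one at a time, which is also how shrink_dentry_list() now prunes ancestors: a parent whose count drops to zero is queued on the same list via __dput_to_list() instead of being walked inline. A plausible caller shape (sketch only; dentry1/dentry2 are stand-ins):

    LIST_HEAD(dispose);

    /* References that drop to zero (and are not retained) are queued
     * on 'dispose' instead of being killed inline. */
    dput_to_list(dentry1, &dispose);
    dput_to_list(dentry2, &dispose);

    /* Kill everything collected, including any parents the kills
     * themselves push back onto the list. */
    shrink_dentry_list(&dispose);
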
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 5bc3c4a4c563..fa4f6447ddad 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -7,6 +7,7 @@
#include <linux/ctype.h>
#include <linux/efi.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/ucs2_string.h>
@@ -28,8 +29,6 @@ static const struct super_operations efivarfs_ops = {
.evict_inode = efivarfs_evict_inode,
};
-static struct super_block *efivarfs_sb;
-
/*
* Compare two efivarfs file names.
*
@@ -188,14 +187,12 @@ static int efivarfs_destroy(struct efivar_entry *entry, void *data)
return 0;
}
-static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
+static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *inode = NULL;
struct dentry *root;
int err;
- efivarfs_sb = sb;
-
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
@@ -223,16 +220,24 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
return err;
}
-static struct dentry *efivarfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int efivarfs_get_tree(struct fs_context *fc)
+{
+ return get_tree_single(fc, efivarfs_fill_super);
+}
+
+static const struct fs_context_operations efivarfs_context_ops = {
+ .get_tree = efivarfs_get_tree,
+};
+
+static int efivarfs_init_fs_context(struct fs_context *fc)
{
- return mount_single(fs_type, flags, data, efivarfs_fill_super);
+ fc->ops = &efivarfs_context_ops;
+ return 0;
}
static void efivarfs_kill_sb(struct super_block *sb)
{
kill_litter_super(sb);
- efivarfs_sb = NULL;
/* Remove all entries and destroy */
__efivar_entry_iter(efivarfs_destroy, &efivarfs_list, NULL, NULL);
@@ -241,7 +246,7 @@ static void efivarfs_kill_sb(struct super_block *sb)
static struct file_system_type efivarfs_type = {
.owner = THIS_MODULE,
.name = "efivarfs",
- .mount = efivarfs_mount,
+ .init_fs_context = efivarfs_init_fs_context,
.kill_sb = efivarfs_kill_sb,
};
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 0f9c073d78d5..d7f1f5011fac 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -291,7 +291,7 @@ static LIST_HEAD(tfile_check_list);
#include <linux/sysctl.h>
-static long zero;
+static long long_zero;
static long long_max = LONG_MAX;
struct ctl_table epoll_table[] = {
@@ -301,7 +301,7 @@ struct ctl_table epoll_table[] = {
.maxlen = sizeof(max_user_watches),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
- .extra1 = &zero,
+ .extra1 = &long_zero,
.extra2 = &long_max,
},
{ }
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 4eb2f3920140..abbf14e9bd72 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2919,7 +2919,7 @@ int f2fs_migrate_page(struct address_space *mapping,
/* one extra reference was held for atomic_write page */
extra_count = atomic_written ? 1 : 0;
rc = migrate_page_move_mapping(mapping, newpage,
- page, mode, extra_count);
+ page, extra_count);
if (rc != MIGRATEPAGE_SUCCESS) {
if (atomic_written)
mutex_unlock(&fi->inmem_lock);
diff --git a/fs/fs_parser.c b/fs/fs_parser.c
index 0d388faa25d1..460ea4206fa2 100644
--- a/fs/fs_parser.c
+++ b/fs/fs_parser.c
@@ -264,6 +264,7 @@ int fs_lookup_param(struct fs_context *fc,
return invalf(fc, "%s: not usable as path", param->key);
}
+ f->refcnt++; /* filename_lookup() drops our ref. */
ret = filename_lookup(param->dirfd, f, flags, _path, NULL);
if (ret < 0) {
errorf(fc, "%s: Lookup failure for '%s'", param->key, f->name);
diff --git a/fs/fs_pin.c b/fs/fs_pin.c
index a6497cf8ae53..47ef3c71ce90 100644
--- a/fs/fs_pin.c
+++ b/fs/fs_pin.c
@@ -19,20 +19,14 @@ void pin_remove(struct fs_pin *pin)
spin_unlock_irq(&pin->wait.lock);
}
-void pin_insert_group(struct fs_pin *pin, struct vfsmount *m, struct hlist_head *p)
+void pin_insert(struct fs_pin *pin, struct vfsmount *m)
{
spin_lock(&pin_lock);
- if (p)
- hlist_add_head(&pin->s_list, p);
+ hlist_add_head(&pin->s_list, &m->mnt_sb->s_pins);
hlist_add_head(&pin->m_list, &real_mount(m)->mnt_pins);
spin_unlock(&pin_lock);
}
-void pin_insert(struct fs_pin *pin, struct vfsmount *m)
-{
- pin_insert_group(pin, m, &m->mnt_sb->s_pins);
-}
-
void pin_kill(struct fs_pin *p)
{
wait_queue_entry_t wait;
diff --git a/fs/fsopen.c b/fs/fsopen.c
index a8bf83ce8d4e..043ffa8dc263 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -226,6 +226,8 @@ static int vfs_fsconfig_locked(struct fs_context *fc, int cmd,
case FSCONFIG_CMD_CREATE:
if (fc->phase != FS_CONTEXT_CREATE_PARAMS)
return -EBUSY;
+ if (!mount_capable(fc))
+ return -EPERM;
fc->phase = FS_CONTEXT_CREATING;
ret = vfs_get_tree(fc);
if (ret)
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 14ce1e47f980..c23f6f243ad4 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -346,7 +346,7 @@ static int fuse_ctl_fill_super(struct super_block *sb, struct fs_context *fctx)
static int fuse_ctl_get_tree(struct fs_context *fc)
{
- return vfs_get_super(fc, vfs_get_single_super, fuse_ctl_fill_super);
+ return get_tree_single(fc, fuse_ctl_fill_super);
}
static const struct fs_context_operations fuse_ctl_context_ops = {
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 1dcc57189382..a478df035651 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1299,7 +1299,7 @@ static int hugetlbfs_get_tree(struct fs_context *fc)
int err = hugetlbfs_validate(fc);
if (err)
return err;
- return vfs_get_super(fc, vfs_get_independent_super, hugetlbfs_fill_super);
+ return get_tree_nodev(fc, hugetlbfs_fill_super);
}
static void hugetlbfs_fs_context_free(struct fs_context *fc)
diff --git a/fs/internal.h b/fs/internal.h
index 2f3c3de51fad..315fcd8d237c 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -14,6 +14,7 @@ struct path;
struct mount;
struct shrink_control;
struct fs_context;
+struct user_namespace;
/*
* block_dev.c
@@ -107,6 +108,7 @@ extern struct file *alloc_empty_file_noaccount(int, const struct cred *);
extern int reconfigure_super(struct fs_context *);
extern bool trylock_super(struct super_block *sb);
extern struct super_block *user_get_super(dev_t);
+extern bool mount_capable(struct fs_context *);
/*
* open.c
@@ -154,6 +156,9 @@ extern int d_set_mounted(struct dentry *dentry);
extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc);
extern struct dentry *d_alloc_cursor(struct dentry *);
extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
+extern char *simple_dname(struct dentry *, char *, int);
+extern void dput_to_list(struct dentry *, struct list_head *);
+extern void shrink_dentry_list(struct list_head *);
/*
* read_write.c
@@ -182,15 +187,5 @@ extern const struct dentry_operations ns_dentry_operations;
extern int do_vfs_ioctl(struct file *file, unsigned int fd, unsigned int cmd,
unsigned long arg);
-/*
- * iomap support:
- */
-typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
- void *data, struct iomap *iomap);
-
-loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
- unsigned flags, const struct iomap_ops *ops, void *data,
- iomap_actor_t actor);
-
/* direct-io.c: */
int sb_init_dio_done_wq(struct super_block *sb);
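
The iomap_actor_t typedef and iomap_apply() declaration leave internal.h because the monolithic fs/iomap.c below is deleted and split into fs/iomap/ (apply.c, buffered-io.c, direct-io.c, fiemap.c, seek.c, swapfile.c), making the actor machinery private to that directory. The calling convention itself is unchanged; roughly (example_actor is a placeholder):

    /* An actor is handed one contiguous mapping at a time and returns
     * the number of bytes it consumed from [pos, pos + length). */
    static loff_t example_actor(struct inode *inode, loff_t pos, loff_t length,
                                void *data, struct iomap *iomap)
    {
        /* operate on up to 'length' bytes of a single mapping */
        return length;
    }

    /* Callers drive it in a loop until the whole range is covered: */
    while (length > 0) {
        loff_t ret = iomap_apply(inode, pos, length, flags, ops, data,
                                 example_actor);
        if (ret <= 0)
            break;
        pos += ret;
        length -= ret;
    }
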
diff --git a/fs/iomap.c b/fs/iomap.c
deleted file mode 100644
index 217c3e5a13d6..000000000000
--- a/fs/iomap.c
+++ /dev/null
@@ -1,2205 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2010 Red Hat, Inc.
- * Copyright (c) 2016-2018 Christoph Hellwig.
- */
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/fs.h>
-#include <linux/iomap.h>
-#include <linux/uaccess.h>
-#include <linux/gfp.h>
-#include <linux/migrate.h>
-#include <linux/mm.h>
-#include <linux/mm_inline.h>
-#include <linux/swap.h>
-#include <linux/pagemap.h>
-#include <linux/pagevec.h>
-#include <linux/file.h>
-#include <linux/uio.h>
-#include <linux/backing-dev.h>
-#include <linux/buffer_head.h>
-#include <linux/task_io_accounting_ops.h>
-#include <linux/dax.h>
-#include <linux/sched/signal.h>
-
-#include "internal.h"
-
-/*
- * Execute a iomap write on a segment of the mapping that spans a
- * contiguous range of pages that have identical block mapping state.
- *
- * This avoids the need to map pages individually, do individual allocations
- * for each page and most importantly avoid the need for filesystem specific
- * locking per page. Instead, all the operations are amortised over the entire
- * range of pages. It is assumed that the filesystems will lock whatever
- * resources they require in the iomap_begin call, and release them in the
- * iomap_end call.
- */
-loff_t
-iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
- const struct iomap_ops *ops, void *data, iomap_actor_t actor)
-{
- struct iomap iomap = { 0 };
- loff_t written = 0, ret;
-
- /*
- * Need to map a range from start position for length bytes. This can
- * span multiple pages - it is only guaranteed to return a range of a
- * single type of pages (e.g. all into a hole, all mapped or all
- * unwritten). Failure at this point has nothing to undo.
- *
- * If allocation is required for this range, reserve the space now so
- * that the allocation is guaranteed to succeed later on. Once we copy
- * the data into the page cache pages, then we cannot fail otherwise we
- * expose transient stale data. If the reserve fails, we can safely
- * back out at this point as there is nothing to undo.
- */
- ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
- if (ret)
- return ret;
- if (WARN_ON(iomap.offset > pos))
- return -EIO;
- if (WARN_ON(iomap.length == 0))
- return -EIO;
-
- /*
- * Cut down the length to the one actually provided by the filesystem,
- * as it might not be able to give us the whole size that we requested.
- */
- if (iomap.offset + iomap.length < pos + length)
- length = iomap.offset + iomap.length - pos;
-
- /*
- * Now that we have guaranteed that the space allocation will succeed.
- * we can do the copy-in page by page without having to worry about
- * failures exposing transient data.
- */
- written = actor(inode, pos, length, data, &iomap);
-
- /*
- * Now the data has been copied, commit the range we've copied. This
- * should not fail unless the filesystem has had a fatal error.
- */
- if (ops->iomap_end) {
- ret = ops->iomap_end(inode, pos, length,
- written > 0 ? written : 0,
- flags, &iomap);
- }
-
- return written ? written : ret;
-}
-
-static sector_t
-iomap_sector(struct iomap *iomap, loff_t pos)
-{
- return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
-}
-
-static struct iomap_page *
-iomap_page_create(struct inode *inode, struct page *page)
-{
- struct iomap_page *iop = to_iomap_page(page);
-
- if (iop || i_blocksize(inode) == PAGE_SIZE)
- return iop;
-
- iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
- atomic_set(&iop->read_count, 0);
- atomic_set(&iop->write_count, 0);
- bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
-
- /*
- * migrate_page_move_mapping() assumes that pages with private data have
- * their count elevated by 1.
- */
- get_page(page);
- set_page_private(page, (unsigned long)iop);
- SetPagePrivate(page);
- return iop;
-}
-
-static void
-iomap_page_release(struct page *page)
-{
- struct iomap_page *iop = to_iomap_page(page);
-
- if (!iop)
- return;
- WARN_ON_ONCE(atomic_read(&iop->read_count));
- WARN_ON_ONCE(atomic_read(&iop->write_count));
- ClearPagePrivate(page);
- set_page_private(page, 0);
- put_page(page);
- kfree(iop);
-}
-
-/*
- * Calculate the range inside the page that we actually need to read.
- */
-static void
-iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
- loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
-{
- loff_t orig_pos = *pos;
- loff_t isize = i_size_read(inode);
- unsigned block_bits = inode->i_blkbits;
- unsigned block_size = (1 << block_bits);
- unsigned poff = offset_in_page(*pos);
- unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
- unsigned first = poff >> block_bits;
- unsigned last = (poff + plen - 1) >> block_bits;
-
- /*
- * If the block size is smaller than the page size we need to check the
- * per-block uptodate status and adjust the offset and length if needed
- * to avoid reading in already uptodate ranges.
- */
- if (iop) {
- unsigned int i;
-
- /* move forward for each leading block marked uptodate */
- for (i = first; i <= last; i++) {
- if (!test_bit(i, iop->uptodate))
- break;
- *pos += block_size;
- poff += block_size;
- plen -= block_size;
- first++;
- }
-
- /* truncate len if we find any trailing uptodate block(s) */
- for ( ; i <= last; i++) {
- if (test_bit(i, iop->uptodate)) {
- plen -= (last - i + 1) * block_size;
- last = i - 1;
- break;
- }
- }
- }
-
- /*
- * If the extent spans the block that contains the i_size we need to
- * handle both halves separately so that we properly zero data in the
- * page cache for blocks that are entirely outside of i_size.
- */
- if (orig_pos <= isize && orig_pos + length > isize) {
- unsigned end = offset_in_page(isize - 1) >> block_bits;
-
- if (first <= end && last > end)
- plen -= (last - end) * block_size;
- }
-
- *offp = poff;
- *lenp = plen;
-}
-
-static void
-iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
-{
- struct iomap_page *iop = to_iomap_page(page);
- struct inode *inode = page->mapping->host;
- unsigned first = off >> inode->i_blkbits;
- unsigned last = (off + len - 1) >> inode->i_blkbits;
- unsigned int i;
- bool uptodate = true;
-
- if (iop) {
- for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
- if (i >= first && i <= last)
- set_bit(i, iop->uptodate);
- else if (!test_bit(i, iop->uptodate))
- uptodate = false;
- }
- }
-
- if (uptodate && !PageError(page))
- SetPageUptodate(page);
-}
-
-static void
-iomap_read_finish(struct iomap_page *iop, struct page *page)
-{
- if (!iop || atomic_dec_and_test(&iop->read_count))
- unlock_page(page);
-}
-
-static void
-iomap_read_page_end_io(struct bio_vec *bvec, int error)
-{
- struct page *page = bvec->bv_page;
- struct iomap_page *iop = to_iomap_page(page);
-
- if (unlikely(error)) {
- ClearPageUptodate(page);
- SetPageError(page);
- } else {
- iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
- }
-
- iomap_read_finish(iop, page);
-}
-
-static void
-iomap_read_end_io(struct bio *bio)
-{
- int error = blk_status_to_errno(bio->bi_status);
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
-
- bio_for_each_segment_all(bvec, bio, iter_all)
- iomap_read_page_end_io(bvec, error);
- bio_put(bio);
-}
-
-struct iomap_readpage_ctx {
- struct page *cur_page;
- bool cur_page_in_bio;
- bool is_readahead;
- struct bio *bio;
- struct list_head *pages;
-};
-
-static void
-iomap_read_inline_data(struct inode *inode, struct page *page,
- struct iomap *iomap)
-{
- size_t size = i_size_read(inode);
- void *addr;
-
- if (PageUptodate(page))
- return;
-
- BUG_ON(page->index);
- BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
-
- addr = kmap_atomic(page);
- memcpy(addr, iomap->inline_data, size);
- memset(addr + size, 0, PAGE_SIZE - size);
- kunmap_atomic(addr);
- SetPageUptodate(page);
-}
-
-static loff_t
-iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap)
-{
- struct iomap_readpage_ctx *ctx = data;
- struct page *page = ctx->cur_page;
- struct iomap_page *iop = iomap_page_create(inode, page);
- bool same_page = false, is_contig = false;
- loff_t orig_pos = pos;
- unsigned poff, plen;
- sector_t sector;
-
- if (iomap->type == IOMAP_INLINE) {
- WARN_ON_ONCE(pos);
- iomap_read_inline_data(inode, page, iomap);
- return PAGE_SIZE;
- }
-
- /* zero post-eof blocks as the page may be mapped */
- iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
- if (plen == 0)
- goto done;
-
- if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
- zero_user(page, poff, plen);
- iomap_set_range_uptodate(page, poff, plen);
- goto done;
- }
-
- ctx->cur_page_in_bio = true;
-
- /*
- * Try to merge into a previous segment if we can.
- */
- sector = iomap_sector(iomap, pos);
- if (ctx->bio && bio_end_sector(ctx->bio) == sector)
- is_contig = true;
-
- if (is_contig &&
- __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
- if (!same_page && iop)
- atomic_inc(&iop->read_count);
- goto done;
- }
-
- /*
- * If we start a new segment we need to increase the read count, and we
- * need to do so before submitting any previous full bio to make sure
- * that we don't prematurely unlock the page.
- */
- if (iop)
- atomic_inc(&iop->read_count);
-
- if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
- gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
- int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
- if (ctx->bio)
- submit_bio(ctx->bio);
-
- if (ctx->is_readahead) /* same as readahead_gfp_mask */
- gfp |= __GFP_NORETRY | __GFP_NOWARN;
- ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
- ctx->bio->bi_opf = REQ_OP_READ;
- if (ctx->is_readahead)
- ctx->bio->bi_opf |= REQ_RAHEAD;
- ctx->bio->bi_iter.bi_sector = sector;
- bio_set_dev(ctx->bio, iomap->bdev);
- ctx->bio->bi_end_io = iomap_read_end_io;
- }
-
- bio_add_page(ctx->bio, page, plen, poff);
-done:
- /*
- * Move the caller beyond our range so that it keeps making progress.
- * For that we have to include any leading non-uptodate ranges, but
- * we can skip trailing ones as they will be handled in the next
- * iteration.
- */
- return pos - orig_pos + plen;
-}
-
-int
-iomap_readpage(struct page *page, const struct iomap_ops *ops)
-{
- struct iomap_readpage_ctx ctx = { .cur_page = page };
- struct inode *inode = page->mapping->host;
- unsigned poff;
- loff_t ret;
-
- for (poff = 0; poff < PAGE_SIZE; poff += ret) {
- ret = iomap_apply(inode, page_offset(page) + poff,
- PAGE_SIZE - poff, 0, ops, &ctx,
- iomap_readpage_actor);
- if (ret <= 0) {
- WARN_ON_ONCE(ret == 0);
- SetPageError(page);
- break;
- }
- }
-
- if (ctx.bio) {
- submit_bio(ctx.bio);
- WARN_ON_ONCE(!ctx.cur_page_in_bio);
- } else {
- WARN_ON_ONCE(ctx.cur_page_in_bio);
- unlock_page(page);
- }
-
- /*
- * Just like mpage_readpages and block_read_full_page we always
- * return 0 and just mark the page as PageError on errors. This
- * should be cleaned up all through the stack eventually.
- */
- return 0;
-}
-EXPORT_SYMBOL_GPL(iomap_readpage);
-
-static struct page *
-iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
- loff_t length, loff_t *done)
-{
- while (!list_empty(pages)) {
- struct page *page = lru_to_page(pages);
-
- if (page_offset(page) >= (u64)pos + length)
- break;
-
- list_del(&page->lru);
- if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
- GFP_NOFS))
- return page;
-
- /*
- * If we already have a page in the page cache at index we are
- * done. Upper layers don't care if it is uptodate after the
- * readpages call itself as every page gets checked again once
- * actually needed.
- */
- *done += PAGE_SIZE;
- put_page(page);
- }
-
- return NULL;
-}
-
-static loff_t
-iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap)
-{
- struct iomap_readpage_ctx *ctx = data;
- loff_t done, ret;
-
- for (done = 0; done < length; done += ret) {
- if (ctx->cur_page && offset_in_page(pos + done) == 0) {
- if (!ctx->cur_page_in_bio)
- unlock_page(ctx->cur_page);
- put_page(ctx->cur_page);
- ctx->cur_page = NULL;
- }
- if (!ctx->cur_page) {
- ctx->cur_page = iomap_next_page(inode, ctx->pages,
- pos, length, &done);
- if (!ctx->cur_page)
- break;
- ctx->cur_page_in_bio = false;
- }
- ret = iomap_readpage_actor(inode, pos + done, length - done,
- ctx, iomap);
- }
-
- return done;
-}
-
-int
-iomap_readpages(struct address_space *mapping, struct list_head *pages,
- unsigned nr_pages, const struct iomap_ops *ops)
-{
- struct iomap_readpage_ctx ctx = {
- .pages = pages,
- .is_readahead = true,
- };
- loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
- loff_t last = page_offset(list_entry(pages->next, struct page, lru));
- loff_t length = last - pos + PAGE_SIZE, ret = 0;
-
- while (length > 0) {
- ret = iomap_apply(mapping->host, pos, length, 0, ops,
- &ctx, iomap_readpages_actor);
- if (ret <= 0) {
- WARN_ON_ONCE(ret == 0);
- goto done;
- }
- pos += ret;
- length -= ret;
- }
- ret = 0;
-done:
- if (ctx.bio)
- submit_bio(ctx.bio);
- if (ctx.cur_page) {
- if (!ctx.cur_page_in_bio)
- unlock_page(ctx.cur_page);
- put_page(ctx.cur_page);
- }
-
- /*
- * Check that we didn't lose a page due to the arcane calling
- * conventions.
- */
- WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
- return ret;
-}
-EXPORT_SYMBOL_GPL(iomap_readpages);
-
-/*
- * iomap_is_partially_uptodate checks whether blocks within a page are
- * uptodate or not.
- *
- * Returns true if all blocks which correspond to a file portion
- * we want to read within the page are uptodate.
- */
-int
-iomap_is_partially_uptodate(struct page *page, unsigned long from,
- unsigned long count)
-{
- struct iomap_page *iop = to_iomap_page(page);
- struct inode *inode = page->mapping->host;
- unsigned len, first, last;
- unsigned i;
-
- /* Limit range to one page */
- len = min_t(unsigned, PAGE_SIZE - from, count);
-
- /* First and last blocks in range within page */
- first = from >> inode->i_blkbits;
- last = (from + len - 1) >> inode->i_blkbits;
-
- if (iop) {
- for (i = first; i <= last; i++)
- if (!test_bit(i, iop->uptodate))
- return 0;
- return 1;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
-
-int
-iomap_releasepage(struct page *page, gfp_t gfp_mask)
-{
- /*
- * mm accommodates an old ext3 case where clean pages might not have had
- * the dirty bit cleared. Thus, it can send actual dirty pages to
- * ->releasepage() via shrink_active_list(); skip those here.
- */
- if (PageDirty(page) || PageWriteback(page))
- return 0;
- iomap_page_release(page);
- return 1;
-}
-EXPORT_SYMBOL_GPL(iomap_releasepage);
-
-void
-iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
-{
- /*
- * If we are invalidating the entire page, clear the dirty state from it
- * and release it to avoid unnecessary buildup of the LRU.
- */
- if (offset == 0 && len == PAGE_SIZE) {
- WARN_ON_ONCE(PageWriteback(page));
- cancel_dirty_page(page);
- iomap_page_release(page);
- }
-}
-EXPORT_SYMBOL_GPL(iomap_invalidatepage);
-
-#ifdef CONFIG_MIGRATION
-int
-iomap_migrate_page(struct address_space *mapping, struct page *newpage,
- struct page *page, enum migrate_mode mode)
-{
- int ret;
-
- ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
- if (ret != MIGRATEPAGE_SUCCESS)
- return ret;
-
- if (page_has_private(page)) {
- ClearPagePrivate(page);
- get_page(newpage);
- set_page_private(newpage, page_private(page));
- set_page_private(page, 0);
- put_page(page);
- SetPagePrivate(newpage);
- }
-
- if (mode != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
- else
- migrate_page_states(newpage, page);
- return MIGRATEPAGE_SUCCESS;
-}
-EXPORT_SYMBOL_GPL(iomap_migrate_page);
-#endif /* CONFIG_MIGRATION */
-
-static void
-iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
-{
- loff_t i_size = i_size_read(inode);
-
- /*
- * Only truncate newly allocated pages beyond EOF, even if the
- * write started inside the existing inode size.
- */
- if (pos + len > i_size)
- truncate_pagecache_range(inode, max(pos, i_size), pos + len);
-}
-
-static int
-iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
- unsigned poff, unsigned plen, unsigned from, unsigned to,
- struct iomap *iomap)
-{
- struct bio_vec bvec;
- struct bio bio;
-
- if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
- zero_user_segments(page, poff, from, to, poff + plen);
- iomap_set_range_uptodate(page, poff, plen);
- return 0;
- }
-
- bio_init(&bio, &bvec, 1);
- bio.bi_opf = REQ_OP_READ;
- bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
- bio_set_dev(&bio, iomap->bdev);
- __bio_add_page(&bio, page, plen, poff);
- return submit_bio_wait(&bio);
-}
-
-static int
-__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
- struct page *page, struct iomap *iomap)
-{
- struct iomap_page *iop = iomap_page_create(inode, page);
- loff_t block_size = i_blocksize(inode);
- loff_t block_start = pos & ~(block_size - 1);
- loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
- unsigned from = offset_in_page(pos), to = from + len, poff, plen;
- int status = 0;
-
- if (PageUptodate(page))
- return 0;
-
- do {
- iomap_adjust_read_range(inode, iop, &block_start,
- block_end - block_start, &poff, &plen);
- if (plen == 0)
- break;
-
- if ((from > poff && from < poff + plen) ||
- (to > poff && to < poff + plen)) {
- status = iomap_read_page_sync(inode, block_start, page,
- poff, plen, from, to, iomap);
- if (status)
- break;
- }
-
- } while ((block_start += plen) < block_end);
-
- return status;
-}
-
-static int
-iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, struct iomap *iomap)
-{
- const struct iomap_page_ops *page_ops = iomap->page_ops;
- pgoff_t index = pos >> PAGE_SHIFT;
- struct page *page;
- int status = 0;
-
- BUG_ON(pos + len > iomap->offset + iomap->length);
-
- if (fatal_signal_pending(current))
- return -EINTR;
-
- if (page_ops && page_ops->page_prepare) {
- status = page_ops->page_prepare(inode, pos, len, iomap);
- if (status)
- return status;
- }
-
- page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
- if (!page) {
- status = -ENOMEM;
- goto out_no_page;
- }
-
- if (iomap->type == IOMAP_INLINE)
- iomap_read_inline_data(inode, page, iomap);
- else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
- status = __block_write_begin_int(page, pos, len, NULL, iomap);
- else
- status = __iomap_write_begin(inode, pos, len, page, iomap);
-
- if (unlikely(status))
- goto out_unlock;
-
- *pagep = page;
- return 0;
-
-out_unlock:
- unlock_page(page);
- put_page(page);
- iomap_write_failed(inode, pos, len);
-
-out_no_page:
- if (page_ops && page_ops->page_done)
- page_ops->page_done(inode, pos, 0, NULL, iomap);
- return status;
-}
-
-int
-iomap_set_page_dirty(struct page *page)
-{
- struct address_space *mapping = page_mapping(page);
- int newly_dirty;
-
- if (unlikely(!mapping))
- return !TestSetPageDirty(page);
-
- /*
- * Lock out page->mem_cgroup migration to keep PageDirty
- * synchronized with per-memcg dirty page counters.
- */
- lock_page_memcg(page);
- newly_dirty = !TestSetPageDirty(page);
- if (newly_dirty)
- __set_page_dirty(page, mapping, 0);
- unlock_page_memcg(page);
-
- if (newly_dirty)
- __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- return newly_dirty;
-}
-EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
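
Taken together, the page-cache hooks above are designed to drop straight into an address_space_operations table. A minimal sketch for a hypothetical filesystem, where myfs_readpage/myfs_readpages are thin wrappers around iomap_readpage()/iomap_readpages() (all myfs_* names are assumptions for illustration, not part of this patch):

static const struct address_space_operations myfs_aops = {
	.readpage		= myfs_readpage,
	.readpages		= myfs_readpages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= iomap_releasepage,
	.invalidatepage		= iomap_invalidatepage,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
#ifdef CONFIG_MIGRATION
	.migratepage		= iomap_migrate_page,	/* moves the iomap_page private data */
#endif
};
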
-
-static int
-__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
- unsigned copied, struct page *page, struct iomap *iomap)
-{
- flush_dcache_page(page);
-
- /*
- * The blocks that were entirely written will now be uptodate, so we
- * don't have to worry about a readpage reading them and overwriting a
- * partial write. However if we have encountered a short write and only
- * partially written into a block, it will not be marked uptodate, so a
- * readpage might come in and destroy our partial write.
- *
- * Do the simplest thing, and just treat any short write to a non
- * uptodate page as a zero-length write, and force the caller to redo
- * the whole thing.
- */
- if (unlikely(copied < len && !PageUptodate(page)))
- return 0;
- iomap_set_range_uptodate(page, offset_in_page(pos), len);
- iomap_set_page_dirty(page);
- return copied;
-}
-
-static int
-iomap_write_end_inline(struct inode *inode, struct page *page,
- struct iomap *iomap, loff_t pos, unsigned copied)
-{
- void *addr;
-
- WARN_ON_ONCE(!PageUptodate(page));
- BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
-
- addr = kmap_atomic(page);
- memcpy(iomap->inline_data + pos, addr + pos, copied);
- kunmap_atomic(addr);
-
- mark_inode_dirty(inode);
- return copied;
-}
-
-static int
-iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
- unsigned copied, struct page *page, struct iomap *iomap)
-{
- const struct iomap_page_ops *page_ops = iomap->page_ops;
- loff_t old_size = inode->i_size;
- int ret;
-
- if (iomap->type == IOMAP_INLINE) {
- ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
- } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
- ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
- page, NULL);
- } else {
- ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
- }
-
- /*
- * Update the in-memory inode size after copying the data into the page
- * cache. It's up to the file system to write the updated size to disk,
- * preferably after I/O completion so that no stale data is exposed.
- */
- if (pos + ret > old_size) {
- i_size_write(inode, pos + ret);
- iomap->flags |= IOMAP_F_SIZE_CHANGED;
- }
- unlock_page(page);
-
- if (old_size < pos)
- pagecache_isize_extended(inode, old_size, pos);
- if (page_ops && page_ops->page_done)
- page_ops->page_done(inode, pos, ret, page, iomap);
- put_page(page);
-
- if (ret < len)
- iomap_write_failed(inode, pos, len);
- return ret;
-}
-
-static loff_t
-iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap)
-{
- struct iov_iter *i = data;
- long status = 0;
- ssize_t written = 0;
- unsigned int flags = AOP_FLAG_NOFS;
-
- do {
- struct page *page;
- unsigned long offset; /* Offset into pagecache page */
- unsigned long bytes; /* Bytes to write to page */
- size_t copied; /* Bytes copied from user */
-
- offset = offset_in_page(pos);
- bytes = min_t(unsigned long, PAGE_SIZE - offset,
- iov_iter_count(i));
-again:
- if (bytes > length)
- bytes = length;
-
- /*
- * Bring in the user page that we will copy from _first_.
- * Otherwise there's a nasty deadlock on copying from the
- * same page as we're writing to, without it being marked
- * up-to-date.
- *
- * Not only is this an optimisation, but it is also required
- * to check that the address is actually valid, when atomic
- * usercopies are used, below.
- */
- if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
- status = -EFAULT;
- break;
- }
-
- status = iomap_write_begin(inode, pos, bytes, flags, &page,
- iomap);
- if (unlikely(status))
- break;
-
- if (mapping_writably_mapped(inode->i_mapping))
- flush_dcache_page(page);
-
- copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
-
- flush_dcache_page(page);
-
- status = iomap_write_end(inode, pos, bytes, copied, page,
- iomap);
- if (unlikely(status < 0))
- break;
- copied = status;
-
- cond_resched();
-
- iov_iter_advance(i, copied);
- if (unlikely(copied == 0)) {
- /*
- * If we were unable to copy any data at all, we must
- * fall back to a single segment length write.
- *
- * If we didn't fallback here, we could livelock
- * because not all segments in the iov can be copied at
- * once without a pagefault.
- */
- bytes = min_t(unsigned long, PAGE_SIZE - offset,
- iov_iter_single_seg_count(i));
- goto again;
- }
- pos += copied;
- written += copied;
- length -= copied;
-
- balance_dirty_pages_ratelimited(inode->i_mapping);
- } while (iov_iter_count(i) && length);
-
- return written ? written : status;
-}
-
-ssize_t
-iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
- const struct iomap_ops *ops)
-{
- struct inode *inode = iocb->ki_filp->f_mapping->host;
- loff_t pos = iocb->ki_pos, ret = 0, written = 0;
-
- while (iov_iter_count(iter)) {
- ret = iomap_apply(inode, pos, iov_iter_count(iter),
- IOMAP_WRITE, ops, iter, iomap_write_actor);
- if (ret <= 0)
- break;
- pos += ret;
- written += ret;
- }
-
- return written ? written : ret;
-}
-EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
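
A filesystem that supplies a struct iomap_ops would normally drive this from its ->write_iter method, with i_rwsem held to serialise against truncate and direct I/O. A minimal sketch, assuming a hypothetical myfs_iomap_ops:

static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
	inode_unlock(inode);

	/* O_SYNC/O_DSYNC semantics are handled after the copy-in. */
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
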
-
-static struct page *
-__iomap_read_page(struct inode *inode, loff_t offset)
-{
- struct address_space *mapping = inode->i_mapping;
- struct page *page;
-
- page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
- if (IS_ERR(page))
- return page;
- if (!PageUptodate(page)) {
- put_page(page);
- return ERR_PTR(-EIO);
- }
- return page;
-}
-
-static loff_t
-iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap)
-{
- long status = 0;
- ssize_t written = 0;
-
- do {
- struct page *page, *rpage;
- unsigned long offset; /* Offset into pagecache page */
- unsigned long bytes; /* Bytes to write to page */
-
- offset = offset_in_page(pos);
- bytes = min_t(loff_t, PAGE_SIZE - offset, length);
-
- rpage = __iomap_read_page(inode, pos);
- if (IS_ERR(rpage))
- return PTR_ERR(rpage);
-
- status = iomap_write_begin(inode, pos, bytes,
- AOP_FLAG_NOFS, &page, iomap);
- put_page(rpage);
- if (unlikely(status))
- return status;
-
- WARN_ON_ONCE(!PageUptodate(page));
-
- status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
- if (unlikely(status <= 0)) {
- if (WARN_ON_ONCE(status == 0))
- return -EIO;
- return status;
- }
-
- cond_resched();
-
- pos += status;
- written += status;
- length -= status;
-
- balance_dirty_pages_ratelimited(inode->i_mapping);
- } while (length);
-
- return written;
-}
-
-int
-iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
- const struct iomap_ops *ops)
-{
- loff_t ret;
-
- while (len) {
- ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
- iomap_dirty_actor);
- if (ret <= 0)
- return ret;
- pos += ret;
- len -= ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(iomap_file_dirty);
-
-static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
- unsigned bytes, struct iomap *iomap)
-{
- struct page *page;
- int status;
-
- status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
- iomap);
- if (status)
- return status;
-
- zero_user(page, offset, bytes);
- mark_page_accessed(page);
-
- return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
-}
-
-static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
- struct iomap *iomap)
-{
- return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
- iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
-}
-
-static loff_t
-iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
- void *data, struct iomap *iomap)
-{
- bool *did_zero = data;
- loff_t written = 0;
- int status;
-
- /* already zeroed? we're done. */
- if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
- return count;
-
- do {
- unsigned offset, bytes;
-
- offset = offset_in_page(pos);
- bytes = min_t(loff_t, PAGE_SIZE - offset, count);
-
- if (IS_DAX(inode))
- status = iomap_dax_zero(pos, offset, bytes, iomap);
- else
- status = iomap_zero(inode, pos, offset, bytes, iomap);
- if (status < 0)
- return status;
-
- pos += bytes;
- count -= bytes;
- written += bytes;
- if (did_zero)
- *did_zero = true;
- } while (count > 0);
-
- return written;
-}
-
-int
-iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
- const struct iomap_ops *ops)
-{
- loff_t ret;
-
- while (len > 0) {
- ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
- ops, did_zero, iomap_zero_range_actor);
- if (ret <= 0)
- return ret;
-
- pos += ret;
- len -= ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(iomap_zero_range);
-
-int
-iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
- const struct iomap_ops *ops)
-{
- unsigned int blocksize = i_blocksize(inode);
- unsigned int off = pos & (blocksize - 1);
-
- /* Block boundary? Nothing to do */
- if (!off)
- return 0;
- return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
-}
-EXPORT_SYMBOL_GPL(iomap_truncate_page);
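
The expected caller is a truncate path, which must zero the block that now straddles EOF so that a later extension doesn't expose stale data. A hedged sketch, again using the hypothetical myfs_iomap_ops; passing NULL for did_zero is allowed when the caller doesn't care:

static int myfs_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	/* Zero the partial block straddling the new EOF. */
	error = iomap_truncate_page(inode, newsize, NULL, &myfs_iomap_ops);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	return 0;
}
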
-
-static loff_t
-iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap)
-{
- struct page *page = data;
- int ret;
-
- if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
- ret = __block_write_begin_int(page, pos, length, NULL, iomap);
- if (ret)
- return ret;
- block_commit_write(page, 0, length);
- } else {
- WARN_ON_ONCE(!PageUptodate(page));
- iomap_page_create(inode, page);
- set_page_dirty(page);
- }
-
- return length;
-}
-
-vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
-{
- struct page *page = vmf->page;
- struct inode *inode = file_inode(vmf->vma->vm_file);
- unsigned long length;
- loff_t offset, size;
- ssize_t ret;
-
- lock_page(page);
- size = i_size_read(inode);
- if ((page->mapping != inode->i_mapping) ||
- (page_offset(page) > size)) {
- /* We overload EFAULT to mean page got truncated */
- ret = -EFAULT;
- goto out_unlock;
- }
-
- /* page is wholly or partially inside EOF */
- if (((page->index + 1) << PAGE_SHIFT) > size)
- length = offset_in_page(size);
- else
- length = PAGE_SIZE;
-
- offset = page_offset(page);
- while (length > 0) {
- ret = iomap_apply(inode, offset, length,
- IOMAP_WRITE | IOMAP_FAULT, ops, page,
- iomap_page_mkwrite_actor);
- if (unlikely(ret <= 0))
- goto out_unlock;
- offset += ret;
- length -= ret;
- }
-
- wait_for_stable_page(page);
- return VM_FAULT_LOCKED;
-out_unlock:
- unlock_page(page);
- return block_page_mkwrite_return(ret);
-}
-EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
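
This slots in as the ->page_mkwrite handler of a vm_operations_struct, bracketed by the superblock freeze protection that every write fault needs. A minimal sketch with the hypothetical myfs_iomap_ops:

static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);	/* update mtime before dirtying */
	ret = iomap_page_mkwrite(vmf, &myfs_iomap_ops);
	sb_end_pagefault(inode->i_sb);
	return ret;
}
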
-
-struct fiemap_ctx {
- struct fiemap_extent_info *fi;
- struct iomap prev;
-};
-
-static int iomap_to_fiemap(struct fiemap_extent_info *fi,
- struct iomap *iomap, u32 flags)
-{
- switch (iomap->type) {
- case IOMAP_HOLE:
- /* skip holes */
- return 0;
- case IOMAP_DELALLOC:
- flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
- break;
- case IOMAP_MAPPED:
- break;
- case IOMAP_UNWRITTEN:
- flags |= FIEMAP_EXTENT_UNWRITTEN;
- break;
- case IOMAP_INLINE:
- flags |= FIEMAP_EXTENT_DATA_INLINE;
- break;
- }
-
- if (iomap->flags & IOMAP_F_MERGED)
- flags |= FIEMAP_EXTENT_MERGED;
- if (iomap->flags & IOMAP_F_SHARED)
- flags |= FIEMAP_EXTENT_SHARED;
-
- return fiemap_fill_next_extent(fi, iomap->offset,
- iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
- iomap->length, flags);
-}
-
-static loff_t
-iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap)
-{
- struct fiemap_ctx *ctx = data;
- loff_t ret = length;
-
- if (iomap->type == IOMAP_HOLE)
- return length;
-
- ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
- ctx->prev = *iomap;
- switch (ret) {
- case 0: /* success */
- return length;
- case 1: /* extent array full */
- return 0;
- default:
- return ret;
- }
-}
-
-int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
- loff_t start, loff_t len, const struct iomap_ops *ops)
-{
- struct fiemap_ctx ctx;
- loff_t ret;
-
- memset(&ctx, 0, sizeof(ctx));
- ctx.fi = fi;
- ctx.prev.type = IOMAP_HOLE;
-
- ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
- if (ret)
- return ret;
-
- if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
- ret = filemap_write_and_wait(inode->i_mapping);
- if (ret)
- return ret;
- }
-
- while (len > 0) {
- ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
- iomap_fiemap_actor);
- /* inode with no (attribute) mapping will give ENOENT */
- if (ret == -ENOENT)
- break;
- if (ret < 0)
- return ret;
- if (ret == 0)
- break;
-
- start += ret;
- len -= ret;
- }
-
- if (ctx.prev.type != IOMAP_HOLE) {
- ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(iomap_fiemap);
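
Hooking this into ->fiemap in inode_operations is then a one-line wrapper (hypothetical myfs_iomap_ops again):

static int myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return iomap_fiemap(inode, fieinfo, start, len, &myfs_iomap_ops);
}
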
-
-/*
- * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
- * Returns true if found and updates @lastoff to the offset in file.
- */
-static bool
-page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
- int whence)
-{
- const struct address_space_operations *ops = inode->i_mapping->a_ops;
- unsigned int bsize = i_blocksize(inode), off;
- bool seek_data = whence == SEEK_DATA;
- loff_t poff = page_offset(page);
-
- if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
- return false;
-
- if (*lastoff < poff) {
- /*
- * Last offset smaller than the start of the page means we found
- * a hole:
- */
- if (whence == SEEK_HOLE)
- return true;
- *lastoff = poff;
- }
-
- /*
- * Just check the page unless we can and should check block ranges:
- */
- if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
- return PageUptodate(page) == seek_data;
-
- lock_page(page);
- if (unlikely(page->mapping != inode->i_mapping))
- goto out_unlock_not_found;
-
- for (off = 0; off < PAGE_SIZE; off += bsize) {
- if (offset_in_page(*lastoff) >= off + bsize)
- continue;
- if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
- unlock_page(page);
- return true;
- }
- *lastoff = poff + off + bsize;
- }
-
-out_unlock_not_found:
- unlock_page(page);
- return false;
-}
-
-/*
- * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
- *
- * Within unwritten extents, the page cache determines which parts are holes
- * and which are data: uptodate buffer heads count as data; everything else
- * counts as a hole.
- *
- * Returns the resulting offset on success, and -ENOENT otherwise.
- */
-static loff_t
-page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
- int whence)
-{
- pgoff_t index = offset >> PAGE_SHIFT;
- pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
- loff_t lastoff = offset;
- struct pagevec pvec;
-
- if (length <= 0)
- return -ENOENT;
-
- pagevec_init(&pvec);
-
- do {
- unsigned nr_pages, i;
-
- nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
- end - 1);
- if (nr_pages == 0)
- break;
-
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
-
- if (page_seek_hole_data(inode, page, &lastoff, whence))
- goto check_range;
- lastoff = page_offset(page) + PAGE_SIZE;
- }
- pagevec_release(&pvec);
- } while (index < end);
-
- /* If there is no page at lastoff and we are not done, we found a hole. */
- if (whence != SEEK_HOLE)
- goto not_found;
-
-check_range:
- if (lastoff < offset + length)
- goto out;
-not_found:
- lastoff = -ENOENT;
-out:
- pagevec_release(&pvec);
- return lastoff;
-}
-
-
-static loff_t
-iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
- void *data, struct iomap *iomap)
-{
- switch (iomap->type) {
- case IOMAP_UNWRITTEN:
- offset = page_cache_seek_hole_data(inode, offset, length,
- SEEK_HOLE);
- if (offset < 0)
- return length;
- /* fall through */
- case IOMAP_HOLE:
- *(loff_t *)data = offset;
- return 0;
- default:
- return length;
- }
-}
-
-loff_t
-iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
-{
- loff_t size = i_size_read(inode);
- loff_t length = size - offset;
- loff_t ret;
-
- /* Nothing to be found before or beyond the end of the file. */
- if (offset < 0 || offset >= size)
- return -ENXIO;
-
- while (length > 0) {
- ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
- &offset, iomap_seek_hole_actor);
- if (ret < 0)
- return ret;
- if (ret == 0)
- break;
-
- offset += ret;
- length -= ret;
- }
-
- return offset;
-}
-EXPORT_SYMBOL_GPL(iomap_seek_hole);
-
-static loff_t
-iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
- void *data, struct iomap *iomap)
-{
- switch (iomap->type) {
- case IOMAP_HOLE:
- return length;
- case IOMAP_UNWRITTEN:
- offset = page_cache_seek_hole_data(inode, offset, length,
- SEEK_DATA);
- if (offset < 0)
- return length;
- /* fall through */
- default:
- *(loff_t *)data = offset;
- return 0;
- }
-}
-
-loff_t
-iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
-{
- loff_t size = i_size_read(inode);
- loff_t length = size - offset;
- loff_t ret;
-
- /* Nothing to be found before or beyond the end of the file. */
- if (offset < 0 || offset >= size)
- return -ENXIO;
-
- while (length > 0) {
- ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
- &offset, iomap_seek_data_actor);
- if (ret < 0)
- return ret;
- if (ret == 0)
- break;
-
- offset += ret;
- length -= ret;
- }
-
- if (length <= 0)
- return -ENXIO;
- return offset;
-}
-EXPORT_SYMBOL_GPL(iomap_seek_data);
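
Both helpers plug into a filesystem's ->llseek method; whences other than SEEK_HOLE/SEEK_DATA fall back to the generic code. A sketch assuming the hypothetical myfs_iomap_ops:

static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &myfs_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &myfs_iomap_ops);
		break;
	default:
		return generic_file_llseek(file, offset, whence);
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
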
-
-/*
- * Private flags for iomap_dio, must not overlap with the public ones in
- * iomap.h:
- */
-#define IOMAP_DIO_WRITE_FUA (1 << 28)
-#define IOMAP_DIO_NEED_SYNC (1 << 29)
-#define IOMAP_DIO_WRITE (1 << 30)
-#define IOMAP_DIO_DIRTY (1 << 31)
-
-struct iomap_dio {
- struct kiocb *iocb;
- iomap_dio_end_io_t *end_io;
- loff_t i_size;
- loff_t size;
- atomic_t ref;
- unsigned flags;
- int error;
- bool wait_for_completion;
-
- union {
- /* used during submission and for synchronous completion: */
- struct {
- struct iov_iter *iter;
- struct task_struct *waiter;
- struct request_queue *last_queue;
- blk_qc_t cookie;
- } submit;
-
- /* used for aio completion: */
- struct {
- struct work_struct work;
- } aio;
- };
-};
-
-int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
-{
- struct request_queue *q = READ_ONCE(kiocb->private);
-
- if (!q)
- return 0;
- return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
-}
-EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
-
-static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
- struct bio *bio)
-{
- atomic_inc(&dio->ref);
-
- if (dio->iocb->ki_flags & IOCB_HIPRI)
- bio_set_polled(bio, dio->iocb);
-
- dio->submit.last_queue = bdev_get_queue(iomap->bdev);
- dio->submit.cookie = submit_bio(bio);
-}
-
-static ssize_t iomap_dio_complete(struct iomap_dio *dio)
-{
- struct kiocb *iocb = dio->iocb;
- struct inode *inode = file_inode(iocb->ki_filp);
- loff_t offset = iocb->ki_pos;
- ssize_t ret;
-
- if (dio->end_io) {
- ret = dio->end_io(iocb,
- dio->error ? dio->error : dio->size,
- dio->flags);
- } else {
- ret = dio->error;
- }
-
- if (likely(!ret)) {
- ret = dio->size;
- /* check for short read */
- if (offset + ret > dio->i_size &&
- !(dio->flags & IOMAP_DIO_WRITE))
- ret = dio->i_size - offset;
- iocb->ki_pos += ret;
- }
-
- /*
- * Try again to invalidate clean pages which might have been cached by
- * non-direct readahead, or faulted in by get_user_pages() if the source
- * of the write was an mmap'ed region of the file we're writing. Either
- * one is a pretty crazy thing to do, so we don't support it 100%. If
- * this invalidation fails, tough, the write still worked...
- *
- * And this page cache invalidation has to be after dio->end_io(), as
- * some filesystems convert unwritten extents to real allocations in
- * end_io() when necessary, otherwise a racing buffer read would cache
- * zeros from unwritten extents.
- */
- if (!dio->error &&
- (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
- int err;
- err = invalidate_inode_pages2_range(inode->i_mapping,
- offset >> PAGE_SHIFT,
- (offset + dio->size - 1) >> PAGE_SHIFT);
- if (err)
- dio_warn_stale_pagecache(iocb->ki_filp);
- }
-
- /*
- * If this is a DSYNC write, make sure we push it to stable storage now
- * that we've written data.
- */
- if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
- ret = generic_write_sync(iocb, ret);
-
- inode_dio_end(file_inode(iocb->ki_filp));
- kfree(dio);
-
- return ret;
-}
-
-static void iomap_dio_complete_work(struct work_struct *work)
-{
- struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
- struct kiocb *iocb = dio->iocb;
-
- iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
-}
-
-/*
- * Set an error in the dio if none is set yet. We have to use cmpxchg
- * as the submission context and the completion context(s) can race to
- * update the error.
- */
-static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
-{
- cmpxchg(&dio->error, 0, ret);
-}
-
-static void iomap_dio_bio_end_io(struct bio *bio)
-{
- struct iomap_dio *dio = bio->bi_private;
- bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
-
- if (bio->bi_status)
- iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
-
- if (atomic_dec_and_test(&dio->ref)) {
- if (dio->wait_for_completion) {
- struct task_struct *waiter = dio->submit.waiter;
- WRITE_ONCE(dio->submit.waiter, NULL);
- blk_wake_io_task(waiter);
- } else if (dio->flags & IOMAP_DIO_WRITE) {
- struct inode *inode = file_inode(dio->iocb->ki_filp);
-
- INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
- queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
- } else {
- iomap_dio_complete_work(&dio->aio.work);
- }
- }
-
- if (should_dirty) {
- bio_check_pages_dirty(bio);
- } else {
- bio_release_pages(bio, false);
- bio_put(bio);
- }
-}
-
-static void
-iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
- unsigned len)
-{
- struct page *page = ZERO_PAGE(0);
- int flags = REQ_SYNC | REQ_IDLE;
- struct bio *bio;
-
- bio = bio_alloc(GFP_KERNEL, 1);
- bio_set_dev(bio, iomap->bdev);
- bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
- bio->bi_private = dio;
- bio->bi_end_io = iomap_dio_bio_end_io;
-
- get_page(page);
- __bio_add_page(bio, page, len, 0);
- bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
- iomap_dio_submit_bio(dio, iomap, bio);
-}
-
-static loff_t
-iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
- struct iomap_dio *dio, struct iomap *iomap)
-{
- unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
- unsigned int fs_block_size = i_blocksize(inode), pad;
- unsigned int align = iov_iter_alignment(dio->submit.iter);
- struct iov_iter iter;
- struct bio *bio;
- bool need_zeroout = false;
- bool use_fua = false;
- int nr_pages, ret = 0;
- size_t copied = 0;
-
- if ((pos | length | align) & ((1 << blkbits) - 1))
- return -EINVAL;
-
- if (iomap->type == IOMAP_UNWRITTEN) {
- dio->flags |= IOMAP_DIO_UNWRITTEN;
- need_zeroout = true;
- }
-
- if (iomap->flags & IOMAP_F_SHARED)
- dio->flags |= IOMAP_DIO_COW;
-
- if (iomap->flags & IOMAP_F_NEW) {
- need_zeroout = true;
- } else if (iomap->type == IOMAP_MAPPED) {
- /*
- * Use a FUA write if we need datasync semantics, this is a pure
- * data IO that doesn't require any metadata updates (including
- * after IO completion such as unwritten extent conversion) and
- * the underlying device supports FUA. This allows us to avoid
- * cache flushes on IO completion.
- */
- if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
- (dio->flags & IOMAP_DIO_WRITE_FUA) &&
- blk_queue_fua(bdev_get_queue(iomap->bdev)))
- use_fua = true;
- }
-
- /*
- * Operate on a partial iter trimmed to the extent we were called for.
- * We'll update the iter in the dio once we're done with this extent.
- */
- iter = *dio->submit.iter;
- iov_iter_truncate(&iter, length);
-
- nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
- if (nr_pages <= 0)
- return nr_pages;
-
- if (need_zeroout) {
- /* zero out from the start of the block to the write offset */
- pad = pos & (fs_block_size - 1);
- if (pad)
- iomap_dio_zero(dio, iomap, pos - pad, pad);
- }
-
- do {
- size_t n;
- if (dio->error) {
- iov_iter_revert(dio->submit.iter, copied);
- return 0;
- }
-
- bio = bio_alloc(GFP_KERNEL, nr_pages);
- bio_set_dev(bio, iomap->bdev);
- bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
- bio->bi_write_hint = dio->iocb->ki_hint;
- bio->bi_ioprio = dio->iocb->ki_ioprio;
- bio->bi_private = dio;
- bio->bi_end_io = iomap_dio_bio_end_io;
-
- ret = bio_iov_iter_get_pages(bio, &iter);
- if (unlikely(ret)) {
- /*
- * We have to stop part way through an IO. We must fall
- * through to the sub-block tail zeroing here, otherwise
- * this short IO may expose stale data in the tail of
- * the block we haven't written data to.
- */
- bio_put(bio);
- goto zero_tail;
- }
-
- n = bio->bi_iter.bi_size;
- if (dio->flags & IOMAP_DIO_WRITE) {
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
- if (use_fua)
- bio->bi_opf |= REQ_FUA;
- else
- dio->flags &= ~IOMAP_DIO_WRITE_FUA;
- task_io_account_write(n);
- } else {
- bio->bi_opf = REQ_OP_READ;
- if (dio->flags & IOMAP_DIO_DIRTY)
- bio_set_pages_dirty(bio);
- }
-
- iov_iter_advance(dio->submit.iter, n);
-
- dio->size += n;
- pos += n;
- copied += n;
-
- nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
- iomap_dio_submit_bio(dio, iomap, bio);
- } while (nr_pages);
-
- /*
- * We need to zeroout the tail of a sub-block write if the extent type
- * requires zeroing or the write extends beyond EOF. If we don't zero
- * the block tail in the latter case, we can expose stale data via mmap
- * reads of the EOF block.
- */
-zero_tail:
- if (need_zeroout ||
- ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
- /* zero out from the end of the write to the end of the block */
- pad = pos & (fs_block_size - 1);
- if (pad)
- iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
- }
- return copied ? copied : ret;
-}
-
-static loff_t
-iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
-{
- length = iov_iter_zero(length, dio->submit.iter);
- dio->size += length;
- return length;
-}
-
-static loff_t
-iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
- struct iomap_dio *dio, struct iomap *iomap)
-{
- struct iov_iter *iter = dio->submit.iter;
- size_t copied;
-
- BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));
-
- if (dio->flags & IOMAP_DIO_WRITE) {
- loff_t size = inode->i_size;
-
- if (pos > size)
- memset(iomap->inline_data + size, 0, pos - size);
- copied = copy_from_iter(iomap->inline_data + pos, length, iter);
- if (copied) {
- if (pos + copied > size)
- i_size_write(inode, pos + copied);
- mark_inode_dirty(inode);
- }
- } else {
- copied = copy_to_iter(iomap->inline_data + pos, length, iter);
- }
- dio->size += copied;
- return copied;
-}
-
-static loff_t
-iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap)
-{
- struct iomap_dio *dio = data;
-
- switch (iomap->type) {
- case IOMAP_HOLE:
- if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
- return -EIO;
- return iomap_dio_hole_actor(length, dio);
- case IOMAP_UNWRITTEN:
- if (!(dio->flags & IOMAP_DIO_WRITE))
- return iomap_dio_hole_actor(length, dio);
- return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
- case IOMAP_MAPPED:
- return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
- case IOMAP_INLINE:
- return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
- default:
- WARN_ON_ONCE(1);
- return -EIO;
- }
-}
-
-/*
- * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
- * is being issued as AIO or not. This allows us to optimise pure data writes
- * to use REQ_FUA rather than requiring generic_write_sync() to issue a
- * REQ_FLUSH post write. This is slightly tricky because a single request here
- * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
- * may be pure data writes. In that case, we still need to do a full data sync
- * completion.
- */
-ssize_t
-iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
- const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
-{
- struct address_space *mapping = iocb->ki_filp->f_mapping;
- struct inode *inode = file_inode(iocb->ki_filp);
- size_t count = iov_iter_count(iter);
- loff_t pos = iocb->ki_pos, start = pos;
- loff_t end = iocb->ki_pos + count - 1, ret = 0;
- unsigned int flags = IOMAP_DIRECT;
- bool wait_for_completion = is_sync_kiocb(iocb);
- struct blk_plug plug;
- struct iomap_dio *dio;
-
- lockdep_assert_held(&inode->i_rwsem);
-
- if (!count)
- return 0;
-
- dio = kmalloc(sizeof(*dio), GFP_KERNEL);
- if (!dio)
- return -ENOMEM;
-
- dio->iocb = iocb;
- atomic_set(&dio->ref, 1);
- dio->size = 0;
- dio->i_size = i_size_read(inode);
- dio->end_io = end_io;
- dio->error = 0;
- dio->flags = 0;
-
- dio->submit.iter = iter;
- dio->submit.waiter = current;
- dio->submit.cookie = BLK_QC_T_NONE;
- dio->submit.last_queue = NULL;
-
- if (iov_iter_rw(iter) == READ) {
- if (pos >= dio->i_size)
- goto out_free_dio;
-
- if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ)
- dio->flags |= IOMAP_DIO_DIRTY;
- } else {
- flags |= IOMAP_WRITE;
- dio->flags |= IOMAP_DIO_WRITE;
-
- /* for data sync or sync, we need sync completion processing */
- if (iocb->ki_flags & IOCB_DSYNC)
- dio->flags |= IOMAP_DIO_NEED_SYNC;
-
- /*
- * For datasync only writes, we optimistically try using FUA for
- * this IO. Any non-FUA write that occurs will clear this flag,
- * hence we know before completion whether a cache flush is
- * necessary.
- */
- if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
- dio->flags |= IOMAP_DIO_WRITE_FUA;
- }
-
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (filemap_range_has_page(mapping, start, end)) {
- ret = -EAGAIN;
- goto out_free_dio;
- }
- flags |= IOMAP_NOWAIT;
- }
-
- ret = filemap_write_and_wait_range(mapping, start, end);
- if (ret)
- goto out_free_dio;
-
- /*
- * Try to invalidate cache pages for the range we're direct
- * writing. If this invalidation fails, tough, the write will
- * still work, but racing two incompatible write paths is a
- * pretty crazy thing to do, so we don't support it 100%.
- */
- ret = invalidate_inode_pages2_range(mapping,
- start >> PAGE_SHIFT, end >> PAGE_SHIFT);
- if (ret)
- dio_warn_stale_pagecache(iocb->ki_filp);
- ret = 0;
-
- if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
- !inode->i_sb->s_dio_done_wq) {
- ret = sb_init_dio_done_wq(inode->i_sb);
- if (ret < 0)
- goto out_free_dio;
- }
-
- inode_dio_begin(inode);
-
- blk_start_plug(&plug);
- do {
- ret = iomap_apply(inode, pos, count, flags, ops, dio,
- iomap_dio_actor);
- if (ret <= 0) {
- /* magic error code to fall back to buffered I/O */
- if (ret == -ENOTBLK) {
- wait_for_completion = true;
- ret = 0;
- }
- break;
- }
- pos += ret;
-
- if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
- break;
- } while ((count = iov_iter_count(iter)) > 0);
- blk_finish_plug(&plug);
-
- if (ret < 0)
- iomap_dio_set_error(dio, ret);
-
- /*
- * If all the writes we issued were FUA, we don't need to flush the
- * cache on IO completion. Clear the sync flag for this case.
- */
- if (dio->flags & IOMAP_DIO_WRITE_FUA)
- dio->flags &= ~IOMAP_DIO_NEED_SYNC;
-
- WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
- WRITE_ONCE(iocb->private, dio->submit.last_queue);
-
- /*
- * We are about to drop our additional submission reference, which
- * might be the last reference to the dio. There are three
- * different ways we can progress here:
- *
- * (a) If this is the last reference we will always complete and free
- * the dio ourselves.
- * (b) If this is not the last reference, and we serve an asynchronous
- * iocb, we must never touch the dio after the decrement, the
- * I/O completion handler will complete and free it.
- * (c) If this is not the last reference, but we serve a synchronous
- * iocb, the I/O completion handler will wake us up on the drop
- * of the final reference, and we will complete and free it here
- * after we got woken by the I/O completion handler.
- */
- dio->wait_for_completion = wait_for_completion;
- if (!atomic_dec_and_test(&dio->ref)) {
- if (!wait_for_completion)
- return -EIOCBQUEUED;
-
- for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (!READ_ONCE(dio->submit.waiter))
- break;
-
- if (!(iocb->ki_flags & IOCB_HIPRI) ||
- !dio->submit.last_queue ||
- !blk_poll(dio->submit.last_queue,
- dio->submit.cookie, true))
- io_schedule();
- }
- __set_current_state(TASK_RUNNING);
- }
-
- return iomap_dio_complete(dio);
-
-out_free_dio:
- kfree(dio);
- return ret;
-}
-EXPORT_SYMBOL_GPL(iomap_dio_rw);
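
Per the lockdep assertion above, callers must hold i_rwsem; a direct read can take it shared. A minimal sketch of the direct branch of a ->read_iter method, assuming the hypothetical myfs_iomap_ops and no end_io callback:

static ssize_t myfs_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0;	/* nothing to transfer */

	inode_lock_shared(inode);
	ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL);
	inode_unlock_shared(inode);

	return ret;
}
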
-
-/* Swapfile activation */
-
-#ifdef CONFIG_SWAP
-struct iomap_swapfile_info {
- struct iomap iomap; /* accumulated iomap */
- struct swap_info_struct *sis;
- uint64_t lowest_ppage; /* lowest physical addr seen (pages) */
- uint64_t highest_ppage; /* highest physical addr seen (pages) */
- unsigned long nr_pages; /* number of pages collected */
- int nr_extents; /* extent count */
-};
-
-/*
- * Collect physical extents for this swap file. Physical extents reported to
- * the swap code must be trimmed to align to a page boundary. The logical
- * offset within the file is irrelevant since the swapfile code maps logical
- * page numbers of the swap device to the physical page-aligned extents.
- */
-static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
-{
- struct iomap *iomap = &isi->iomap;
- unsigned long nr_pages;
- uint64_t first_ppage;
- uint64_t first_ppage_reported;
- uint64_t next_ppage;
- int error;
-
- /*
- * Round the start up and the end down so that the physical
- * extent aligns to a page boundary.
- */
- first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
- next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
- PAGE_SHIFT;
-
- /* Skip too-short physical extents. */
- if (first_ppage >= next_ppage)
- return 0;
- nr_pages = next_ppage - first_ppage;
-
- /*
- * Calculate how much swap space we're adding; the first page contains
- * the swap header and doesn't count. The mm still wants that first
- * page fed to add_swap_extent, however.
- */
- first_ppage_reported = first_ppage;
- if (iomap->offset == 0)
- first_ppage_reported++;
- if (isi->lowest_ppage > first_ppage_reported)
- isi->lowest_ppage = first_ppage_reported;
- if (isi->highest_ppage < (next_ppage - 1))
- isi->highest_ppage = next_ppage - 1;
-
- /* Add extent, set up for the next call. */
- error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
- if (error < 0)
- return error;
- isi->nr_extents += error;
- isi->nr_pages += nr_pages;
- return 0;
-}
-
-/*
- * Accumulate iomaps for this swap file. We have to accumulate iomaps because
- * swap only cares about contiguous page-aligned physical extents and makes no
- * distinction between written and unwritten extents.
- */
-static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
- loff_t count, void *data, struct iomap *iomap)
-{
- struct iomap_swapfile_info *isi = data;
- int error;
-
- switch (iomap->type) {
- case IOMAP_MAPPED:
- case IOMAP_UNWRITTEN:
- /* Only real or unwritten extents. */
- break;
- case IOMAP_INLINE:
- /* No inline data. */
- pr_err("swapon: file is inline\n");
- return -EINVAL;
- default:
- pr_err("swapon: file has unallocated extents\n");
- return -EINVAL;
- }
-
- /* No uncommitted metadata or shared blocks. */
- if (iomap->flags & IOMAP_F_DIRTY) {
- pr_err("swapon: file is not committed\n");
- return -EINVAL;
- }
- if (iomap->flags & IOMAP_F_SHARED) {
- pr_err("swapon: file has shared extents\n");
- return -EINVAL;
- }
-
- /* Only one bdev per swap file. */
- if (iomap->bdev != isi->sis->bdev) {
- pr_err("swapon: file is on multiple devices\n");
- return -EINVAL;
- }
-
- if (isi->iomap.length == 0) {
- /* No accumulated extent, so just store it. */
- memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
- } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
- /* Append this to the accumulated extent. */
- isi->iomap.length += iomap->length;
- } else {
- /* Otherwise, add the retained iomap and store this one. */
- error = iomap_swapfile_add_extent(isi);
- if (error)
- return error;
- memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
- }
- return count;
-}
-
-/*
- * Iterate a swap file's iomaps to construct physical extents that can be
- * passed to the swapfile subsystem.
- */
-int iomap_swapfile_activate(struct swap_info_struct *sis,
- struct file *swap_file, sector_t *pagespan,
- const struct iomap_ops *ops)
-{
- struct iomap_swapfile_info isi = {
- .sis = sis,
- .lowest_ppage = (sector_t)-1ULL,
- };
- struct address_space *mapping = swap_file->f_mapping;
- struct inode *inode = mapping->host;
- loff_t pos = 0;
- loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
- loff_t ret;
-
- /*
- * Persist all file mapping metadata so that we won't have any
- * IOMAP_F_DIRTY iomaps.
- */
- ret = vfs_fsync(swap_file, 1);
- if (ret)
- return ret;
-
- while (len > 0) {
- ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
- ops, &isi, iomap_swapfile_activate_actor);
- if (ret <= 0)
- return ret;
-
- pos += ret;
- len -= ret;
- }
-
- if (isi.iomap.length) {
- ret = iomap_swapfile_add_extent(&isi);
- if (ret)
- return ret;
- }
-
- *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
- sis->max = isi.nr_pages;
- sis->pages = isi.nr_pages - 1;
- sis->highest_bit = isi.nr_pages - 1;
- return isi.nr_extents;
-}
-EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
-#endif /* CONFIG_SWAP */
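
A filesystem exposes this through the ->swap_activate address-space operation; the wrapper is trivial (hypothetical myfs_iomap_ops):

static int myfs_swap_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *span)
{
	return iomap_swapfile_activate(sis, swap_file, span, &myfs_iomap_ops);
}
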
-
-static loff_t
-iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap)
-{
- sector_t *bno = data, addr;
-
- if (iomap->type == IOMAP_MAPPED) {
- addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
- if (addr > INT_MAX)
- WARN(1, "would truncate bmap result\n");
- else
- *bno = addr;
- }
- return 0;
-}
-
-/* legacy ->bmap interface. 0 is the error return (!) */
-sector_t
-iomap_bmap(struct address_space *mapping, sector_t bno,
- const struct iomap_ops *ops)
-{
- struct inode *inode = mapping->host;
- loff_t pos = bno << inode->i_blkbits;
- unsigned blocksize = i_blocksize(inode);
-
- if (filemap_write_and_wait(mapping))
- return 0;
-
- bno = 0;
- iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
- return bno;
-}
-EXPORT_SYMBOL_GPL(iomap_bmap);
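
The matching ->bmap method is likewise a thin wrapper (hypothetical myfs_iomap_ops):

static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &myfs_iomap_ops);
}
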
diff --git a/fs/iomap/Makefile b/fs/iomap/Makefile
new file mode 100644
index 000000000000..2d165388d952
--- /dev/null
+++ b/fs/iomap/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (c) 2019 Oracle.
+# All Rights Reserved.
+#
+obj-$(CONFIG_FS_IOMAP) += iomap.o
+
+iomap-y += \
+ apply.o \
+ buffered-io.o \
+ direct-io.o \
+ fiemap.o \
+ seek.o
+
+iomap-$(CONFIG_SWAP) += swapfile.o
diff --git a/fs/iomap/apply.c b/fs/iomap/apply.c
new file mode 100644
index 000000000000..54c02aecf3cd
--- /dev/null
+++ b/fs/iomap/apply.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ * Copyright (c) 2016-2018 Christoph Hellwig.
+ */
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/fs.h>
+#include <linux/iomap.h>
+
+/*
+ * Execute an iomap write on a segment of the mapping that spans a
+ * contiguous range of pages that have identical block mapping state.
+ *
+ * This avoids the need to map pages individually, do individual allocations
+ * for each page and most importantly avoid the need for filesystem specific
+ * locking per page. Instead, all the operations are amortised over the entire
+ * range of pages. It is assumed that the filesystems will lock whatever
+ * resources they require in the iomap_begin call, and release them in the
+ * iomap_end call.
+ */
+loff_t
+iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
+ const struct iomap_ops *ops, void *data, iomap_actor_t actor)
+{
+ struct iomap iomap = { 0 };
+ loff_t written = 0, ret;
+
+ /*
+ * Need to map a range from start position for length bytes. This can
+ * span multiple pages - it is only guaranteed to return a range of a
+ * single type of pages (e.g. all into a hole, all mapped or all
+ * unwritten). Failure at this point has nothing to undo.
+ *
+ * If allocation is required for this range, reserve the space now so
+ * that the allocation is guaranteed to succeed later on. Once we copy
+ * the data into the page cache pages, then we cannot fail otherwise we
+ * expose transient stale data. If the reserve fails, we can safely
+ * back out at this point as there is nothing to undo.
+ */
+ ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
+ if (ret)
+ return ret;
+ if (WARN_ON(iomap.offset > pos))
+ return -EIO;
+ if (WARN_ON(iomap.length == 0))
+ return -EIO;
+
+ /*
+ * Cut down the length to the one actually provided by the filesystem,
+ * as it might not be able to give us the whole size that we requested.
+ */
+ if (iomap.offset + iomap.length < pos + length)
+ length = iomap.offset + iomap.length - pos;
+
+ /*
+ * Now that we have guaranteed that the space allocation will succeed,
+ * we can do the copy-in page by page without having to worry about
+ * failures exposing transient data.
+ */
+ written = actor(inode, pos, length, data, &iomap);
+
+ /*
+ * Now the data has been copied, commit the range we've copied. This
+ * should not fail unless the filesystem has had a fatal error.
+ */
+ if (ops->iomap_end) {
+ ret = ops->iomap_end(inode, pos, length,
+ written > 0 ? written : 0,
+ flags, &iomap);
+ }
+
+ return written ? written : ret;
+}
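
The contract is that ->iomap_begin fills in one extent that covers pos; it may return a shorter mapping than requested, in which case iomap_apply trims the length and the callers' outer loops keep going. A deliberately toy sketch of the filesystem side, assuming a hypothetical myfs that maps file offsets 1:1 onto its block device (real implementations look up or allocate extents here and take locks that iomap_end releases):

/* Illustration only: report everything as one mapped extent. */
static int myfs_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, struct iomap *iomap)
{
	iomap->addr = pos;			/* disk address, in bytes */
	iomap->offset = pos;			/* file offset of this mapping */
	iomap->length = length;
	iomap->type = IOMAP_MAPPED;
	iomap->bdev = inode->i_sb->s_bdev;
	return 0;
}

static const struct iomap_ops myfs_iomap_ops = {
	.iomap_begin	= myfs_iomap_begin,
};
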
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
new file mode 100644
index 000000000000..e25901ae3ff4
--- /dev/null
+++ b/fs/iomap/buffered-io.c
@@ -0,0 +1,1073 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ * Copyright (c) 2016-2018 Christoph Hellwig.
+ */
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/fs.h>
+#include <linux/iomap.h>
+#include <linux/pagemap.h>
+#include <linux/uio.h>
+#include <linux/buffer_head.h>
+#include <linux/dax.h>
+#include <linux/writeback.h>
+#include <linux/swap.h>
+#include <linux/bio.h>
+#include <linux/sched/signal.h>
+#include <linux/migrate.h>
+
+#include "../internal.h"
+
+static struct iomap_page *
+iomap_page_create(struct inode *inode, struct page *page)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+
+ if (iop || i_blocksize(inode) == PAGE_SIZE)
+ return iop;
+
+ iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
+ atomic_set(&iop->read_count, 0);
+ atomic_set(&iop->write_count, 0);
+ bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
+
+ /*
+ * migrate_page_move_mapping() assumes that pages with private data have
+ * their count elevated by 1.
+ */
+ get_page(page);
+ set_page_private(page, (unsigned long)iop);
+ SetPagePrivate(page);
+ return iop;
+}
+
+static void
+iomap_page_release(struct page *page)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+
+ if (!iop)
+ return;
+ WARN_ON_ONCE(atomic_read(&iop->read_count));
+ WARN_ON_ONCE(atomic_read(&iop->write_count));
+ ClearPagePrivate(page);
+ set_page_private(page, 0);
+ put_page(page);
+ kfree(iop);
+}
+
+/*
+ * Calculate the range inside the page that we actually need to read.
+ */
+static void
+iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
+ loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
+{
+ loff_t orig_pos = *pos;
+ loff_t isize = i_size_read(inode);
+ unsigned block_bits = inode->i_blkbits;
+ unsigned block_size = (1 << block_bits);
+ unsigned poff = offset_in_page(*pos);
+ unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
+ unsigned first = poff >> block_bits;
+ unsigned last = (poff + plen - 1) >> block_bits;
+
+ /*
+ * If the block size is smaller than the page size we need to check the
+ * per-block uptodate status and adjust the offset and length if needed
+ * to avoid reading in already uptodate ranges.
+ */
+ if (iop) {
+ unsigned int i;
+
+ /* move forward for each leading block marked uptodate */
+ for (i = first; i <= last; i++) {
+ if (!test_bit(i, iop->uptodate))
+ break;
+ *pos += block_size;
+ poff += block_size;
+ plen -= block_size;
+ first++;
+ }
+
+ /* truncate len if we find any trailing uptodate block(s) */
+ for ( ; i <= last; i++) {
+ if (test_bit(i, iop->uptodate)) {
+ plen -= (last - i + 1) * block_size;
+ last = i - 1;
+ break;
+ }
+ }
+ }
+
+ /*
+ * If the extent spans the block that contains the i_size we need to
+ * handle both halves separately so that we properly zero data in the
+ * page cache for blocks that are entirely outside of i_size.
+ */
+ if (orig_pos <= isize && orig_pos + length > isize) {
+ unsigned end = offset_in_page(isize - 1) >> block_bits;
+
+ if (first <= end && last > end)
+ plen -= (last - end) * block_size;
+ }
+
+ *offp = poff;
+ *lenp = plen;
+}
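
To make the arithmetic concrete: with 4096-byte pages and 1024-byte blocks, a request for the whole page (*pos = 0, length = 4096) against a page whose first two blocks are already uptodate comes back with *pos advanced to 2048 and poff/plen of 2048/2048, so only the second half of the page is read. If, in the same setup, i_size were 2560, the final trim cuts plen to 1024: the fourth block lies entirely beyond EOF and is zeroed by the caller instead of being read.
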
+
+static void
+iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+ struct inode *inode = page->mapping->host;
+ unsigned first = off >> inode->i_blkbits;
+ unsigned last = (off + len - 1) >> inode->i_blkbits;
+ unsigned int i;
+ bool uptodate = true;
+
+ if (iop) {
+ for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
+ if (i >= first && i <= last)
+ set_bit(i, iop->uptodate);
+ else if (!test_bit(i, iop->uptodate))
+ uptodate = false;
+ }
+ }
+
+ if (uptodate && !PageError(page))
+ SetPageUptodate(page);
+}
+
+static void
+iomap_read_finish(struct iomap_page *iop, struct page *page)
+{
+ if (!iop || atomic_dec_and_test(&iop->read_count))
+ unlock_page(page);
+}
+
+static void
+iomap_read_page_end_io(struct bio_vec *bvec, int error)
+{
+ struct page *page = bvec->bv_page;
+ struct iomap_page *iop = to_iomap_page(page);
+
+ if (unlikely(error)) {
+ ClearPageUptodate(page);
+ SetPageError(page);
+ } else {
+ iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
+ }
+
+ iomap_read_finish(iop, page);
+}
+
+static void
+iomap_read_end_io(struct bio *bio)
+{
+ int error = blk_status_to_errno(bio->bi_status);
+ struct bio_vec *bvec;
+ struct bvec_iter_all iter_all;
+
+ bio_for_each_segment_all(bvec, bio, iter_all)
+ iomap_read_page_end_io(bvec, error);
+ bio_put(bio);
+}
+
+struct iomap_readpage_ctx {
+ struct page *cur_page;
+ bool cur_page_in_bio;
+ bool is_readahead;
+ struct bio *bio;
+ struct list_head *pages;
+};
+
+static void
+iomap_read_inline_data(struct inode *inode, struct page *page,
+ struct iomap *iomap)
+{
+ size_t size = i_size_read(inode);
+ void *addr;
+
+ if (PageUptodate(page))
+ return;
+
+ BUG_ON(page->index);
+ BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
+
+ addr = kmap_atomic(page);
+ memcpy(addr, iomap->inline_data, size);
+ memset(addr + size, 0, PAGE_SIZE - size);
+ kunmap_atomic(addr);
+ SetPageUptodate(page);
+}
+
+static loff_t
+iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ struct iomap *iomap)
+{
+ struct iomap_readpage_ctx *ctx = data;
+ struct page *page = ctx->cur_page;
+ struct iomap_page *iop = iomap_page_create(inode, page);
+ bool same_page = false, is_contig = false;
+ loff_t orig_pos = pos;
+ unsigned poff, plen;
+ sector_t sector;
+
+ if (iomap->type == IOMAP_INLINE) {
+ WARN_ON_ONCE(pos);
+ iomap_read_inline_data(inode, page, iomap);
+ return PAGE_SIZE;
+ }
+
+ /* zero post-eof blocks as the page may be mapped */
+ iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
+ if (plen == 0)
+ goto done;
+
+ if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
+ zero_user(page, poff, plen);
+ iomap_set_range_uptodate(page, poff, plen);
+ goto done;
+ }
+
+ ctx->cur_page_in_bio = true;
+
+ /*
+ * Try to merge into a previous segment if we can.
+ */
+ sector = iomap_sector(iomap, pos);
+ if (ctx->bio && bio_end_sector(ctx->bio) == sector)
+ is_contig = true;
+
+ if (is_contig &&
+ __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
+ if (!same_page && iop)
+ atomic_inc(&iop->read_count);
+ goto done;
+ }
+
+ /*
+ * If we start a new segment we need to increase the read count, and we
+ * need to do so before submitting any previous full bio to make sure
+ * that we don't prematurely unlock the page.
+ */
+ if (iop)
+ atomic_inc(&iop->read_count);
+
+ if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
+ gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+ int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ if (ctx->bio)
+ submit_bio(ctx->bio);
+
+ if (ctx->is_readahead) /* same as readahead_gfp_mask */
+ gfp |= __GFP_NORETRY | __GFP_NOWARN;
+ ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
+ ctx->bio->bi_opf = REQ_OP_READ;
+ if (ctx->is_readahead)
+ ctx->bio->bi_opf |= REQ_RAHEAD;
+ ctx->bio->bi_iter.bi_sector = sector;
+ bio_set_dev(ctx->bio, iomap->bdev);
+ ctx->bio->bi_end_io = iomap_read_end_io;
+ }
+
+ bio_add_page(ctx->bio, page, plen, poff);
+done:
+ /*
+ * Move the caller beyond our range so that it keeps making progress.
+ * For that we have to include any leading non-uptodate ranges, but
+ * we can skip trailing ones as they will be handled in the next
+ * iteration.
+ */
+ return pos - orig_pos + plen;
+}
+
+int
+iomap_readpage(struct page *page, const struct iomap_ops *ops)
+{
+ struct iomap_readpage_ctx ctx = { .cur_page = page };
+ struct inode *inode = page->mapping->host;
+ unsigned poff;
+ loff_t ret;
+
+ for (poff = 0; poff < PAGE_SIZE; poff += ret) {
+ ret = iomap_apply(inode, page_offset(page) + poff,
+ PAGE_SIZE - poff, 0, ops, &ctx,
+ iomap_readpage_actor);
+ if (ret <= 0) {
+ WARN_ON_ONCE(ret == 0);
+ SetPageError(page);
+ break;
+ }
+ }
+
+ if (ctx.bio) {
+ submit_bio(ctx.bio);
+ WARN_ON_ONCE(!ctx.cur_page_in_bio);
+ } else {
+ WARN_ON_ONCE(ctx.cur_page_in_bio);
+ unlock_page(page);
+ }
+
+ /*
+ * Just like mpage_readpages and block_read_full_page we always
+ * return 0 and just mark the page as PageError on errors. This
+ * should be cleaned up all through the stack eventually.
+ */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_readpage);
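
As a usage sketch, not part of this change: a filesystem built on these helpers would wire the read side into its address_space_operations roughly as below. The myfs_* names and myfs_iomap_ops are assumptions standing in for the filesystem's own definitions; the shape loosely mirrors how xfs consumes this API.

    /* Hypothetical glue; myfs_iomap_ops is assumed to be defined elsewhere. */
    static int myfs_readpage(struct file *unused, struct page *page)
    {
            return iomap_readpage(page, &myfs_iomap_ops);
    }

    static int myfs_readpages(struct file *unused, struct address_space *mapping,
                    struct list_head *pages, unsigned nr_pages)
    {
            return iomap_readpages(mapping, pages, nr_pages, &myfs_iomap_ops);
    }

    static const struct address_space_operations myfs_aops = {
            .readpage               = myfs_readpage,
            .readpages              = myfs_readpages,
            .releasepage            = iomap_releasepage,
            .invalidatepage         = iomap_invalidatepage,
            .is_partially_uptodate  = iomap_is_partially_uptodate,
    };

The releasepage, invalidatepage and is_partially_uptodate entries point at the helpers added further down in this file.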
+
+static struct page *
+iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
+ loff_t length, loff_t *done)
+{
+ while (!list_empty(pages)) {
+ struct page *page = lru_to_page(pages);
+
+ if (page_offset(page) >= (u64)pos + length)
+ break;
+
+ list_del(&page->lru);
+ if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
+ GFP_NOFS))
+ return page;
+
+ /*
+ * If we already have a page in the page cache at index we are
+ * done. Upper layers don't care if it is uptodate after the
+		 * readpages call itself, as every page gets checked again once
+ * actually needed.
+ */
+ *done += PAGE_SIZE;
+ put_page(page);
+ }
+
+ return NULL;
+}
+
+static loff_t
+iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
+ void *data, struct iomap *iomap)
+{
+ struct iomap_readpage_ctx *ctx = data;
+ loff_t done, ret;
+
+ for (done = 0; done < length; done += ret) {
+ if (ctx->cur_page && offset_in_page(pos + done) == 0) {
+ if (!ctx->cur_page_in_bio)
+ unlock_page(ctx->cur_page);
+ put_page(ctx->cur_page);
+ ctx->cur_page = NULL;
+ }
+ if (!ctx->cur_page) {
+ ctx->cur_page = iomap_next_page(inode, ctx->pages,
+ pos, length, &done);
+ if (!ctx->cur_page)
+ break;
+ ctx->cur_page_in_bio = false;
+ }
+ ret = iomap_readpage_actor(inode, pos + done, length - done,
+ ctx, iomap);
+ }
+
+ return done;
+}
+
+int
+iomap_readpages(struct address_space *mapping, struct list_head *pages,
+ unsigned nr_pages, const struct iomap_ops *ops)
+{
+ struct iomap_readpage_ctx ctx = {
+ .pages = pages,
+ .is_readahead = true,
+ };
+ loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
+ loff_t last = page_offset(list_entry(pages->next, struct page, lru));
+ loff_t length = last - pos + PAGE_SIZE, ret = 0;
+
+ while (length > 0) {
+ ret = iomap_apply(mapping->host, pos, length, 0, ops,
+ &ctx, iomap_readpages_actor);
+ if (ret <= 0) {
+ WARN_ON_ONCE(ret == 0);
+ goto done;
+ }
+ pos += ret;
+ length -= ret;
+ }
+ ret = 0;
+done:
+ if (ctx.bio)
+ submit_bio(ctx.bio);
+ if (ctx.cur_page) {
+ if (!ctx.cur_page_in_bio)
+ unlock_page(ctx.cur_page);
+ put_page(ctx.cur_page);
+ }
+
+ /*
+	 * Check that we didn't lose a page due to the arcane calling
+	 * conventions.
+ */
+ WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iomap_readpages);
+
+/*
+ * iomap_is_partially_uptodate checks whether blocks within a page are
+ * uptodate or not.
+ *
+ * Returns true if all blocks which correspond to a file portion
+ * we want to read within the page are uptodate.
+ */
+int
+iomap_is_partially_uptodate(struct page *page, unsigned long from,
+ unsigned long count)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+ struct inode *inode = page->mapping->host;
+ unsigned len, first, last;
+ unsigned i;
+
+ /* Limit range to one page */
+ len = min_t(unsigned, PAGE_SIZE - from, count);
+
+ /* First and last blocks in range within page */
+ first = from >> inode->i_blkbits;
+ last = (from + len - 1) >> inode->i_blkbits;
+
+ if (iop) {
+ for (i = first; i <= last; i++)
+ if (!test_bit(i, iop->uptodate))
+ return 0;
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
+
+int
+iomap_releasepage(struct page *page, gfp_t gfp_mask)
+{
+ /*
+ * mm accommodates an old ext3 case where clean pages might not have had
+ * the dirty bit cleared. Thus, it can send actual dirty pages to
+	 * ->releasepage() via shrink_active_list(); skip those here.
+ */
+ if (PageDirty(page) || PageWriteback(page))
+ return 0;
+ iomap_page_release(page);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(iomap_releasepage);
+
+void
+iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
+{
+ /*
+ * If we are invalidating the entire page, clear the dirty state from it
+ * and release it to avoid unnecessary buildup of the LRU.
+ */
+ if (offset == 0 && len == PAGE_SIZE) {
+ WARN_ON_ONCE(PageWriteback(page));
+ cancel_dirty_page(page);
+ iomap_page_release(page);
+ }
+}
+EXPORT_SYMBOL_GPL(iomap_invalidatepage);
+
+#ifdef CONFIG_MIGRATION
+int
+iomap_migrate_page(struct address_space *mapping, struct page *newpage,
+ struct page *page, enum migrate_mode mode)
+{
+ int ret;
+
+ ret = migrate_page_move_mapping(mapping, newpage, page, 0);
+ if (ret != MIGRATEPAGE_SUCCESS)
+ return ret;
+
+ if (page_has_private(page)) {
+ ClearPagePrivate(page);
+ get_page(newpage);
+ set_page_private(newpage, page_private(page));
+ set_page_private(page, 0);
+ put_page(page);
+ SetPagePrivate(newpage);
+ }
+
+ if (mode != MIGRATE_SYNC_NO_COPY)
+ migrate_page_copy(newpage, page);
+ else
+ migrate_page_states(newpage, page);
+ return MIGRATEPAGE_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(iomap_migrate_page);
+#endif /* CONFIG_MIGRATION */
+
+static void
+iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
+{
+ loff_t i_size = i_size_read(inode);
+
+ /*
+	 * Only truncate newly allocated pages beyond EOF, even if the
+ * write started inside the existing inode size.
+ */
+ if (pos + len > i_size)
+ truncate_pagecache_range(inode, max(pos, i_size), pos + len);
+}
+
+static int
+iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
+ unsigned poff, unsigned plen, unsigned from, unsigned to,
+ struct iomap *iomap)
+{
+ struct bio_vec bvec;
+ struct bio bio;
+
+ if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
+ zero_user_segments(page, poff, from, to, poff + plen);
+ iomap_set_range_uptodate(page, poff, plen);
+ return 0;
+ }
+
+ bio_init(&bio, &bvec, 1);
+ bio.bi_opf = REQ_OP_READ;
+ bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
+ bio_set_dev(&bio, iomap->bdev);
+ __bio_add_page(&bio, page, plen, poff);
+ return submit_bio_wait(&bio);
+}
+
+static int
+__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
+ struct page *page, struct iomap *iomap)
+{
+ struct iomap_page *iop = iomap_page_create(inode, page);
+ loff_t block_size = i_blocksize(inode);
+ loff_t block_start = pos & ~(block_size - 1);
+ loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
+ unsigned from = offset_in_page(pos), to = from + len, poff, plen;
+ int status = 0;
+
+ if (PageUptodate(page))
+ return 0;
+
+ do {
+ iomap_adjust_read_range(inode, iop, &block_start,
+ block_end - block_start, &poff, &plen);
+ if (plen == 0)
+ break;
+
+ if ((from > poff && from < poff + plen) ||
+ (to > poff && to < poff + plen)) {
+ status = iomap_read_page_sync(inode, block_start, page,
+ poff, plen, from, to, iomap);
+ if (status)
+ break;
+ }
+
+ } while ((block_start += plen) < block_end);
+
+ return status;
+}
+
+static int
+iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, struct iomap *iomap)
+{
+ const struct iomap_page_ops *page_ops = iomap->page_ops;
+ pgoff_t index = pos >> PAGE_SHIFT;
+ struct page *page;
+ int status = 0;
+
+ BUG_ON(pos + len > iomap->offset + iomap->length);
+
+ if (fatal_signal_pending(current))
+ return -EINTR;
+
+ if (page_ops && page_ops->page_prepare) {
+ status = page_ops->page_prepare(inode, pos, len, iomap);
+ if (status)
+ return status;
+ }
+
+ page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
+ if (!page) {
+ status = -ENOMEM;
+ goto out_no_page;
+ }
+
+ if (iomap->type == IOMAP_INLINE)
+ iomap_read_inline_data(inode, page, iomap);
+ else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
+ status = __block_write_begin_int(page, pos, len, NULL, iomap);
+ else
+ status = __iomap_write_begin(inode, pos, len, page, iomap);
+
+ if (unlikely(status))
+ goto out_unlock;
+
+ *pagep = page;
+ return 0;
+
+out_unlock:
+ unlock_page(page);
+ put_page(page);
+ iomap_write_failed(inode, pos, len);
+
+out_no_page:
+ if (page_ops && page_ops->page_done)
+ page_ops->page_done(inode, pos, 0, NULL, iomap);
+ return status;
+}
+
+int
+iomap_set_page_dirty(struct page *page)
+{
+ struct address_space *mapping = page_mapping(page);
+ int newly_dirty;
+
+ if (unlikely(!mapping))
+ return !TestSetPageDirty(page);
+
+ /*
+ * Lock out page->mem_cgroup migration to keep PageDirty
+ * synchronized with per-memcg dirty page counters.
+ */
+ lock_page_memcg(page);
+ newly_dirty = !TestSetPageDirty(page);
+ if (newly_dirty)
+ __set_page_dirty(page, mapping, 0);
+ unlock_page_memcg(page);
+
+ if (newly_dirty)
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+ return newly_dirty;
+}
+EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
+
+static int
+__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
+ unsigned copied, struct page *page, struct iomap *iomap)
+{
+ flush_dcache_page(page);
+
+ /*
+ * The blocks that were entirely written will now be uptodate, so we
+ * don't have to worry about a readpage reading them and overwriting a
+ * partial write. However if we have encountered a short write and only
+ * partially written into a block, it will not be marked uptodate, so a
+ * readpage might come in and destroy our partial write.
+ *
+	 * Do the simplest thing, and just treat any short write to a
+	 * non-uptodate page as a zero-length write, and force the caller
+	 * to redo the whole thing.
+ */
+ if (unlikely(copied < len && !PageUptodate(page)))
+ return 0;
+ iomap_set_range_uptodate(page, offset_in_page(pos), len);
+ iomap_set_page_dirty(page);
+ return copied;
+}
+
+static int
+iomap_write_end_inline(struct inode *inode, struct page *page,
+ struct iomap *iomap, loff_t pos, unsigned copied)
+{
+ void *addr;
+
+ WARN_ON_ONCE(!PageUptodate(page));
+ BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
+
+ addr = kmap_atomic(page);
+ memcpy(iomap->inline_data + pos, addr + pos, copied);
+ kunmap_atomic(addr);
+
+ mark_inode_dirty(inode);
+ return copied;
+}
+
+static int
+iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
+ unsigned copied, struct page *page, struct iomap *iomap)
+{
+ const struct iomap_page_ops *page_ops = iomap->page_ops;
+ loff_t old_size = inode->i_size;
+ int ret;
+
+ if (iomap->type == IOMAP_INLINE) {
+ ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
+ } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
+ ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
+ page, NULL);
+ } else {
+ ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
+ }
+
+ /*
+ * Update the in-memory inode size after copying the data into the page
+ * cache. It's up to the file system to write the updated size to disk,
+ * preferably after I/O completion so that no stale data is exposed.
+ */
+ if (pos + ret > old_size) {
+ i_size_write(inode, pos + ret);
+ iomap->flags |= IOMAP_F_SIZE_CHANGED;
+ }
+ unlock_page(page);
+
+ if (old_size < pos)
+ pagecache_isize_extended(inode, old_size, pos);
+ if (page_ops && page_ops->page_done)
+ page_ops->page_done(inode, pos, ret, page, iomap);
+ put_page(page);
+
+ if (ret < len)
+ iomap_write_failed(inode, pos, len);
+ return ret;
+}
+
+static loff_t
+iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ struct iomap *iomap)
+{
+ struct iov_iter *i = data;
+ long status = 0;
+ ssize_t written = 0;
+ unsigned int flags = AOP_FLAG_NOFS;
+
+ do {
+ struct page *page;
+ unsigned long offset; /* Offset into pagecache page */
+ unsigned long bytes; /* Bytes to write to page */
+ size_t copied; /* Bytes copied from user */
+
+ offset = offset_in_page(pos);
+ bytes = min_t(unsigned long, PAGE_SIZE - offset,
+ iov_iter_count(i));
+again:
+ if (bytes > length)
+ bytes = length;
+
+ /*
+ * Bring in the user page that we will copy from _first_.
+ * Otherwise there's a nasty deadlock on copying from the
+ * same page as we're writing to, without it being marked
+ * up-to-date.
+ *
+ * Not only is this an optimisation, but it is also required
+ * to check that the address is actually valid, when atomic
+ * usercopies are used, below.
+ */
+ if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+ status = -EFAULT;
+ break;
+ }
+
+ status = iomap_write_begin(inode, pos, bytes, flags, &page,
+ iomap);
+ if (unlikely(status))
+ break;
+
+ if (mapping_writably_mapped(inode->i_mapping))
+ flush_dcache_page(page);
+
+ copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
+
+ flush_dcache_page(page);
+
+ status = iomap_write_end(inode, pos, bytes, copied, page,
+ iomap);
+ if (unlikely(status < 0))
+ break;
+ copied = status;
+
+ cond_resched();
+
+ iov_iter_advance(i, copied);
+ if (unlikely(copied == 0)) {
+ /*
+ * If we were unable to copy any data at all, we must
+ * fall back to a single segment length write.
+ *
+ * If we didn't fallback here, we could livelock
+ * because not all segments in the iov can be copied at
+ * once without a pagefault.
+ */
+ bytes = min_t(unsigned long, PAGE_SIZE - offset,
+ iov_iter_single_seg_count(i));
+ goto again;
+ }
+ pos += copied;
+ written += copied;
+ length -= copied;
+
+ balance_dirty_pages_ratelimited(inode->i_mapping);
+ } while (iov_iter_count(i) && length);
+
+ return written ? written : status;
+}
+
+ssize_t
+iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
+ const struct iomap_ops *ops)
+{
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
+ loff_t pos = iocb->ki_pos, ret = 0, written = 0;
+
+ while (iov_iter_count(iter)) {
+ ret = iomap_apply(inode, pos, iov_iter_count(iter),
+ IOMAP_WRITE, ops, iter, iomap_write_actor);
+ if (ret <= 0)
+ break;
+ pos += ret;
+ written += ret;
+ }
+
+ return written ? written : ret;
+}
+EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
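
A minimal caller sketch (hypothetical names throughout): a filesystem's buffered ->write_iter would typically serialise on the inode lock, run the generic checks, and pass the iterator straight through:

    static ssize_t myfs_buffered_write_iter(struct kiocb *iocb,
                    struct iov_iter *from)
    {
            struct inode *inode = file_inode(iocb->ki_filp);
            ssize_t ret;

            inode_lock(inode);
            ret = generic_write_checks(iocb, from);
            if (ret > 0)
                    ret = iomap_file_buffered_write(iocb, from,
                                    &myfs_iomap_ops);
            inode_unlock(inode);

            /* O_SYNC / O_DSYNC handling, as in generic callers */
            if (ret > 0)
                    ret = generic_write_sync(iocb, ret);
            return ret;
    }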
+
+static struct page *
+__iomap_read_page(struct inode *inode, loff_t offset)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+
+ page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
+ if (IS_ERR(page))
+ return page;
+ if (!PageUptodate(page)) {
+ put_page(page);
+ return ERR_PTR(-EIO);
+ }
+ return page;
+}
+
+static loff_t
+iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ struct iomap *iomap)
+{
+ long status = 0;
+ ssize_t written = 0;
+
+ do {
+ struct page *page, *rpage;
+ unsigned long offset; /* Offset into pagecache page */
+ unsigned long bytes; /* Bytes to write to page */
+
+ offset = offset_in_page(pos);
+ bytes = min_t(loff_t, PAGE_SIZE - offset, length);
+
+ rpage = __iomap_read_page(inode, pos);
+ if (IS_ERR(rpage))
+ return PTR_ERR(rpage);
+
+ status = iomap_write_begin(inode, pos, bytes,
+ AOP_FLAG_NOFS, &page, iomap);
+ put_page(rpage);
+ if (unlikely(status))
+ return status;
+
+ WARN_ON_ONCE(!PageUptodate(page));
+
+ status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
+ if (unlikely(status <= 0)) {
+ if (WARN_ON_ONCE(status == 0))
+ return -EIO;
+ return status;
+ }
+
+ cond_resched();
+
+ pos += status;
+ written += status;
+ length -= status;
+
+ balance_dirty_pages_ratelimited(inode->i_mapping);
+ } while (length);
+
+ return written;
+}
+
+int
+iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
+ const struct iomap_ops *ops)
+{
+ loff_t ret;
+
+ while (len) {
+ ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
+ iomap_dirty_actor);
+ if (ret <= 0)
+ return ret;
+ pos += ret;
+ len -= ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_file_dirty);
+
+static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
+ unsigned bytes, struct iomap *iomap)
+{
+ struct page *page;
+ int status;
+
+ status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
+ iomap);
+ if (status)
+ return status;
+
+ zero_user(page, offset, bytes);
+ mark_page_accessed(page);
+
+ return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
+}
+
+static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
+ struct iomap *iomap)
+{
+ return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
+ iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
+}
+
+static loff_t
+iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
+ void *data, struct iomap *iomap)
+{
+ bool *did_zero = data;
+ loff_t written = 0;
+ int status;
+
+ /* already zeroed? we're done. */
+ if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
+ return count;
+
+ do {
+ unsigned offset, bytes;
+
+ offset = offset_in_page(pos);
+ bytes = min_t(loff_t, PAGE_SIZE - offset, count);
+
+ if (IS_DAX(inode))
+ status = iomap_dax_zero(pos, offset, bytes, iomap);
+ else
+ status = iomap_zero(inode, pos, offset, bytes, iomap);
+ if (status < 0)
+ return status;
+
+ pos += bytes;
+ count -= bytes;
+ written += bytes;
+ if (did_zero)
+ *did_zero = true;
+ } while (count > 0);
+
+ return written;
+}
+
+int
+iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
+ const struct iomap_ops *ops)
+{
+ loff_t ret;
+
+ while (len > 0) {
+ ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
+ ops, did_zero, iomap_zero_range_actor);
+ if (ret <= 0)
+ return ret;
+
+ pos += ret;
+ len -= ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_zero_range);
+
+int
+iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+ const struct iomap_ops *ops)
+{
+ unsigned int blocksize = i_blocksize(inode);
+ unsigned int off = pos & (blocksize - 1);
+
+ /* Block boundary? Nothing to do */
+ if (!off)
+ return 0;
+ return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
+}
+EXPORT_SYMBOL_GPL(iomap_truncate_page);
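
As an illustration of the intended caller (assumed names): when shrinking a file, a filesystem would zero the now-partial tail block before publishing the new size, roughly:

    static int myfs_shrink(struct inode *inode, loff_t newsize)
    {
            bool did_zero = false;
            int error;

            /* Zero the tail of the last remaining block, if any. */
            error = iomap_truncate_page(inode, newsize, &did_zero,
                            &myfs_iomap_ops);
            if (error)
                    return error;

            truncate_setsize(inode, newsize);
            /* filesystem-specific freeing of blocks past newsize follows */
            return 0;
    }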
+
+static loff_t
+iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
+ void *data, struct iomap *iomap)
+{
+ struct page *page = data;
+ int ret;
+
+ if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
+ ret = __block_write_begin_int(page, pos, length, NULL, iomap);
+ if (ret)
+ return ret;
+ block_commit_write(page, 0, length);
+ } else {
+ WARN_ON_ONCE(!PageUptodate(page));
+ iomap_page_create(inode, page);
+ set_page_dirty(page);
+ }
+
+ return length;
+}
+
+vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
+{
+ struct page *page = vmf->page;
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+ unsigned long length;
+ loff_t offset, size;
+ ssize_t ret;
+
+ lock_page(page);
+ size = i_size_read(inode);
+ if ((page->mapping != inode->i_mapping) ||
+ (page_offset(page) > size)) {
+ /* We overload EFAULT to mean page got truncated */
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ /* page is wholly or partially inside EOF */
+ if (((page->index + 1) << PAGE_SHIFT) > size)
+ length = offset_in_page(size);
+ else
+ length = PAGE_SIZE;
+
+ offset = page_offset(page);
+ while (length > 0) {
+ ret = iomap_apply(inode, offset, length,
+ IOMAP_WRITE | IOMAP_FAULT, ops, page,
+ iomap_page_mkwrite_actor);
+ if (unlikely(ret <= 0))
+ goto out_unlock;
+ offset += ret;
+ length -= ret;
+ }
+
+ wait_for_stable_page(page);
+ return VM_FAULT_LOCKED;
+out_unlock:
+ unlock_page(page);
+ return block_page_mkwrite_return(ret);
+}
+EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
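
To complete the picture, a sketch of the mmap write-fault wiring (hypothetical, but close to how xfs hooks this in): the handler brackets iomap_page_mkwrite() with the superblock pagefault freeze protection and a timestamp update:

    static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
    {
            struct inode *inode = file_inode(vmf->vma->vm_file);
            vm_fault_t ret;

            sb_start_pagefault(inode->i_sb);
            file_update_time(vmf->vma->vm_file);
            ret = iomap_page_mkwrite(vmf, &myfs_iomap_ops);
            sb_end_pagefault(inode->i_sb);
            return ret;
    }

    static const struct vm_operations_struct myfs_file_vm_ops = {
            .fault          = filemap_fault,
            .map_pages      = filemap_map_pages,
            .page_mkwrite   = myfs_page_mkwrite,
    };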
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
new file mode 100644
index 000000000000..10517cea9682
--- /dev/null
+++ b/fs/iomap/direct-io.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ * Copyright (c) 2016-2018 Christoph Hellwig.
+ */
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/fs.h>
+#include <linux/iomap.h>
+#include <linux/backing-dev.h>
+#include <linux/uio.h>
+#include <linux/task_io_accounting_ops.h>
+
+#include "../internal.h"
+
+/*
+ * Private flags for iomap_dio, must not overlap with the public ones in
+ * iomap.h:
+ */
+#define IOMAP_DIO_WRITE_FUA (1 << 28)
+#define IOMAP_DIO_NEED_SYNC (1 << 29)
+#define IOMAP_DIO_WRITE (1 << 30)
+#define IOMAP_DIO_DIRTY (1 << 31)
+
+struct iomap_dio {
+ struct kiocb *iocb;
+ iomap_dio_end_io_t *end_io;
+ loff_t i_size;
+ loff_t size;
+ atomic_t ref;
+ unsigned flags;
+ int error;
+ bool wait_for_completion;
+
+ union {
+ /* used during submission and for synchronous completion: */
+ struct {
+ struct iov_iter *iter;
+ struct task_struct *waiter;
+ struct request_queue *last_queue;
+ blk_qc_t cookie;
+ } submit;
+
+ /* used for aio completion: */
+ struct {
+ struct work_struct work;
+ } aio;
+ };
+};
+
+int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
+{
+ struct request_queue *q = READ_ONCE(kiocb->private);
+
+ if (!q)
+ return 0;
+ return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
+}
+EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
+
+static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
+ struct bio *bio)
+{
+ atomic_inc(&dio->ref);
+
+ if (dio->iocb->ki_flags & IOCB_HIPRI)
+ bio_set_polled(bio, dio->iocb);
+
+ dio->submit.last_queue = bdev_get_queue(iomap->bdev);
+ dio->submit.cookie = submit_bio(bio);
+}
+
+static ssize_t iomap_dio_complete(struct iomap_dio *dio)
+{
+ struct kiocb *iocb = dio->iocb;
+ struct inode *inode = file_inode(iocb->ki_filp);
+ loff_t offset = iocb->ki_pos;
+ ssize_t ret;
+
+ if (dio->end_io) {
+ ret = dio->end_io(iocb,
+ dio->error ? dio->error : dio->size,
+ dio->flags);
+ } else {
+ ret = dio->error;
+ }
+
+ if (likely(!ret)) {
+ ret = dio->size;
+ /* check for short read */
+ if (offset + ret > dio->i_size &&
+ !(dio->flags & IOMAP_DIO_WRITE))
+ ret = dio->i_size - offset;
+ iocb->ki_pos += ret;
+ }
+
+ /*
+ * Try again to invalidate clean pages which might have been cached by
+ * non-direct readahead, or faulted in by get_user_pages() if the source
+ * of the write was an mmap'ed region of the file we're writing. Either
+ * one is a pretty crazy thing to do, so we don't support it 100%. If
+ * this invalidation fails, tough, the write still worked...
+ *
+ * And this page cache invalidation has to be after dio->end_io(), as
+ * some filesystems convert unwritten extents to real allocations in
+ * end_io() when necessary, otherwise a racing buffer read would cache
+ * zeros from unwritten extents.
+ */
+ if (!dio->error &&
+ (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
+ int err;
+ err = invalidate_inode_pages2_range(inode->i_mapping,
+ offset >> PAGE_SHIFT,
+ (offset + dio->size - 1) >> PAGE_SHIFT);
+ if (err)
+ dio_warn_stale_pagecache(iocb->ki_filp);
+ }
+
+ /*
+ * If this is a DSYNC write, make sure we push it to stable storage now
+ * that we've written data.
+ */
+ if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
+ ret = generic_write_sync(iocb, ret);
+
+ inode_dio_end(file_inode(iocb->ki_filp));
+ kfree(dio);
+
+ return ret;
+}
+
+static void iomap_dio_complete_work(struct work_struct *work)
+{
+ struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
+ struct kiocb *iocb = dio->iocb;
+
+ iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
+}
+
+/*
+ * Set an error in the dio if none is set yet. We have to use cmpxchg
+ * as the submission context and the completion context(s) can race to
+ * update the error.
+ */
+static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
+{
+ cmpxchg(&dio->error, 0, ret);
+}
+
+static void iomap_dio_bio_end_io(struct bio *bio)
+{
+ struct iomap_dio *dio = bio->bi_private;
+ bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
+
+ if (bio->bi_status)
+ iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
+
+ if (atomic_dec_and_test(&dio->ref)) {
+ if (dio->wait_for_completion) {
+ struct task_struct *waiter = dio->submit.waiter;
+ WRITE_ONCE(dio->submit.waiter, NULL);
+ blk_wake_io_task(waiter);
+ } else if (dio->flags & IOMAP_DIO_WRITE) {
+ struct inode *inode = file_inode(dio->iocb->ki_filp);
+
+ INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
+ queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
+ } else {
+ iomap_dio_complete_work(&dio->aio.work);
+ }
+ }
+
+ if (should_dirty) {
+ bio_check_pages_dirty(bio);
+ } else {
+ bio_release_pages(bio, false);
+ bio_put(bio);
+ }
+}
+
+static void
+iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
+ unsigned len)
+{
+ struct page *page = ZERO_PAGE(0);
+ int flags = REQ_SYNC | REQ_IDLE;
+ struct bio *bio;
+
+ bio = bio_alloc(GFP_KERNEL, 1);
+ bio_set_dev(bio, iomap->bdev);
+ bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
+ bio->bi_private = dio;
+ bio->bi_end_io = iomap_dio_bio_end_io;
+
+ get_page(page);
+ __bio_add_page(bio, page, len, 0);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
+ iomap_dio_submit_bio(dio, iomap, bio);
+}
+
+static loff_t
+iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
+ struct iomap_dio *dio, struct iomap *iomap)
+{
+ unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
+ unsigned int fs_block_size = i_blocksize(inode), pad;
+ unsigned int align = iov_iter_alignment(dio->submit.iter);
+ struct iov_iter iter;
+ struct bio *bio;
+ bool need_zeroout = false;
+ bool use_fua = false;
+ int nr_pages, ret = 0;
+ size_t copied = 0;
+
+ if ((pos | length | align) & ((1 << blkbits) - 1))
+ return -EINVAL;
+
+ if (iomap->type == IOMAP_UNWRITTEN) {
+ dio->flags |= IOMAP_DIO_UNWRITTEN;
+ need_zeroout = true;
+ }
+
+ if (iomap->flags & IOMAP_F_SHARED)
+ dio->flags |= IOMAP_DIO_COW;
+
+ if (iomap->flags & IOMAP_F_NEW) {
+ need_zeroout = true;
+ } else if (iomap->type == IOMAP_MAPPED) {
+ /*
+		 * Use a FUA write if we need datasync semantics: this is a pure
+ * data IO that doesn't require any metadata updates (including
+ * after IO completion such as unwritten extent conversion) and
+ * the underlying device supports FUA. This allows us to avoid
+ * cache flushes on IO completion.
+ */
+ if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
+ (dio->flags & IOMAP_DIO_WRITE_FUA) &&
+ blk_queue_fua(bdev_get_queue(iomap->bdev)))
+ use_fua = true;
+ }
+
+ /*
+ * Operate on a partial iter trimmed to the extent we were called for.
+ * We'll update the iter in the dio once we're done with this extent.
+ */
+ iter = *dio->submit.iter;
+ iov_iter_truncate(&iter, length);
+
+ nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
+ if (nr_pages <= 0)
+ return nr_pages;
+
+ if (need_zeroout) {
+ /* zero out from the start of the block to the write offset */
+ pad = pos & (fs_block_size - 1);
+ if (pad)
+ iomap_dio_zero(dio, iomap, pos - pad, pad);
+ }
+
+ do {
+ size_t n;
+ if (dio->error) {
+ iov_iter_revert(dio->submit.iter, copied);
+ return 0;
+ }
+
+ bio = bio_alloc(GFP_KERNEL, nr_pages);
+ bio_set_dev(bio, iomap->bdev);
+ bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
+ bio->bi_write_hint = dio->iocb->ki_hint;
+ bio->bi_ioprio = dio->iocb->ki_ioprio;
+ bio->bi_private = dio;
+ bio->bi_end_io = iomap_dio_bio_end_io;
+
+ ret = bio_iov_iter_get_pages(bio, &iter);
+ if (unlikely(ret)) {
+ /*
+ * We have to stop part way through an IO. We must fall
+ * through to the sub-block tail zeroing here, otherwise
+ * this short IO may expose stale data in the tail of
+ * the block we haven't written data to.
+ */
+ bio_put(bio);
+ goto zero_tail;
+ }
+
+ n = bio->bi_iter.bi_size;
+ if (dio->flags & IOMAP_DIO_WRITE) {
+ bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
+ if (use_fua)
+ bio->bi_opf |= REQ_FUA;
+ else
+ dio->flags &= ~IOMAP_DIO_WRITE_FUA;
+ task_io_account_write(n);
+ } else {
+ bio->bi_opf = REQ_OP_READ;
+ if (dio->flags & IOMAP_DIO_DIRTY)
+ bio_set_pages_dirty(bio);
+ }
+
+ iov_iter_advance(dio->submit.iter, n);
+
+ dio->size += n;
+ pos += n;
+ copied += n;
+
+ nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
+ iomap_dio_submit_bio(dio, iomap, bio);
+ } while (nr_pages);
+
+ /*
+	 * We need to zero out the tail of a sub-block write if the extent type
+ * requires zeroing or the write extends beyond EOF. If we don't zero
+ * the block tail in the latter case, we can expose stale data via mmap
+ * reads of the EOF block.
+ */
+zero_tail:
+ if (need_zeroout ||
+ ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
+ /* zero out from the end of the write to the end of the block */
+ pad = pos & (fs_block_size - 1);
+ if (pad)
+ iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
+ }
+ return copied ? copied : ret;
+}
+
+static loff_t
+iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
+{
+ length = iov_iter_zero(length, dio->submit.iter);
+ dio->size += length;
+ return length;
+}
+
+static loff_t
+iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
+ struct iomap_dio *dio, struct iomap *iomap)
+{
+ struct iov_iter *iter = dio->submit.iter;
+ size_t copied;
+
+ BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));
+
+ if (dio->flags & IOMAP_DIO_WRITE) {
+ loff_t size = inode->i_size;
+
+ if (pos > size)
+ memset(iomap->inline_data + size, 0, pos - size);
+ copied = copy_from_iter(iomap->inline_data + pos, length, iter);
+ if (copied) {
+ if (pos + copied > size)
+ i_size_write(inode, pos + copied);
+ mark_inode_dirty(inode);
+ }
+ } else {
+ copied = copy_to_iter(iomap->inline_data + pos, length, iter);
+ }
+ dio->size += copied;
+ return copied;
+}
+
+static loff_t
+iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
+ void *data, struct iomap *iomap)
+{
+ struct iomap_dio *dio = data;
+
+ switch (iomap->type) {
+ case IOMAP_HOLE:
+ if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
+ return -EIO;
+ return iomap_dio_hole_actor(length, dio);
+ case IOMAP_UNWRITTEN:
+ if (!(dio->flags & IOMAP_DIO_WRITE))
+ return iomap_dio_hole_actor(length, dio);
+ return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
+ case IOMAP_MAPPED:
+ return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
+ case IOMAP_INLINE:
+ return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
+ default:
+ WARN_ON_ONCE(1);
+ return -EIO;
+ }
+}
+
+/*
+ * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
+ * is being issued as AIO or not. This allows us to optimise pure data writes
+ * to use REQ_FUA rather than requiring generic_write_sync() to issue a
+ * REQ_FLUSH post write. This is slightly tricky because a single request here
+ * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
+ * may be pure data writes. In that case, we still need to do a full data sync
+ * completion.
+ */
+ssize_t
+iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
+{
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+ struct inode *inode = file_inode(iocb->ki_filp);
+ size_t count = iov_iter_count(iter);
+ loff_t pos = iocb->ki_pos, start = pos;
+ loff_t end = iocb->ki_pos + count - 1, ret = 0;
+ unsigned int flags = IOMAP_DIRECT;
+ bool wait_for_completion = is_sync_kiocb(iocb);
+ struct blk_plug plug;
+ struct iomap_dio *dio;
+
+ lockdep_assert_held(&inode->i_rwsem);
+
+ if (!count)
+ return 0;
+
+ dio = kmalloc(sizeof(*dio), GFP_KERNEL);
+ if (!dio)
+ return -ENOMEM;
+
+ dio->iocb = iocb;
+ atomic_set(&dio->ref, 1);
+ dio->size = 0;
+ dio->i_size = i_size_read(inode);
+ dio->end_io = end_io;
+ dio->error = 0;
+ dio->flags = 0;
+
+ dio->submit.iter = iter;
+ dio->submit.waiter = current;
+ dio->submit.cookie = BLK_QC_T_NONE;
+ dio->submit.last_queue = NULL;
+
+ if (iov_iter_rw(iter) == READ) {
+ if (pos >= dio->i_size)
+ goto out_free_dio;
+
+ if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ)
+ dio->flags |= IOMAP_DIO_DIRTY;
+ } else {
+ flags |= IOMAP_WRITE;
+ dio->flags |= IOMAP_DIO_WRITE;
+
+ /* for data sync or sync, we need sync completion processing */
+ if (iocb->ki_flags & IOCB_DSYNC)
+ dio->flags |= IOMAP_DIO_NEED_SYNC;
+
+ /*
+		 * For datasync-only writes, we optimistically try using FUA for
+ * this IO. Any non-FUA write that occurs will clear this flag,
+ * hence we know before completion whether a cache flush is
+ * necessary.
+ */
+ if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
+ dio->flags |= IOMAP_DIO_WRITE_FUA;
+ }
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (filemap_range_has_page(mapping, start, end)) {
+ ret = -EAGAIN;
+ goto out_free_dio;
+ }
+ flags |= IOMAP_NOWAIT;
+ }
+
+ ret = filemap_write_and_wait_range(mapping, start, end);
+ if (ret)
+ goto out_free_dio;
+
+ /*
+ * Try to invalidate cache pages for the range we're direct
+ * writing. If this invalidation fails, tough, the write will
+ * still work, but racing two incompatible write paths is a
+ * pretty crazy thing to do, so we don't support it 100%.
+ */
+ ret = invalidate_inode_pages2_range(mapping,
+ start >> PAGE_SHIFT, end >> PAGE_SHIFT);
+ if (ret)
+ dio_warn_stale_pagecache(iocb->ki_filp);
+ ret = 0;
+
+ if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
+ !inode->i_sb->s_dio_done_wq) {
+ ret = sb_init_dio_done_wq(inode->i_sb);
+ if (ret < 0)
+ goto out_free_dio;
+ }
+
+ inode_dio_begin(inode);
+
+ blk_start_plug(&plug);
+ do {
+ ret = iomap_apply(inode, pos, count, flags, ops, dio,
+ iomap_dio_actor);
+ if (ret <= 0) {
+ /* magic error code to fall back to buffered I/O */
+ if (ret == -ENOTBLK) {
+ wait_for_completion = true;
+ ret = 0;
+ }
+ break;
+ }
+ pos += ret;
+
+ if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
+ break;
+ } while ((count = iov_iter_count(iter)) > 0);
+ blk_finish_plug(&plug);
+
+ if (ret < 0)
+ iomap_dio_set_error(dio, ret);
+
+ /*
+ * If all the writes we issued were FUA, we don't need to flush the
+ * cache on IO completion. Clear the sync flag for this case.
+ */
+ if (dio->flags & IOMAP_DIO_WRITE_FUA)
+ dio->flags &= ~IOMAP_DIO_NEED_SYNC;
+
+ WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
+ WRITE_ONCE(iocb->private, dio->submit.last_queue);
+
+ /*
+ * We are about to drop our additional submission reference, which
+	 * might be the last reference to the dio. There are three
+ * different ways we can progress here:
+ *
+ * (a) If this is the last reference we will always complete and free
+ * the dio ourselves.
+ * (b) If this is not the last reference, and we serve an asynchronous
+ * iocb, we must never touch the dio after the decrement, the
+ * I/O completion handler will complete and free it.
+ * (c) If this is not the last reference, but we serve a synchronous
+ * iocb, the I/O completion handler will wake us up on the drop
+ * of the final reference, and we will complete and free it here
+ * after we got woken by the I/O completion handler.
+ */
+ dio->wait_for_completion = wait_for_completion;
+ if (!atomic_dec_and_test(&dio->ref)) {
+ if (!wait_for_completion)
+ return -EIOCBQUEUED;
+
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (!READ_ONCE(dio->submit.waiter))
+ break;
+
+ if (!(iocb->ki_flags & IOCB_HIPRI) ||
+ !dio->submit.last_queue ||
+ !blk_poll(dio->submit.last_queue,
+ dio->submit.cookie, true))
+ io_schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+ }
+
+ return iomap_dio_complete(dio);
+
+out_free_dio:
+ kfree(dio);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iomap_dio_rw);
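
Two hedged usage sketches (all myfs_* names are assumptions). First, a direct read path: the caller must hold i_rwsem, per the lockdep assertion above, and a shared hold suffices for reads. Second, an end_io callback matching iomap_dio_end_io_t, which is where a filesystem would convert unwritten extents after a write; its size argument carries either the amount of I/O completed or a negative error.

    static ssize_t myfs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
    {
            struct inode *inode = file_inode(iocb->ki_filp);
            ssize_t ret;

            if (!inode_trylock_shared(inode)) {
                    if (iocb->ki_flags & IOCB_NOWAIT)
                            return -EAGAIN;
                    inode_lock_shared(inode);
            }
            ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL);
            inode_unlock_shared(inode);
            return ret;
    }

    static int myfs_dio_write_end_io(struct kiocb *iocb, ssize_t size,
                    unsigned flags)
    {
            if (size <= 0)
                    return size;    /* pass errors straight through */
            if (flags & IOMAP_DIO_UNWRITTEN) {
                    /* convert unwritten extents over [ki_pos, ki_pos + size) */
            }
            return 0;               /* 0 lets dio->size become the result */
    }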
diff --git a/fs/iomap/fiemap.c b/fs/iomap/fiemap.c
new file mode 100644
index 000000000000..f26fdd36e383
--- /dev/null
+++ b/fs/iomap/fiemap.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018 Christoph Hellwig.
+ */
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/fs.h>
+#include <linux/iomap.h>
+
+struct fiemap_ctx {
+ struct fiemap_extent_info *fi;
+ struct iomap prev;
+};
+
+static int iomap_to_fiemap(struct fiemap_extent_info *fi,
+ struct iomap *iomap, u32 flags)
+{
+ switch (iomap->type) {
+ case IOMAP_HOLE:
+ /* skip holes */
+ return 0;
+ case IOMAP_DELALLOC:
+ flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
+ break;
+ case IOMAP_MAPPED:
+ break;
+ case IOMAP_UNWRITTEN:
+ flags |= FIEMAP_EXTENT_UNWRITTEN;
+ break;
+ case IOMAP_INLINE:
+ flags |= FIEMAP_EXTENT_DATA_INLINE;
+ break;
+ }
+
+ if (iomap->flags & IOMAP_F_MERGED)
+ flags |= FIEMAP_EXTENT_MERGED;
+ if (iomap->flags & IOMAP_F_SHARED)
+ flags |= FIEMAP_EXTENT_SHARED;
+
+ return fiemap_fill_next_extent(fi, iomap->offset,
+ iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
+ iomap->length, flags);
+}
+
+static loff_t
+iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ struct iomap *iomap)
+{
+ struct fiemap_ctx *ctx = data;
+ loff_t ret = length;
+
+ if (iomap->type == IOMAP_HOLE)
+ return length;
+
+ ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
+ ctx->prev = *iomap;
+ switch (ret) {
+ case 0: /* success */
+ return length;
+ case 1: /* extent array full */
+ return 0;
+ default:
+ return ret;
+ }
+}
+
+int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
+ loff_t start, loff_t len, const struct iomap_ops *ops)
+{
+ struct fiemap_ctx ctx;
+ loff_t ret;
+
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.fi = fi;
+ ctx.prev.type = IOMAP_HOLE;
+
+ ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
+ if (ret)
+ return ret;
+
+ if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
+ ret = filemap_write_and_wait(inode->i_mapping);
+ if (ret)
+ return ret;
+ }
+
+ while (len > 0) {
+ ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
+ iomap_fiemap_actor);
+ /* inode with no (attribute) mapping will give ENOENT */
+ if (ret == -ENOENT)
+ break;
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ break;
+
+ start += ret;
+ len -= ret;
+ }
+
+ if (ctx.prev.type != IOMAP_HOLE) {
+ ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_fiemap);
+
+static loff_t
+iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
+ void *data, struct iomap *iomap)
+{
+ sector_t *bno = data, addr;
+
+ if (iomap->type == IOMAP_MAPPED) {
+ addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
+ if (addr > INT_MAX)
+ WARN(1, "would truncate bmap result\n");
+ else
+ *bno = addr;
+ }
+ return 0;
+}
+
+/* legacy ->bmap interface. 0 is the error return (!) */
+sector_t
+iomap_bmap(struct address_space *mapping, sector_t bno,
+ const struct iomap_ops *ops)
+{
+ struct inode *inode = mapping->host;
+ loff_t pos = bno << inode->i_blkbits;
+ unsigned blocksize = i_blocksize(inode);
+
+ if (filemap_write_and_wait(mapping))
+ return 0;
+
+ bno = 0;
+ iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
+ return bno;
+}
+EXPORT_SYMBOL_GPL(iomap_bmap);
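
Both report paths reduce to one-line wrappers in a typical consumer (sketch, assumed names): ->fiemap lives in inode_operations, the legacy ->bmap in address_space_operations:

    static int myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
                    u64 start, u64 len)
    {
            return iomap_fiemap(inode, fi, start, len, &myfs_iomap_ops);
    }

    static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
    {
            return iomap_bmap(mapping, block, &myfs_iomap_ops);
    }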
diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
new file mode 100644
index 000000000000..c04bad4b2b43
--- /dev/null
+++ b/fs/iomap/seek.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Red Hat, Inc.
+ * Copyright (c) 2018 Christoph Hellwig.
+ */
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/fs.h>
+#include <linux/iomap.h>
+#include <linux/pagemap.h>
+#include <linux/pagevec.h>
+
+/*
+ * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
+ * Returns true if found and updates @lastoff to the offset in the file.
+ */
+static bool
+page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
+ int whence)
+{
+ const struct address_space_operations *ops = inode->i_mapping->a_ops;
+ unsigned int bsize = i_blocksize(inode), off;
+ bool seek_data = whence == SEEK_DATA;
+ loff_t poff = page_offset(page);
+
+ if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
+ return false;
+
+ if (*lastoff < poff) {
+ /*
+ * Last offset smaller than the start of the page means we found
+ * a hole:
+ */
+ if (whence == SEEK_HOLE)
+ return true;
+ *lastoff = poff;
+ }
+
+ /*
+ * Just check the page unless we can and should check block ranges:
+ */
+ if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
+ return PageUptodate(page) == seek_data;
+
+ lock_page(page);
+ if (unlikely(page->mapping != inode->i_mapping))
+ goto out_unlock_not_found;
+
+ for (off = 0; off < PAGE_SIZE; off += bsize) {
+ if (offset_in_page(*lastoff) >= off + bsize)
+ continue;
+ if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
+ unlock_page(page);
+ return true;
+ }
+ *lastoff = poff + off + bsize;
+ }
+
+out_unlock_not_found:
+ unlock_page(page);
+ return false;
+}
+
+/*
+ * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
+ *
+ * Within unwritten extents, the page cache determines which parts are holes
+ * and which are data: uptodate buffer heads count as data; everything else
+ * counts as a hole.
+ *
+ * Returns the resulting offset on success, and -ENOENT otherwise.
+ */
+static loff_t
+page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
+ int whence)
+{
+ pgoff_t index = offset >> PAGE_SHIFT;
+ pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
+ loff_t lastoff = offset;
+ struct pagevec pvec;
+
+ if (length <= 0)
+ return -ENOENT;
+
+ pagevec_init(&pvec);
+
+ do {
+ unsigned nr_pages, i;
+
+ nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
+ end - 1);
+ if (nr_pages == 0)
+ break;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+
+ if (page_seek_hole_data(inode, page, &lastoff, whence))
+ goto check_range;
+ lastoff = page_offset(page) + PAGE_SIZE;
+ }
+ pagevec_release(&pvec);
+ } while (index < end);
+
+	/* If there is no page at lastoff and we are not done, we found a hole. */
+ if (whence != SEEK_HOLE)
+ goto not_found;
+
+check_range:
+ if (lastoff < offset + length)
+ goto out;
+not_found:
+ lastoff = -ENOENT;
+out:
+ pagevec_release(&pvec);
+ return lastoff;
+}
+
+
+static loff_t
+iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
+ void *data, struct iomap *iomap)
+{
+ switch (iomap->type) {
+ case IOMAP_UNWRITTEN:
+ offset = page_cache_seek_hole_data(inode, offset, length,
+ SEEK_HOLE);
+ if (offset < 0)
+ return length;
+ /* fall through */
+ case IOMAP_HOLE:
+ *(loff_t *)data = offset;
+ return 0;
+ default:
+ return length;
+ }
+}
+
+loff_t
+iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
+{
+ loff_t size = i_size_read(inode);
+ loff_t length = size - offset;
+ loff_t ret;
+
+ /* Nothing to be found before or beyond the end of the file. */
+ if (offset < 0 || offset >= size)
+ return -ENXIO;
+
+ while (length > 0) {
+ ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
+ &offset, iomap_seek_hole_actor);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ break;
+
+ offset += ret;
+ length -= ret;
+ }
+
+ return offset;
+}
+EXPORT_SYMBOL_GPL(iomap_seek_hole);
+
+static loff_t
+iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
+ void *data, struct iomap *iomap)
+{
+ switch (iomap->type) {
+ case IOMAP_HOLE:
+ return length;
+ case IOMAP_UNWRITTEN:
+ offset = page_cache_seek_hole_data(inode, offset, length,
+ SEEK_DATA);
+ if (offset < 0)
+ return length;
+		/* fall through */
+ default:
+ *(loff_t *)data = offset;
+ return 0;
+ }
+}
+
+loff_t
+iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
+{
+ loff_t size = i_size_read(inode);
+ loff_t length = size - offset;
+ loff_t ret;
+
+ /* Nothing to be found before or beyond the end of the file. */
+ if (offset < 0 || offset >= size)
+ return -ENXIO;
+
+ while (length > 0) {
+ ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
+ &offset, iomap_seek_data_actor);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ break;
+
+ offset += ret;
+ length -= ret;
+ }
+
+ if (length <= 0)
+ return -ENXIO;
+ return offset;
+}
+EXPORT_SYMBOL_GPL(iomap_seek_data);
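
A sketch of the corresponding ->llseek (hypothetical wiring, close to what xfs does): SEEK_HOLE and SEEK_DATA go through the helpers above, everything else through the generic code, and vfs_setpos() validates and stores the final offset:

    static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
    {
            struct inode *inode = file->f_mapping->host;
            loff_t ret;

            switch (whence) {
            case SEEK_HOLE:
                    ret = iomap_seek_hole(inode, offset, &myfs_iomap_ops);
                    break;
            case SEEK_DATA:
                    ret = iomap_seek_data(inode, offset, &myfs_iomap_ops);
                    break;
            default:
                    return generic_file_llseek(file, offset, whence);
            }

            if (ret < 0)
                    return ret;
            return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
    }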
diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c
new file mode 100644
index 000000000000..152a230f668d
--- /dev/null
+++ b/fs/iomap/swapfile.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ */
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/fs.h>
+#include <linux/iomap.h>
+#include <linux/swap.h>
+
+/* Swapfile activation */
+
+struct iomap_swapfile_info {
+ struct iomap iomap; /* accumulated iomap */
+ struct swap_info_struct *sis;
+ uint64_t lowest_ppage; /* lowest physical addr seen (pages) */
+ uint64_t highest_ppage; /* highest physical addr seen (pages) */
+ unsigned long nr_pages; /* number of pages collected */
+ int nr_extents; /* extent count */
+};
+
+/*
+ * Collect physical extents for this swap file. Physical extents reported to
+ * the swap code must be trimmed to align to a page boundary. The logical
+ * offset within the file is irrelevant since the swapfile code maps logical
+ * page numbers of the swap device to the physical page-aligned extents.
+ */
+static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
+{
+ struct iomap *iomap = &isi->iomap;
+ unsigned long nr_pages;
+ uint64_t first_ppage;
+ uint64_t first_ppage_reported;
+ uint64_t next_ppage;
+ int error;
+
+ /*
+ * Round the start up and the end down so that the physical
+ * extent aligns to a page boundary.
+ */
+ first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
+ next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
+ PAGE_SHIFT;
+
+ /* Skip too-short physical extents. */
+ if (first_ppage >= next_ppage)
+ return 0;
+ nr_pages = next_ppage - first_ppage;
+
+ /*
+ * Calculate how much swap space we're adding; the first page contains
+ * the swap header and doesn't count. The mm still wants that first
+ * page fed to add_swap_extent, however.
+ */
+ first_ppage_reported = first_ppage;
+ if (iomap->offset == 0)
+ first_ppage_reported++;
+ if (isi->lowest_ppage > first_ppage_reported)
+ isi->lowest_ppage = first_ppage_reported;
+ if (isi->highest_ppage < (next_ppage - 1))
+ isi->highest_ppage = next_ppage - 1;
+
+ /* Add extent, set up for the next call. */
+ error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
+ if (error < 0)
+ return error;
+ isi->nr_extents += error;
+ isi->nr_pages += nr_pages;
+ return 0;
+}
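
As a worked example of the rounding (illustrative numbers): an extent at addr 0x1200 with length 0x3000 on 4k pages gives first_ppage == 2 and next_ppage == 4, so only the two fully aligned physical pages in between are fed to add_swap_extent() and the unaligned head and tail bytes are dropped.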
+
+/*
+ * Accumulate iomaps for this swap file. We have to accumulate iomaps because
+ * swap only cares about contiguous page-aligned physical extents and makes no
+ * distinction between written and unwritten extents.
+ */
+static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
+ loff_t count, void *data, struct iomap *iomap)
+{
+ struct iomap_swapfile_info *isi = data;
+ int error;
+
+ switch (iomap->type) {
+ case IOMAP_MAPPED:
+ case IOMAP_UNWRITTEN:
+ /* Only real or unwritten extents. */
+ break;
+ case IOMAP_INLINE:
+ /* No inline data. */
+ pr_err("swapon: file is inline\n");
+ return -EINVAL;
+ default:
+ pr_err("swapon: file has unallocated extents\n");
+ return -EINVAL;
+ }
+
+ /* No uncommitted metadata or shared blocks. */
+ if (iomap->flags & IOMAP_F_DIRTY) {
+ pr_err("swapon: file is not committed\n");
+ return -EINVAL;
+ }
+ if (iomap->flags & IOMAP_F_SHARED) {
+ pr_err("swapon: file has shared extents\n");
+ return -EINVAL;
+ }
+
+ /* Only one bdev per swap file. */
+ if (iomap->bdev != isi->sis->bdev) {
+ pr_err("swapon: file is on multiple devices\n");
+ return -EINVAL;
+ }
+
+ if (isi->iomap.length == 0) {
+ /* No accumulated extent, so just store it. */
+ memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
+ } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
+ /* Append this to the accumulated extent. */
+ isi->iomap.length += iomap->length;
+ } else {
+ /* Otherwise, add the retained iomap and store this one. */
+ error = iomap_swapfile_add_extent(isi);
+ if (error)
+ return error;
+ memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
+ }
+ return count;
+}
+
+/*
+ * Iterate a swap file's iomaps to construct physical extents that can be
+ * passed to the swapfile subsystem.
+ */
+int iomap_swapfile_activate(struct swap_info_struct *sis,
+ struct file *swap_file, sector_t *pagespan,
+ const struct iomap_ops *ops)
+{
+ struct iomap_swapfile_info isi = {
+ .sis = sis,
+ .lowest_ppage = (sector_t)-1ULL,
+ };
+ struct address_space *mapping = swap_file->f_mapping;
+ struct inode *inode = mapping->host;
+ loff_t pos = 0;
+ loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
+ loff_t ret;
+
+ /*
+ * Persist all file mapping metadata so that we won't have any
+ * IOMAP_F_DIRTY iomaps.
+ */
+ ret = vfs_fsync(swap_file, 1);
+ if (ret)
+ return ret;
+
+ while (len > 0) {
+ ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
+ ops, &isi, iomap_swapfile_activate_actor);
+ if (ret <= 0)
+ return ret;
+
+ pos += ret;
+ len -= ret;
+ }
+
+ if (isi.iomap.length) {
+ ret = iomap_swapfile_add_extent(&isi);
+ if (ret)
+ return ret;
+ }
+
+ *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
+ sis->max = isi.nr_pages;
+ sis->pages = isi.nr_pages - 1;
+ sis->highest_bit = isi.nr_pages - 1;
+ return isi.nr_extents;
+}
+EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
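
The expected consumer is ->swap_activate in address_space_operations, which again reduces to a one-liner (sketch, assumed names):

    static int myfs_swap_activate(struct swap_info_struct *sis,
                    struct file *swap_file, sector_t *span)
    {
            return iomap_swapfile_activate(sis, swap_file, span,
                            &myfs_iomap_ops);
    }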
diff --git a/fs/libfs.c b/fs/libfs.c
index 7e52e77692ec..c9b2850c0f7c 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -17,6 +17,8 @@
#include <linux/exportfs.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* sync_mapping_buffers */
+#include <linux/fs_context.h>
+#include <linux/pseudo_fs.h>
#include <linux/uaccess.h>
@@ -236,34 +238,22 @@ static const struct super_operations simple_super_operations = {
.statfs = simple_statfs,
};
-/*
- * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
- * will never be mountable)
- */
-struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
- const struct super_operations *ops, const struct xattr_handler **xattr,
- const struct dentry_operations *dops, unsigned long magic)
+static int pseudo_fs_fill_super(struct super_block *s, struct fs_context *fc)
{
- struct super_block *s;
- struct dentry *dentry;
+ struct pseudo_fs_context *ctx = fc->fs_private;
struct inode *root;
- struct qstr d_name = QSTR_INIT(name, strlen(name));
-
- s = sget_userns(fs_type, NULL, set_anon_super, SB_KERNMOUNT|SB_NOUSER,
- &init_user_ns, NULL);
- if (IS_ERR(s))
- return ERR_CAST(s);
s->s_maxbytes = MAX_LFS_FILESIZE;
s->s_blocksize = PAGE_SIZE;
s->s_blocksize_bits = PAGE_SHIFT;
- s->s_magic = magic;
- s->s_op = ops ? ops : &simple_super_operations;
- s->s_xattr = xattr;
+ s->s_magic = ctx->magic;
+ s->s_op = ctx->ops ?: &simple_super_operations;
+ s->s_xattr = ctx->xattr;
s->s_time_gran = 1;
root = new_inode(s);
if (!root)
- goto Enomem;
+ return -ENOMEM;
+
/*
* since this is the first inode, make it number 1. New inodes created
* after this must take care not to collide with it (by passing
@@ -272,22 +262,48 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
root->i_ino = 1;
root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
root->i_atime = root->i_mtime = root->i_ctime = current_time(root);
- dentry = __d_alloc(s, &d_name);
- if (!dentry) {
- iput(root);
- goto Enomem;
+ s->s_root = d_make_root(root);
+ if (!s->s_root)
+ return -ENOMEM;
+ s->s_d_op = ctx->dops;
+ return 0;
+}
+
+static int pseudo_fs_get_tree(struct fs_context *fc)
+{
+ return get_tree_nodev(fc, pseudo_fs_fill_super);
+}
+
+static void pseudo_fs_free(struct fs_context *fc)
+{
+ kfree(fc->fs_private);
+}
+
+static const struct fs_context_operations pseudo_fs_context_ops = {
+ .free = pseudo_fs_free,
+ .get_tree = pseudo_fs_get_tree,
+};
+
+/*
+ * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
+ * will never be mountable)
+ */
+struct pseudo_fs_context *init_pseudo(struct fs_context *fc,
+ unsigned long magic)
+{
+ struct pseudo_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct pseudo_fs_context), GFP_KERNEL);
+ if (likely(ctx)) {
+ ctx->magic = magic;
+ fc->fs_private = ctx;
+ fc->ops = &pseudo_fs_context_ops;
+ fc->sb_flags |= SB_NOUSER;
+ fc->global = true;
}
- d_instantiate(dentry, root);
- s->s_root = dentry;
- s->s_d_op = dops;
- s->s_flags |= SB_ACTIVE;
- return dget(s->s_root);
-
-Enomem:
- deactivate_locked_super(s);
- return ERR_PTR(-ENOMEM);
+ return ctx;
}
-EXPORT_SYMBOL(mount_pseudo_xattr);
+EXPORT_SYMBOL(init_pseudo);
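
For comparison, a converted pseudo-filesystem now registers an init_fs_context hook and calls init_pseudo() from it; a minimal sketch, with myfs_* and MYFS_MAGIC as stand-in names:

    static int myfs_init_fs_context(struct fs_context *fc)
    {
            struct pseudo_fs_context *ctx = init_pseudo(fc, MYFS_MAGIC);

            if (!ctx)
                    return -ENOMEM;
            ctx->ops = &myfs_super_ops;     /* optional; simple defaults otherwise */
            ctx->dops = &myfs_dentry_ops;   /* optional */
            return 0;
    }

    static struct file_system_type myfs_fs_type = {
            .name                   = "myfs",
            .init_fs_context        = myfs_init_fs_context,
            .kill_sb                = kill_anon_super,
    };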
int simple_open(struct inode *inode, struct file *file)
{
diff --git a/fs/mount.h b/fs/mount.h
index 6250de544760..711a4093e475 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -58,7 +58,10 @@ struct mount {
struct mount *mnt_master; /* slave is on master->mnt_slave_list */
struct mnt_namespace *mnt_ns; /* containing namespace */
struct mountpoint *mnt_mp; /* where is it mounted */
- struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
+ union {
+ struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
+ struct hlist_node mnt_umount;
+ };
struct list_head mnt_umounting; /* list entry for umount propagation */
#ifdef CONFIG_FSNOTIFY
struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks;
@@ -68,8 +71,7 @@ struct mount {
int mnt_group_id; /* peer group identifier */
int mnt_expiry_mark; /* true if marked for expiry */
struct hlist_head mnt_pins;
- struct fs_pin mnt_umount;
- struct dentry *mnt_ex_mountpoint;
+ struct hlist_head mnt_stuck_children;
} __randomize_layout;
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
diff --git a/fs/namespace.c b/fs/namespace.c
index 6fbc9126367a..6464ea4acba9 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -29,6 +29,7 @@
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
+#include <linux/shmem_fs.h>
#include "pnode.h"
#include "internal.h"
@@ -69,6 +70,8 @@ static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);
+static HLIST_HEAD(unmounted); /* protected by namespace_sem */
+static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
/* /sys/fs */
struct kobject *fs_kobj;
@@ -169,14 +172,6 @@ unsigned int mnt_get_count(struct mount *mnt)
#endif
}
-static void drop_mountpoint(struct fs_pin *p)
-{
- struct mount *m = container_of(p, struct mount, mnt_umount);
- dput(m->mnt_ex_mountpoint);
- pin_remove(p);
- mntput(&m->mnt);
-}
-
static struct mount *alloc_vfsmnt(const char *name)
{
struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
@@ -214,7 +209,7 @@ static struct mount *alloc_vfsmnt(const char *name)
INIT_LIST_HEAD(&mnt->mnt_slave);
INIT_HLIST_NODE(&mnt->mnt_mp_list);
INIT_LIST_HEAD(&mnt->mnt_umounting);
- init_fs_pin(&mnt->mnt_umount, drop_mountpoint);
+ INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
}
return mnt;
@@ -739,7 +734,7 @@ mountpoint:
/* Add the new mountpoint to the hash table */
read_seqlock_excl(&mount_lock);
- new->m_dentry = dentry;
+ new->m_dentry = dget(dentry);
new->m_count = 1;
hlist_add_head(&new->m_hash, mp_hash(dentry));
INIT_HLIST_HEAD(&new->m_list);
@@ -752,7 +747,11 @@ done:
return mp;
}
-static void put_mountpoint(struct mountpoint *mp)
+/*
+ * vfsmount lock must be held. Additionally, the caller is responsible
+ * for serializing calls for a given disposal list.
+ */
+static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
{
if (!--mp->m_count) {
struct dentry *dentry = mp->m_dentry;
@@ -760,11 +759,18 @@ static void put_mountpoint(struct mountpoint *mp)
spin_lock(&dentry->d_lock);
dentry->d_flags &= ~DCACHE_MOUNTED;
spin_unlock(&dentry->d_lock);
+ dput_to_list(dentry, list);
hlist_del(&mp->m_hash);
kfree(mp);
}
}
+/* called with namespace_lock and vfsmount lock */
+static void put_mountpoint(struct mountpoint *mp)
+{
+ __put_mountpoint(mp, &ex_mountpoints);
+}
+
static inline int check_mnt(struct mount *mnt)
{
return mnt->mnt_ns == current->nsproxy->mnt_ns;
@@ -795,25 +801,17 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
/*
* vfsmount lock must be held for write
*/
-static void unhash_mnt(struct mount *mnt)
+static struct mountpoint *unhash_mnt(struct mount *mnt)
{
+ struct mountpoint *mp;
mnt->mnt_parent = mnt;
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
list_del_init(&mnt->mnt_child);
hlist_del_init_rcu(&mnt->mnt_hash);
hlist_del_init(&mnt->mnt_mp_list);
- put_mountpoint(mnt->mnt_mp);
+ mp = mnt->mnt_mp;
mnt->mnt_mp = NULL;
-}
-
-/*
- * vfsmount lock must be held for write
- */
-static void detach_mnt(struct mount *mnt, struct path *old_path)
-{
- old_path->dentry = mnt->mnt_mountpoint;
- old_path->mnt = &mnt->mnt_parent->mnt;
- unhash_mnt(mnt);
+ return mp;
}
/*
@@ -821,9 +819,7 @@ static void detach_mnt(struct mount *mnt, struct path *old_path)
*/
static void umount_mnt(struct mount *mnt)
{
- /* old mountpoint will be dropped when we can do that */
- mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
- unhash_mnt(mnt);
+ put_mountpoint(unhash_mnt(mnt));
}
/*
@@ -835,7 +831,7 @@ void mnt_set_mountpoint(struct mount *mnt,
{
mp->m_count++;
mnt_add_count(mnt, 1); /* essentially, that's mntget */
- child_mnt->mnt_mountpoint = dget(mp->m_dentry);
+ child_mnt->mnt_mountpoint = mp->m_dentry;
child_mnt->mnt_parent = mnt;
child_mnt->mnt_mp = mp;
hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
@@ -862,7 +858,6 @@ static void attach_mnt(struct mount *mnt,
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
struct mountpoint *old_mp = mnt->mnt_mp;
- struct dentry *old_mountpoint = mnt->mnt_mountpoint;
struct mount *old_parent = mnt->mnt_parent;
list_del_init(&mnt->mnt_child);
@@ -872,22 +867,6 @@ void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct m
attach_mnt(mnt, parent, mp);
put_mountpoint(old_mp);
-
- /*
- * Safely avoid even the suggestion this code might sleep or
- * lock the mount hash by taking advantage of the knowledge that
- * mnt_change_mountpoint will not release the final reference
- * to a mountpoint.
- *
- * During mounting, the mount passed in as the parent mount will
- * continue to use the old mountpoint and during unmounting, the
- * old mountpoint will continue to exist until namespace_unlock,
- * which happens well after mnt_change_mountpoint.
- */
- spin_lock(&old_mountpoint->d_lock);
- old_mountpoint->d_lockref.count--;
- spin_unlock(&old_mountpoint->d_lock);
-
mnt_add_count(old_parent, -1);
}
@@ -1102,19 +1081,22 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
static void cleanup_mnt(struct mount *mnt)
{
+ struct hlist_node *p;
+ struct mount *m;
/*
- * This probably indicates that somebody messed
- * up a mnt_want/drop_write() pair. If this
- * happens, the filesystem was probably unable
- * to make r/w->r/o transitions.
- */
- /*
+ * The warning here probably indicates that somebody messed
+ * up a mnt_want/drop_write() pair. If this happens, the
+ * filesystem was probably unable to make r/w->r/o transitions.
* The locking used to deal with mnt_count decrement provides barriers,
* so mnt_get_writers() below is safe.
*/
WARN_ON(mnt_get_writers(mnt));
if (unlikely(mnt->mnt_pins.first))
mnt_pin_kill(mnt);
+ hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
+ hlist_del(&m->mnt_umount);
+ mntput(&m->mnt);
+ }
fsnotify_vfsmount_delete(&mnt->mnt);
dput(mnt->mnt.mnt_root);
deactivate_super(mnt->mnt.mnt_sb);
@@ -1140,6 +1122,8 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
static void mntput_no_expire(struct mount *mnt)
{
+ LIST_HEAD(list);
+
rcu_read_lock();
if (likely(READ_ONCE(mnt->mnt_ns))) {
/*
@@ -1180,10 +1164,12 @@ static void mntput_no_expire(struct mount *mnt)
if (unlikely(!list_empty(&mnt->mnt_mounts))) {
struct mount *p, *tmp;
list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
- umount_mnt(p);
+ __put_mountpoint(unhash_mnt(p), &list);
+ hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
}
}
unlock_mount_hash();
+ shrink_dentry_list(&list);
if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
struct task_struct *task = current;
@@ -1369,22 +1355,29 @@ int may_umount(struct vfsmount *mnt)
EXPORT_SYMBOL(may_umount);
-static HLIST_HEAD(unmounted); /* protected by namespace_sem */
-
static void namespace_unlock(void)
{
struct hlist_head head;
+ struct hlist_node *p;
+ struct mount *m;
+ LIST_HEAD(list);
hlist_move_list(&unmounted, &head);
+ list_splice_init(&ex_mountpoints, &list);
up_write(&namespace_sem);
+ shrink_dentry_list(&list);
+
if (likely(hlist_empty(&head)))
return;
synchronize_rcu_expedited();
- group_pin_kill(&head);
+ hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
+ hlist_del(&m->mnt_umount);
+ mntput(&m->mnt);
+ }
}
static inline void namespace_lock(void)
@@ -1471,8 +1464,6 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
disconnect = disconnect_mount(p, how);
- pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
- disconnect ? &unmounted : NULL);
if (mnt_has_parent(p)) {
mnt_add_count(p->mnt_parent, -1);
if (!disconnect) {
@@ -1480,6 +1471,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
} else {
umount_mnt(p);
+ hlist_add_head(&p->mnt_umount, &unmounted);
}
}
change_mnt_propagation(p, MS_PRIVATE);
@@ -1625,15 +1617,15 @@ void __detach_mounts(struct dentry *dentry)
namespace_lock();
lock_mount_hash();
mp = lookup_mountpoint(dentry);
- if (IS_ERR_OR_NULL(mp))
+ if (!mp)
goto out_unlock;
event++;
while (!hlist_empty(&mp->m_list)) {
mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
- hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
umount_mnt(mnt);
+ hlist_add_head(&mnt->mnt_umount, &unmounted);
}
else umount_tree(mnt, UMOUNT_CONNECTED);
}
@@ -2045,7 +2037,7 @@ int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
static int attach_recursive_mnt(struct mount *source_mnt,
struct mount *dest_mnt,
struct mountpoint *dest_mp,
- struct path *parent_path)
+ bool moving)
{
struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
HLIST_HEAD(tree_list);
@@ -2063,7 +2055,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
return PTR_ERR(smp);
/* Is there space to add these mounts to the mount namespace? */
- if (!parent_path) {
+ if (!moving) {
err = count_mounts(ns, source_mnt);
if (err)
goto out;
@@ -2082,8 +2074,8 @@ static int attach_recursive_mnt(struct mount *source_mnt,
} else {
lock_mount_hash();
}
- if (parent_path) {
- detach_mnt(source_mnt, parent_path);
+ if (moving) {
+ unhash_mnt(source_mnt);
attach_mnt(source_mnt, dest_mnt, dest_mp);
touch_mnt_namespace(source_mnt->mnt_ns);
} else {
@@ -2181,7 +2173,7 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
d_is_dir(mnt->mnt.mnt_root))
return -ENOTDIR;
- return attach_recursive_mnt(mnt, p, mp, NULL);
+ return attach_recursive_mnt(mnt, p, mp, false);
}
/*
@@ -2574,11 +2566,11 @@ out:
static int do_move_mount(struct path *old_path, struct path *new_path)
{
- struct path parent_path = {.mnt = NULL, .dentry = NULL};
struct mnt_namespace *ns;
struct mount *p;
struct mount *old;
- struct mountpoint *mp;
+ struct mount *parent;
+ struct mountpoint *mp, *old_mp;
int err;
bool attached;
@@ -2588,7 +2580,9 @@ static int do_move_mount(struct path *old_path, struct path *new_path)
old = real_mount(old_path->mnt);
p = real_mount(new_path->mnt);
+ parent = old->mnt_parent;
attached = mnt_has_parent(old);
+ old_mp = old->mnt_mp;
ns = old->mnt_ns;
err = -EINVAL;
@@ -2616,7 +2610,7 @@ static int do_move_mount(struct path *old_path, struct path *new_path)
/*
* Don't move a mount residing in a shared parent.
*/
- if (attached && IS_MNT_SHARED(old->mnt_parent))
+ if (attached && IS_MNT_SHARED(parent))
goto out;
/*
* Don't move a mount tree containing unbindable mounts to a destination
@@ -2632,18 +2626,21 @@ static int do_move_mount(struct path *old_path, struct path *new_path)
goto out;
err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp,
- attached ? &parent_path : NULL);
+ attached);
if (err)
goto out;
/* if the mount is moved, it should no longer expire
* automatically */
list_del_init(&old->mnt_expire);
+ if (attached)
+ put_mountpoint(old_mp);
out:
unlock_mount(mp);
if (!err) {
- path_put(&parent_path);
- if (!attached)
+ if (attached)
+ mntput_no_expire(parent);
+ else
free_mnt_ns(ns);
}
return err;
@@ -2788,6 +2785,8 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
err = vfs_parse_fs_string(fc, "source", name, strlen(name));
if (!err)
err = parse_monolithic_mount_data(fc, data);
+ if (!err && !mount_capable(fc))
+ err = -EPERM;
if (!err)
err = vfs_get_tree(fc);
if (!err)
@@ -3295,8 +3294,8 @@ struct dentry *mount_subtree(struct vfsmount *m, const char *name)
}
EXPORT_SYMBOL(mount_subtree);
-int ksys_mount(char __user *dev_name, char __user *dir_name, char __user *type,
- unsigned long flags, void __user *data)
+int ksys_mount(const char __user *dev_name, const char __user *dir_name,
+ const char __user *type, unsigned long flags, void __user *data)
{
int ret;
char *kernel_type;
@@ -3586,8 +3585,8 @@ EXPORT_SYMBOL(path_is_under);
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
const char __user *, put_old)
{
- struct path new, old, parent_path, root_parent, root;
- struct mount *new_mnt, *root_mnt, *old_mnt;
+ struct path new, old, root;
+ struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
struct mountpoint *old_mp, *root_mp;
int error;
@@ -3616,9 +3615,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
new_mnt = real_mount(new.mnt);
root_mnt = real_mount(root.mnt);
old_mnt = real_mount(old.mnt);
+ ex_parent = new_mnt->mnt_parent;
+ root_parent = root_mnt->mnt_parent;
if (IS_MNT_SHARED(old_mnt) ||
- IS_MNT_SHARED(new_mnt->mnt_parent) ||
- IS_MNT_SHARED(root_mnt->mnt_parent))
+ IS_MNT_SHARED(ex_parent) ||
+ IS_MNT_SHARED(root_parent))
goto out4;
if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
goto out4;
@@ -3635,7 +3636,6 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
goto out4; /* not a mountpoint */
if (!mnt_has_parent(root_mnt))
goto out4; /* not attached */
- root_mp = root_mnt->mnt_mp;
if (new.mnt->mnt_root != new.dentry)
goto out4; /* not a mountpoint */
if (!mnt_has_parent(new_mnt))
@@ -3646,10 +3646,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
/* make certain new is below the root */
if (!is_path_reachable(new_mnt, new.dentry, &root))
goto out4;
- root_mp->m_count++; /* pin it so it won't go away */
lock_mount_hash();
- detach_mnt(new_mnt, &parent_path);
- detach_mnt(root_mnt, &root_parent);
+ umount_mnt(new_mnt);
+ root_mp = unhash_mnt(root_mnt); /* we'll need its mountpoint */
if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
new_mnt->mnt.mnt_flags |= MNT_LOCKED;
root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
@@ -3657,7 +3656,8 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
/* mount old root on put_old */
attach_mnt(root_mnt, old_mnt, old_mp);
/* mount new_root on / */
- attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
+ attach_mnt(new_mnt, root_parent, root_mp);
+ mnt_add_count(root_parent, -1);
touch_mnt_namespace(current->nsproxy->mnt_ns);
/* A moved mount should not expire automatically */
list_del_init(&new_mnt->mnt_expire);
@@ -3667,10 +3667,8 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
error = 0;
out4:
unlock_mount(old_mp);
- if (!error) {
- path_put(&root_parent);
- path_put(&parent_path);
- }
+ if (!error)
+ mntput_no_expire(ex_parent);
out3:
path_put(&root);
out2:
@@ -3687,13 +3685,8 @@ static void __init init_mount_tree(void)
struct mount *m;
struct mnt_namespace *ns;
struct path root;
- struct file_system_type *type;
- type = get_fs_type("rootfs");
- if (!type)
- panic("Can't find rootfs type");
- mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
- put_filesystem(type);
+ mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
if (IS_ERR(mnt))
panic("Can't create rootfs");
@@ -3746,6 +3739,7 @@ void __init mnt_init(void)
fs_kobj = kobject_create_and_add("fs", NULL);
if (!fs_kobj)
printk(KERN_WARNING "%s: kobj create error\n", __func__);
+ shmem_init();
init_rootfs();
init_mount_tree();
}
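Taken together, these namespace.c hunks replace the fs_pin machinery with plain hlists plus a dentry "disposal list": references are queued under mount_lock with dput_to_list() and released in bulk once the lock is dropped. A hedged sketch of the pattern (the function is illustrative; dput_to_list() and shrink_dentry_list() are the dcache helpers this series uses):

static void example_release_mountpoints(void)
{
	LIST_HEAD(list);

	lock_mount_hash();
	/* unhash mounts here; __put_mountpoint() feeds each mountpoint
	 * dentry into the local list via dput_to_list() */
	unlock_mount_hash();

	/* the final dput work may sleep, so do it lock-free, in bulk */
	shrink_dentry_list(&list);
}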
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index c587e3c4c6a6..34cdeaecccf6 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -8,7 +8,8 @@ obj-$(CONFIG_NFS_FS) += nfs.o
CFLAGS_nfstrace.o += -I$(src)
nfs-y := client.o dir.o file.o getroot.o inode.o super.o \
io.o direct.o pagelist.o read.o symlink.o unlink.o \
- write.o namespace.o mount_clnt.o nfstrace.o export.o
+ write.o namespace.o mount_clnt.o nfstrace.o \
+ export.o sysfs.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 315967354954..f39924ba050b 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -414,27 +414,39 @@ static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
const struct cb_sequenceargs * args)
{
+ __be32 ret;
+
+ ret = cpu_to_be32(NFS4ERR_BADSLOT);
if (args->csa_slotid > tbl->server_highest_slotid)
- return htonl(NFS4ERR_BADSLOT);
+ goto out_err;
/* Replay */
if (args->csa_sequenceid == slot->seq_nr) {
+ ret = cpu_to_be32(NFS4ERR_DELAY);
if (nfs4_test_locked_slot(tbl, slot->slot_nr))
- return htonl(NFS4ERR_DELAY);
+ goto out_err;
+
/* Signal process_op to set this error on next op */
+ ret = cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP);
if (args->csa_cachethis == 0)
- return htonl(NFS4ERR_RETRY_UNCACHED_REP);
+ goto out_err;
/* Liar! We never allowed you to set csa_cachethis != 0 */
- return htonl(NFS4ERR_SEQ_FALSE_RETRY);
+ ret = cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY);
+ goto out_err;
}
/* Note: wraparound relies on seq_nr being of type u32 */
- if (likely(args->csa_sequenceid == slot->seq_nr + 1))
- return htonl(NFS4_OK);
-
/* Misordered request */
- return htonl(NFS4ERR_SEQ_MISORDERED);
+ ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED);
+ if (args->csa_sequenceid != slot->seq_nr + 1)
+ goto out_err;
+
+ return cpu_to_be32(NFS4_OK);
+
+out_err:
+ trace_nfs4_cb_seqid_err(args, ret);
+ return ret;
}
/*
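The validate_seqid() rewrite funnels every failure through a single exit label so one tracepoint observes all error cases, and swaps htonl() for cpu_to_be32(), the kernel-preferred spelling for producing a __be32. A hedged sketch of the shape (values illustrative):

static __be32 example_validate(u32 seqid, u32 expected)
{
	__be32 ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED);

	if (seqid != expected)
		goto out_err;
	return cpu_to_be32(NFS4_OK);
out_err:
	/* exactly one place fires trace_nfs4_cb_seqid_err(args, ret) */
	return ret;
}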
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index d7e4f0848e28..30838304a0bf 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -49,6 +49,7 @@
#include "pnfs.h"
#include "nfs.h"
#include "netns.h"
+#include "sysfs.h"
#define NFSDBG_FACILITY NFSDBG_CLIENT
@@ -175,6 +176,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
clp->cl_rpcclient = ERR_PTR(-EINVAL);
clp->cl_proto = cl_init->proto;
+ clp->cl_nconnect = cl_init->nconnect;
clp->cl_net = get_net(cl_init->net);
clp->cl_principal = "*";
@@ -192,7 +194,7 @@ error_0:
EXPORT_SYMBOL_GPL(nfs_alloc_client);
#if IS_ENABLED(CONFIG_NFS_V4)
-void nfs_cleanup_cb_ident_idr(struct net *net)
+static void nfs_cleanup_cb_ident_idr(struct net *net)
{
struct nfs_net *nn = net_generic(net, nfs_net_id);
@@ -214,7 +216,7 @@ static void pnfs_init_server(struct nfs_server *server)
}
#else
-void nfs_cleanup_cb_ident_idr(struct net *net)
+static void nfs_cleanup_cb_ident_idr(struct net *net)
{
}
@@ -406,10 +408,10 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
clp = nfs_match_client(cl_init);
if (clp) {
spin_unlock(&nn->nfs_client_lock);
- if (IS_ERR(clp))
- return clp;
if (new)
new->rpc_ops->free_client(new);
+ if (IS_ERR(clp))
+ return clp;
return nfs_found_client(cl_init, clp);
}
if (new) {
@@ -493,6 +495,7 @@ int nfs_create_rpc_client(struct nfs_client *clp,
struct rpc_create_args args = {
.net = clp->cl_net,
.protocol = clp->cl_proto,
+ .nconnect = clp->cl_nconnect,
.address = (struct sockaddr *)&clp->cl_addr,
.addrsize = clp->cl_addrlen,
.timeout = cl_init->timeparms,
@@ -658,6 +661,7 @@ static int nfs_init_server(struct nfs_server *server,
.net = data->net,
.timeparms = &timeparms,
.cred = server->cred,
+ .nconnect = data->nfs_server.nconnect,
};
struct nfs_client *clp;
int error;
@@ -1072,6 +1076,18 @@ void nfs_clients_init(struct net *net)
#endif
spin_lock_init(&nn->nfs_client_lock);
nn->boot_time = ktime_get_real();
+
+ nfs_netns_sysfs_setup(nn, net);
+}
+
+void nfs_clients_exit(struct net *net)
+{
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+
+ nfs_netns_sysfs_destroy(nn);
+ nfs_cleanup_cb_ident_idr(net);
+ WARN_ON_ONCE(!list_empty(&nn->nfs_client_list));
+ WARN_ON_ONCE(!list_empty(&nn->nfs_volume_list));
}
#ifdef CONFIG_PROC_FS
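The cl_nconnect plumbing carries the new nconnect= mount option (per this series; e.g. "-o nconnect=4" requests four TCP connections to one server) down to the RPC layer. A hedged sketch of the flow, with field names taken from the hunks above and the function purely illustrative:

static void example_nconnect_flow(struct nfs_client *clp,
				  const struct nfs_client_initdata *cl_init)
{
	struct rpc_create_args args = {
		.protocol	= clp->cl_proto,
		.nconnect	= clp->cl_nconnect, /* set from cl_init */
	};

	(void)args;	/* rpc_create(&args) would open args.nconnect xprts */
}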
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 57b6a45576ad..8d501093660f 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -80,6 +80,10 @@ static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir
ctx->dup_cookie = 0;
ctx->cred = get_cred(cred);
spin_lock(&dir->i_lock);
+ if (list_empty(&nfsi->open_files) &&
+ (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
+ nfsi->cache_validity |= NFS_INO_INVALID_DATA |
+ NFS_INO_REVAL_FORCED;
list_add(&ctx->list, &nfsi->open_files);
spin_unlock(&dir->i_lock);
return ctx;
@@ -140,19 +144,12 @@ struct nfs_cache_array {
struct nfs_cache_array_entry array[0];
};
-struct readdirvec {
- unsigned long nr;
- unsigned long index;
- struct page *pages[NFS_MAX_READDIR_RAPAGES];
-};
-
typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, bool);
typedef struct {
struct file *file;
struct page *page;
struct dir_context *ctx;
unsigned long page_index;
- struct readdirvec pvec;
u64 *dir_cookie;
u64 last_cookie;
loff_t current_index;
@@ -532,10 +529,6 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
struct nfs_cache_array *array;
unsigned int count = 0;
int status;
- int max_rapages = NFS_MAX_READDIR_RAPAGES;
-
- desc->pvec.index = desc->page_index;
- desc->pvec.nr = 0;
scratch = alloc_page(GFP_KERNEL);
if (scratch == NULL)
@@ -560,40 +553,20 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
if (desc->plus)
nfs_prime_dcache(file_dentry(desc->file), entry);
- status = nfs_readdir_add_to_array(entry, desc->pvec.pages[desc->pvec.nr]);
- if (status == -ENOSPC) {
- desc->pvec.nr++;
- if (desc->pvec.nr == max_rapages)
- break;
- status = nfs_readdir_add_to_array(entry, desc->pvec.pages[desc->pvec.nr]);
- }
+ status = nfs_readdir_add_to_array(entry, page);
if (status != 0)
break;
} while (!entry->eof);
- /*
- * page and desc->pvec.pages[0] are valid, don't need to check
- * whether or not to be NULL.
- */
- copy_highpage(page, desc->pvec.pages[0]);
-
out_nopages:
if (count == 0 || (status == -EBADCOOKIE && entry->eof != 0)) {
- array = kmap_atomic(desc->pvec.pages[desc->pvec.nr]);
+ array = kmap(page);
array->eof_index = array->size;
status = 0;
- kunmap_atomic(array);
+ kunmap(page);
}
put_page(scratch);
-
- /*
- * desc->pvec.nr > 0 means at least one page was completely filled,
- * we should return -ENOSPC. Otherwise function
- * nfs_readdir_xdr_to_array will enter infinite loop.
- */
- if (desc->pvec.nr > 0)
- return -ENOSPC;
return status;
}
@@ -627,24 +600,6 @@ out_freepages:
return -ENOMEM;
}
-/*
- * nfs_readdir_rapages_init initialize rapages by nfs_cache_array structure.
- */
-static
-void nfs_readdir_rapages_init(nfs_readdir_descriptor_t *desc)
-{
- struct nfs_cache_array *array;
- int max_rapages = NFS_MAX_READDIR_RAPAGES;
- int index;
-
- for (index = 0; index < max_rapages; index++) {
- array = kmap_atomic(desc->pvec.pages[index]);
- memset(array, 0, sizeof(struct nfs_cache_array));
- array->eof_index = -1;
- kunmap_atomic(array);
- }
-}
-
static
int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, struct inode *inode)
{
@@ -655,12 +610,6 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
int status = -ENOMEM;
unsigned int array_size = ARRAY_SIZE(pages);
- /*
- * This means we hit readdir rdpages miss, the preallocated rdpages
- * are useless, the preallocate rdpages should be reinitialized.
- */
- nfs_readdir_rapages_init(desc);
-
entry.prev_cookie = 0;
entry.cookie = desc->last_cookie;
entry.eof = 0;
@@ -721,24 +670,9 @@ int nfs_readdir_filler(void *data, struct page* page)
struct inode *inode = file_inode(desc->file);
int ret;
- /*
- * If desc->page_index in range desc->pvec.index and
- * desc->pvec.index + desc->pvec.nr, we get readdir cache hit.
- */
- if (desc->page_index >= desc->pvec.index &&
- desc->page_index < (desc->pvec.index + desc->pvec.nr)) {
- /*
- * page and desc->pvec.pages[x] are valid, don't need to check
- * whether or not to be NULL.
- */
- copy_highpage(page, desc->pvec.pages[desc->page_index - desc->pvec.index]);
- ret = 0;
- } else {
- ret = nfs_readdir_xdr_to_array(desc, page, inode);
- if (ret < 0)
- goto error;
- }
-
+ ret = nfs_readdir_xdr_to_array(desc, page, inode);
+ if (ret < 0)
+ goto error;
SetPageUptodate(page);
if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) {
@@ -903,7 +837,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
*desc = &my_desc;
struct nfs_open_dir_context *dir_ctx = file->private_data;
int res = 0;
- int max_rapages = NFS_MAX_READDIR_RAPAGES;
dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n",
file, (long long)ctx->pos);
@@ -923,12 +856,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
desc->decode = NFS_PROTO(inode)->decode_dirent;
desc->plus = nfs_use_readdirplus(inode, ctx);
- res = nfs_readdir_alloc_pages(desc->pvec.pages, max_rapages);
- if (res < 0)
- return -ENOMEM;
-
- nfs_readdir_rapages_init(desc);
-
if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
res = nfs_revalidate_mapping(inode, file->f_mapping);
if (res < 0)
@@ -964,7 +891,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
break;
} while (!desc->eof);
out:
- nfs_readdir_free_pages(desc->pvec.pages, max_rapages);
if (res > 0)
res = 0;
dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index bcff3bf5ae09..b04e20d28162 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -934,6 +934,10 @@ out_nolseg:
if (pgio->pg_error < 0)
return;
out_mds:
+ trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
+ 0, NFS4_MAX_UINT64, IOMODE_READ,
+ NFS_I(pgio->pg_inode)->layout,
+ pgio->pg_lseg);
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
nfs_pageio_reset_read_mds(pgio);
@@ -1000,6 +1004,10 @@ retry:
return;
out_mds:
+ trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
+ 0, NFS4_MAX_UINT64, IOMODE_RW,
+ NFS_I(pgio->pg_inode)->layout,
+ pgio->pg_lseg);
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
nfs_pageio_reset_write_mds(pgio);
@@ -1026,6 +1034,10 @@ ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
if (pgio->pg_lseg)
return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
+ trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
+ 0, NFS4_MAX_UINT64, IOMODE_RW,
+ NFS_I(pgio->pg_inode)->layout,
+ pgio->pg_lseg);
/* no lseg means that pnfs is not in use, so no mirroring here */
nfs_pageio_reset_write_mds(pgio);
out:
@@ -1075,6 +1087,10 @@ static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
hdr->args.count,
(unsigned long long)hdr->args.offset);
+ trace_pnfs_mds_fallback_write_done(hdr->inode,
+ hdr->args.offset, hdr->args.count,
+ IOMODE_RW, NFS_I(hdr->inode)->layout,
+ hdr->lseg);
task->tk_status = pnfs_write_done_resend_to_mds(hdr);
}
}
@@ -1094,6 +1110,10 @@ static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
hdr->args.count,
(unsigned long long)hdr->args.offset);
+ trace_pnfs_mds_fallback_read_done(hdr->inode,
+ hdr->args.offset, hdr->args.count,
+ IOMODE_READ, NFS_I(hdr->inode)->layout,
+ hdr->lseg);
task->tk_status = pnfs_read_done_resend_to_mds(hdr);
}
}
@@ -1827,6 +1847,9 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
out_failed:
if (ff_layout_avoid_mds_available_ds(lseg))
return PNFS_TRY_AGAIN;
+ trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
+ hdr->args.offset, hdr->args.count,
+ IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
return PNFS_NOT_ATTEMPTED;
}
@@ -1892,6 +1915,9 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
out_failed:
if (ff_layout_avoid_mds_available_ds(lseg))
return PNFS_TRY_AGAIN;
+ trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
+ hdr->args.offset, hdr->args.count,
+ IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
return PNFS_NOT_ATTEMPTED;
}
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 19f856f45689..3eda40a320a5 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -257,7 +257,7 @@ int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
if (status == 0)
return 0;
- if (mirror->mirror_ds == NULL)
+ if (IS_ERR_OR_NULL(mirror->mirror_ds))
return -EINVAL;
dserr = kmalloc(sizeof(*dserr), gfp_flags);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 0b4a1a974411..8a1758200b57 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -51,6 +51,7 @@
#include "pnfs.h"
#include "nfs.h"
#include "netns.h"
+#include "sysfs.h"
#include "nfstrace.h"
@@ -208,7 +209,7 @@ static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
}
if (inode->i_mapping->nrpages == 0)
- flags &= ~NFS_INO_INVALID_DATA;
+ flags &= ~(NFS_INO_INVALID_DATA|NFS_INO_DATA_INVAL_DEFER);
nfsi->cache_validity |= flags;
if (flags & NFS_INO_INVALID_DATA)
nfs_fscache_invalidate(inode);
@@ -652,7 +653,8 @@ static int nfs_vmtruncate(struct inode * inode, loff_t offset)
i_size_write(inode, offset);
/* Optimisation */
if (offset == 0)
- NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA;
+ NFS_I(inode)->cache_validity &= ~(NFS_INO_INVALID_DATA |
+ NFS_INO_DATA_INVAL_DEFER);
NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
spin_unlock(&inode->i_lock);
@@ -1032,6 +1034,10 @@ void nfs_inode_attach_open_context(struct nfs_open_context *ctx)
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
+ if (list_empty(&nfsi->open_files) &&
+ (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
+ nfsi->cache_validity |= NFS_INO_INVALID_DATA |
+ NFS_INO_REVAL_FORCED;
list_add_tail_rcu(&ctx->list, &nfsi->open_files);
spin_unlock(&inode->i_lock);
}
@@ -1100,6 +1106,7 @@ int nfs_open(struct inode *inode, struct file *filp)
nfs_fscache_open_file(inode, filp);
return 0;
}
+EXPORT_SYMBOL_GPL(nfs_open);
/*
* This function is called whenever some part of NFS notices that
@@ -1312,7 +1319,8 @@ int nfs_revalidate_mapping(struct inode *inode,
set_bit(NFS_INO_INVALIDATING, bitlock);
smp_wmb();
- nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
+ nfsi->cache_validity &= ~(NFS_INO_INVALID_DATA|
+ NFS_INO_DATA_INVAL_DEFER);
spin_unlock(&inode->i_lock);
trace_nfs_invalidate_mapping_enter(inode);
ret = nfs_invalidate_mapping(inode, mapping);
@@ -1870,7 +1878,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
dprintk("NFS: change_attr change on server for file %s/%ld\n",
inode->i_sb->s_id,
inode->i_ino);
- }
+ } else if (!have_delegation)
+ nfsi->cache_validity |= NFS_INO_DATA_INVAL_DEFER;
inode_set_iversion_raw(inode, fattr->change_attr);
attr_changed = true;
}
@@ -2159,12 +2168,8 @@ static int nfs_net_init(struct net *net)
static void nfs_net_exit(struct net *net)
{
- struct nfs_net *nn = net_generic(net, nfs_net_id);
-
nfs_fs_proc_net_exit(net);
- nfs_cleanup_cb_ident_idr(net);
- WARN_ON_ONCE(!list_empty(&nn->nfs_client_list));
- WARN_ON_ONCE(!list_empty(&nn->nfs_volume_list));
+ nfs_clients_exit(net);
}
static struct pernet_operations nfs_net_ops = {
@@ -2181,6 +2186,10 @@ static int __init init_nfs_fs(void)
{
int err;
+ err = nfs_sysfs_init();
+ if (err < 0)
+ goto out10;
+
err = register_pernet_subsys(&nfs_net_ops);
if (err < 0)
goto out9;
@@ -2244,6 +2253,8 @@ out7:
out8:
unregister_pernet_subsys(&nfs_net_ops);
out9:
+ nfs_sysfs_exit();
+out10:
return err;
}
@@ -2260,6 +2271,7 @@ static void __exit exit_nfs_fs(void)
unregister_nfs_fs();
nfs_fs_proc_exit();
nfsiod_stop();
+ nfs_sysfs_exit();
}
/* Not quite true; I just maintain it */
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 498fab72f70b..a2346a2f8361 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -69,8 +69,7 @@ struct nfs_clone_mount {
* Maximum number of pages that readdir can use for creating
* a vmapped array of pages.
*/
-#define NFS_MAX_READDIR_PAGES 64
-#define NFS_MAX_READDIR_RAPAGES 8
+#define NFS_MAX_READDIR_PAGES 8
struct nfs_client_initdata {
unsigned long init_flags;
@@ -82,6 +81,7 @@ struct nfs_client_initdata {
struct nfs_subversion *nfs_mod;
int proto;
u32 minorversion;
+ unsigned int nconnect;
struct net *net;
const struct rpc_timeout *timeparms;
const struct cred *cred;
@@ -123,6 +123,7 @@ struct nfs_parsed_mount_data {
char *export_path;
int port;
unsigned short protocol;
+ unsigned short nconnect;
} nfs_server;
void *lsm_opts;
@@ -158,6 +159,7 @@ extern void nfs_umount(const struct nfs_mount_request *info);
/* client.c */
extern const struct rpc_program nfs_program;
extern void nfs_clients_init(struct net *net);
+extern void nfs_clients_exit(struct net *net);
extern struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *);
int nfs_create_rpc_client(struct nfs_client *, const struct nfs_client_initdata *, rpc_authflavor_t);
struct nfs_client *nfs_get_client(const struct nfs_client_initdata *);
@@ -170,7 +172,6 @@ int nfs_init_server_rpcclient(struct nfs_server *, const struct rpc_timeout *t,
struct nfs_server *nfs_alloc_server(void);
void nfs_server_copy_userdata(struct nfs_server *, struct nfs_server *);
-extern void nfs_cleanup_cb_ident_idr(struct net *);
extern void nfs_put_client(struct nfs_client *);
extern void nfs_free_client(struct nfs_client *);
extern struct nfs_client *nfs4_find_client_ident(struct net *, int);
diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h
index fc9978c58265..c8374f74dce1 100644
--- a/fs/nfs/netns.h
+++ b/fs/nfs/netns.h
@@ -15,6 +15,8 @@ struct bl_dev_msg {
uint32_t major, minor;
};
+struct nfs_netns_client;
+
struct nfs_net {
struct cache_detail *nfs_dns_resolve;
struct rpc_pipe *bl_device_pipe;
@@ -29,6 +31,7 @@ struct nfs_net {
unsigned short nfs_callback_tcpport6;
int cb_users[NFS4_MAX_MINOR_VERSION + 1];
#endif
+ struct nfs_netns_client *nfs_client;
spinlock_t nfs_client_lock;
ktime_t boot_time;
#ifdef CONFIG_PROC_FS
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 572794dab4b1..cbc17a203248 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -151,7 +151,7 @@ static int decode_stat(struct xdr_stream *xdr, enum nfs_stat *status)
return 0;
out_status:
*status = be32_to_cpup(p);
- trace_nfs_xdr_status((int)*status);
+ trace_nfs_xdr_status(xdr, (int)*status);
return 0;
}
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index fb0c425b5d45..148ceb74d27c 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -102,6 +102,9 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
return ERR_PTR(-EINVAL);
cl_init.hostname = buf;
+ if (mds_clp->cl_nconnect > 1 && ds_proto == XPRT_TRANSPORT_TCP)
+ cl_init.nconnect = mds_clp->cl_nconnect;
+
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index abbbdde97e31..602767850b36 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -343,7 +343,7 @@ static int decode_nfsstat3(struct xdr_stream *xdr, enum nfs_stat *status)
return 0;
out_status:
*status = be32_to_cpup(p);
- trace_nfs_xdr_status((int)*status);
+ trace_nfs_xdr_status(xdr, (int)*status);
return 0;
}
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 8a38a254f516..d778dad9a75e 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -312,12 +312,12 @@ extern int nfs4_set_rw_stateid(nfs4_stateid *stateid,
const struct nfs_lock_context *l_ctx,
fmode_t fmode);
+extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
+ struct nfs_fsinfo *fsinfo);
#if defined(CONFIG_NFS_V4_1)
extern int nfs41_sequence_done(struct rpc_task *, struct nfs4_sequence_res *);
extern int nfs4_proc_create_session(struct nfs_client *, const struct cred *);
extern int nfs4_proc_destroy_session(struct nfs4_session *, const struct cred *);
-extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
- struct nfs_fsinfo *fsinfo);
extern int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data,
bool sync);
extern int nfs4_detect_session_trunking(struct nfs_client *clp,
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 81b9b6d7927a..616393a01c06 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -859,7 +859,8 @@ static int nfs4_set_client(struct nfs_server *server,
const size_t addrlen,
const char *ip_addr,
int proto, const struct rpc_timeout *timeparms,
- u32 minorversion, struct net *net)
+ u32 minorversion, unsigned int nconnect,
+ struct net *net)
{
struct nfs_client_initdata cl_init = {
.hostname = hostname,
@@ -875,6 +876,8 @@ static int nfs4_set_client(struct nfs_server *server,
};
struct nfs_client *clp;
+ if (minorversion > 0 && proto == XPRT_TRANSPORT_TCP)
+ cl_init.nconnect = nconnect;
if (server->flags & NFS_MOUNT_NORESVPORT)
set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
if (server->options & NFS_OPTION_MIGRATION)
@@ -941,6 +944,9 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
return ERR_PTR(-EINVAL);
cl_init.hostname = buf;
+ if (mds_clp->cl_nconnect > 1 && ds_proto == XPRT_TRANSPORT_TCP)
+ cl_init.nconnect = mds_clp->cl_nconnect;
+
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
@@ -1074,6 +1080,7 @@ static int nfs4_init_server(struct nfs_server *server,
data->nfs_server.protocol,
&timeparms,
data->minorversion,
+ data->nfs_server.nconnect,
data->net);
if (error < 0)
return error;
@@ -1163,6 +1170,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
XPRT_TRANSPORT_RDMA,
parent_server->client->cl_timeout,
parent_client->cl_mvops->minor_version,
+ parent_client->cl_nconnect,
parent_client->cl_net);
if (!error)
goto init_server;
@@ -1176,6 +1184,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
XPRT_TRANSPORT_TCP,
parent_server->client->cl_timeout,
parent_client->cl_mvops->minor_version,
+ parent_client->cl_nconnect,
parent_client->cl_net);
if (error < 0)
goto error;
@@ -1271,7 +1280,8 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
set_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status);
error = nfs4_set_client(server, hostname, sap, salen, buf,
clp->cl_proto, clnt->cl_timeout,
- clp->cl_minorversion, net);
+ clp->cl_minorversion,
+ clp->cl_nconnect, net);
clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status);
if (error != 0) {
nfs_server_insert_lists(server);
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index f4157eb1f69d..96db471ca2e5 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -49,7 +49,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
return err;
if ((openflags & O_ACCMODE) == 3)
- openflags--;
+ return nfs_open(inode, filp);
/* We can't create new files here */
openflags &= ~(O_CREAT|O_EXCL);
@@ -204,7 +204,11 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
bool same_inode = false;
int ret;
- if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
+ /* NFS does not support deduplication. */
+ if (remap_flags & REMAP_FILE_DEDUP)
+ return -EOPNOTSUPP;
+
+ if (remap_flags & ~REMAP_FILE_ADVISORY)
return -EINVAL;
/* check alignment w.r.t. clone_blksize */
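The error-code split here is deliberate; a hedged reading of the two returns (comments are interpretation, not patch text):

if (remap_flags & REMAP_FILE_DEDUP)
	return -EOPNOTSUPP;	/* well-formed request; NFS simply can't dedup */

if (remap_flags & ~REMAP_FILE_ADVISORY)
	return -EINVAL;		/* flag bits this implementation doesn't know */

-EOPNOTSUPP lets callers fall back to an ordinary copy, while -EINVAL still signals a genuinely malformed request.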
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6418cb6c079b..39896afc6edf 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -428,6 +428,22 @@ static int nfs4_delay(long *timeout, bool interruptible)
return nfs4_delay_killable(timeout);
}
+static const nfs4_stateid *
+nfs4_recoverable_stateid(const nfs4_stateid *stateid)
+{
+ if (!stateid)
+ return NULL;
+ switch (stateid->type) {
+ case NFS4_OPEN_STATEID_TYPE:
+ case NFS4_LOCK_STATEID_TYPE:
+ case NFS4_DELEGATION_STATEID_TYPE:
+ return stateid;
+ default:
+ break;
+ }
+ return NULL;
+}
+
/* This is the error handling routine for processes that are allowed
* to sleep.
*/
@@ -436,7 +452,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state *state = exception->state;
- const nfs4_stateid *stateid = exception->stateid;
+ const nfs4_stateid *stateid;
struct inode *inode = exception->inode;
int ret = errorcode;
@@ -444,8 +460,9 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
exception->recovering = 0;
exception->retry = 0;
+ stateid = nfs4_recoverable_stateid(exception->stateid);
if (stateid == NULL && state != NULL)
- stateid = &state->stateid;
+ stateid = nfs4_recoverable_stateid(&state->stateid);
switch(errorcode) {
case 0:
@@ -1165,6 +1182,18 @@ static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
return true;
}
+static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
+{
+ return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
+}
+
+static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
+{
+ fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);
+
+ return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
+}
+
static u32
nfs4_map_atomic_open_share(struct nfs_server *server,
fmode_t fmode, int openflags)
@@ -2900,14 +2929,13 @@ static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
}
static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
- fmode_t fmode,
- int flags,
- struct nfs_open_context *ctx)
+ int flags, struct nfs_open_context *ctx)
{
struct nfs4_state_owner *sp = opendata->owner;
struct nfs_server *server = sp->so_server;
struct dentry *dentry;
struct nfs4_state *state;
+ fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
unsigned int seq;
int ret;
@@ -2946,7 +2974,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
/* Parse layoutget results before we check for access */
pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
- ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
+ ret = nfs4_opendata_access(sp->so_cred, opendata, state,
+ acc_mode, flags);
if (ret != 0)
goto out;
@@ -2978,7 +3007,7 @@ static int _nfs4_do_open(struct inode *dir,
struct dentry *dentry = ctx->dentry;
const struct cred *cred = ctx->cred;
struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
- fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
+ fmode_t fmode = _nfs4_ctx_to_openmode(ctx);
enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
struct iattr *sattr = c->sattr;
struct nfs4_label *label = c->label;
@@ -3024,7 +3053,7 @@ static int _nfs4_do_open(struct inode *dir,
if (d_really_is_positive(dentry))
opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
- status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
+ status = _nfs4_open_and_get_state(opendata, flags, ctx);
if (status != 0)
goto err_free_label;
state = ctx->state;
@@ -3594,9 +3623,9 @@ static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
if (ctx->state == NULL)
return;
if (is_sync)
- nfs4_close_sync(ctx->state, ctx->mode);
+ nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
else
- nfs4_close_state(ctx->state, ctx->mode);
+ nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx));
}
#define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
@@ -5980,7 +6009,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
.rpc_message = &msg,
.callback_ops = &nfs4_setclientid_ops,
.callback_data = &setclientid,
- .flags = RPC_TASK_TIMEOUT,
+ .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
};
int status;
@@ -6046,7 +6075,8 @@ int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
clp->cl_rpcclient->cl_auth->au_ops->au_name,
clp->cl_clientid);
- status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+ status = rpc_call_sync(clp->cl_rpcclient, &msg,
+ RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
trace_nfs4_setclientid_confirm(clp, status);
dprintk("NFS reply setclientid_confirm: %d\n", status);
return status;
@@ -7627,7 +7657,7 @@ static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct
NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
- &res.seq_res, 0);
+ &res.seq_res, RPC_TASK_NO_ROUND_ROBIN);
dprintk("NFS reply secinfo: %d\n", status);
put_cred(cred);
@@ -7965,7 +7995,7 @@ nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
.rpc_client = clp->cl_rpcclient,
.callback_ops = &nfs4_exchange_id_call_ops,
.rpc_message = &msg,
- .flags = RPC_TASK_TIMEOUT,
+ .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
};
struct nfs41_exchange_id_data *calldata;
int status;
@@ -8190,7 +8220,8 @@ static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
};
int status;
- status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+ status = rpc_call_sync(clp->cl_rpcclient, &msg,
+ RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
trace_nfs4_destroy_clientid(clp, status);
if (status)
dprintk("NFS: Got error %d from the server %s on "
@@ -8241,6 +8272,8 @@ out:
return ret;
}
+#endif /* CONFIG_NFS_V4_1 */
+
struct nfs4_get_lease_time_data {
struct nfs4_get_lease_time_args *args;
struct nfs4_get_lease_time_res *res;
@@ -8273,7 +8306,7 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
(struct nfs4_get_lease_time_data *)calldata;
dprintk("--> %s\n", __func__);
- if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
+ if (!nfs4_sequence_done(task, &data->res->lr_seq_res))
return;
switch (task->tk_status) {
case -NFS4ERR_DELAY:
@@ -8331,6 +8364,8 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
return status;
}
+#ifdef CONFIG_NFS_V4_1
+
/*
* Initialize the values to be used by the client in CREATE_SESSION
* If nfs4_init_session set the fore channel request and response sizes,
@@ -8345,6 +8380,7 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
{
unsigned int max_rqst_sz, max_resp_sz;
unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
+ unsigned int max_bc_slots = rpc_num_bc_slots(clnt);
max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
@@ -8367,6 +8403,8 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
args->bc_attrs.max_resp_sz_cached = 0;
args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
+ if (args->bc_attrs.max_reqs > max_bc_slots)
+ args->bc_attrs.max_reqs = max_bc_slots;
dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
@@ -8469,7 +8507,8 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
- status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+ status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
+ RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
trace_nfs4_create_session(clp, status);
switch (status) {
@@ -8545,7 +8584,8 @@ int nfs4_proc_destroy_session(struct nfs4_session *session,
if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
return 0;
- status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+ status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
+ RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
trace_nfs4_destroy_session(session->clp, status);
if (status)
@@ -8799,7 +8839,7 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs4_reclaim_complete_call_ops,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_NO_ROUND_ROBIN,
};
int status = -ENOMEM;
@@ -9318,7 +9358,7 @@ _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
dprintk("--> %s\n", __func__);
status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
- &res.seq_res, 0);
+ &res.seq_res, RPC_TASK_NO_ROUND_ROBIN);
dprintk("<-- %s status=%d\n", __func__, status);
put_cred(cred);
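The RPC_TASK_NO_ROUND_ROBIN additions pair with the nconnect work above: ordinary I/O may be spread across all transports, but lease, session and other state-management calls must stay on the main transport. A hedged one-liner showing the idiom:

task_setup.flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN;	/* pin to the main xprt */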
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e2e3c4f04d3e..9afd051a4876 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -87,6 +87,27 @@ const nfs4_stateid current_stateid = {
static DEFINE_MUTEX(nfs_clid_init_mutex);
+static int nfs4_setup_state_renewal(struct nfs_client *clp)
+{
+ int status;
+ struct nfs_fsinfo fsinfo;
+ unsigned long now;
+
+ if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
+ nfs4_schedule_state_renewal(clp);
+ return 0;
+ }
+
+ now = jiffies;
+ status = nfs4_proc_get_lease_time(clp, &fsinfo);
+ if (status == 0) {
+ nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
+ nfs4_schedule_state_renewal(clp);
+ }
+
+ return status;
+}
+
int nfs4_init_clientid(struct nfs_client *clp, const struct cred *cred)
{
struct nfs4_setclientid_res clid = {
@@ -114,7 +135,7 @@ do_confirm:
if (status != 0)
goto out;
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
- nfs4_schedule_state_renewal(clp);
+ nfs4_setup_state_renewal(clp);
out:
return status;
}
@@ -286,34 +307,13 @@ static int nfs4_begin_drain_session(struct nfs_client *clp)
#if defined(CONFIG_NFS_V4_1)
-static int nfs41_setup_state_renewal(struct nfs_client *clp)
-{
- int status;
- struct nfs_fsinfo fsinfo;
- unsigned long now;
-
- if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
- nfs4_schedule_state_renewal(clp);
- return 0;
- }
-
- now = jiffies;
- status = nfs4_proc_get_lease_time(clp, &fsinfo);
- if (status == 0) {
- nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
- nfs4_schedule_state_renewal(clp);
- }
-
- return status;
-}
-
static void nfs41_finish_session_reset(struct nfs_client *clp)
{
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
/* create_session negotiated new slot table */
clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
- nfs41_setup_state_renewal(clp);
+ nfs4_setup_state_renewal(clp);
}
int nfs41_init_clientid(struct nfs_client *clp, const struct cred *cred)
@@ -1064,8 +1064,7 @@ int nfs4_select_rw_stateid(struct nfs4_state *state,
* choose to use.
*/
goto out;
- nfs4_copy_open_stateid(dst, state);
- ret = 0;
+ ret = nfs4_copy_open_stateid(dst, state) ? 0 : -EAGAIN;
out:
if (nfs_server_capable(state->inode, NFS_CAP_STATEID_NFSV41))
dst->seqid = 0;
diff --git a/fs/nfs/nfs4trace.c b/fs/nfs/nfs4trace.c
index e9fb3e50a999..1a8f376b3f73 100644
--- a/fs/nfs/nfs4trace.c
+++ b/fs/nfs/nfs4trace.c
@@ -16,4 +16,12 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(nfs4_pnfs_read);
EXPORT_TRACEPOINT_SYMBOL_GPL(nfs4_pnfs_write);
EXPORT_TRACEPOINT_SYMBOL_GPL(nfs4_pnfs_commit_ds);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_pg_init_read);
+EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_pg_init_write);
+EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_pg_get_mirror_count);
+EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_read_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_write_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_read_pagelist);
+EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_write_pagelist);
#endif
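The nfs4trace.h churn converges on one convention: store the NFS status as a positive unsigned long and negate it only for display. A hedged fragment in the file's own TRACE_EVENT macro style (not standalone code):

__field(unsigned long, error)		/* in TP_STRUCT__entry() */
__entry->error = -error;		/* in TP_fast_assign(); error <= 0 */
"error=%ld (%s)", -__entry->error,	/* in TP_printk() */
	show_nfsv4_errors(__entry->error)

show_nfsv4_errors() then matches positive NFS4ERR_* symbols while the printed value keeps the familiar negative errno form.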
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index cd1a5c08da9a..b2f395fa7350 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -156,7 +156,7 @@ TRACE_DEFINE_ENUM(NFS4ERR_WRONG_TYPE);
TRACE_DEFINE_ENUM(NFS4ERR_XDEV);
#define show_nfsv4_errors(error) \
- __print_symbolic(-(error), \
+ __print_symbolic(error, \
{ NFS4_OK, "OK" }, \
/* Mapped by nfs4_stat_to_errno() */ \
{ EPERM, "EPERM" }, \
@@ -348,7 +348,7 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
TP_STRUCT__entry(
__string(dstaddr, clp->cl_hostname)
- __field(int, error)
+ __field(unsigned long, error)
),
TP_fast_assign(
@@ -357,8 +357,8 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
),
TP_printk(
- "error=%d (%s) dstaddr=%s",
- __entry->error,
+ "error=%ld (%s) dstaddr=%s",
+ -__entry->error,
show_nfsv4_errors(__entry->error),
__get_str(dstaddr)
)
@@ -420,7 +420,7 @@ TRACE_EVENT(nfs4_sequence_done,
__field(unsigned int, highest_slotid)
__field(unsigned int, target_highest_slotid)
__field(unsigned int, status_flags)
- __field(int, error)
+ __field(unsigned long, error)
),
TP_fast_assign(
@@ -435,10 +435,10 @@ TRACE_EVENT(nfs4_sequence_done,
__entry->error = res->sr_status;
),
TP_printk(
- "error=%d (%s) session=0x%08x slot_nr=%u seq_nr=%u "
+ "error=%ld (%s) session=0x%08x slot_nr=%u seq_nr=%u "
"highest_slotid=%u target_highest_slotid=%u "
"status_flags=%u (%s)",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
__entry->session,
__entry->slot_nr,
@@ -467,7 +467,7 @@ TRACE_EVENT(nfs4_cb_sequence,
__field(unsigned int, seq_nr)
__field(unsigned int, highest_slotid)
__field(unsigned int, cachethis)
- __field(int, error)
+ __field(unsigned long, error)
),
TP_fast_assign(
@@ -476,13 +476,13 @@ TRACE_EVENT(nfs4_cb_sequence,
__entry->seq_nr = args->csa_sequenceid;
__entry->highest_slotid = args->csa_highestslotid;
__entry->cachethis = args->csa_cachethis;
- __entry->error = -be32_to_cpu(status);
+ __entry->error = be32_to_cpu(status);
),
TP_printk(
- "error=%d (%s) session=0x%08x slot_nr=%u seq_nr=%u "
+ "error=%ld (%s) session=0x%08x slot_nr=%u seq_nr=%u "
"highest_slotid=%u",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
__entry->session,
__entry->slot_nr,
@@ -490,6 +490,44 @@ TRACE_EVENT(nfs4_cb_sequence,
__entry->highest_slotid
)
);
+
+TRACE_EVENT(nfs4_cb_seqid_err,
+ TP_PROTO(
+ const struct cb_sequenceargs *args,
+ __be32 status
+ ),
+ TP_ARGS(args, status),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, session)
+ __field(unsigned int, slot_nr)
+ __field(unsigned int, seq_nr)
+ __field(unsigned int, highest_slotid)
+ __field(unsigned int, cachethis)
+ __field(unsigned long, error)
+ ),
+
+ TP_fast_assign(
+ __entry->session = nfs_session_id_hash(&args->csa_sessionid);
+ __entry->slot_nr = args->csa_slotid;
+ __entry->seq_nr = args->csa_sequenceid;
+ __entry->highest_slotid = args->csa_highestslotid;
+ __entry->cachethis = args->csa_cachethis;
+ __entry->error = be32_to_cpu(status);
+ ),
+
+ TP_printk(
+ "error=%ld (%s) session=0x%08x slot_nr=%u seq_nr=%u "
+ "highest_slotid=%u",
+ -__entry->error,
+ show_nfsv4_errors(__entry->error),
+ __entry->session,
+ __entry->slot_nr,
+ __entry->seq_nr,
+ __entry->highest_slotid
+ )
+);
+
#endif /* CONFIG_NFS_V4_1 */
TRACE_EVENT(nfs4_setup_sequence,
@@ -526,26 +564,37 @@ TRACE_EVENT(nfs4_setup_sequence,
TRACE_EVENT(nfs4_xdr_status,
TP_PROTO(
+ const struct xdr_stream *xdr,
u32 op,
int error
),
- TP_ARGS(op, error),
+ TP_ARGS(xdr, op, error),
TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
__field(u32, op)
- __field(int, error)
+ __field(unsigned long, error)
),
TP_fast_assign(
+ const struct rpc_rqst *rqstp = xdr->rqst;
+ const struct rpc_task *task = rqstp->rq_task;
+
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
__entry->op = op;
- __entry->error = -error;
+ __entry->error = error;
),
TP_printk(
- "operation %d: nfs status %d (%s)",
- __entry->op,
- __entry->error, show_nfsv4_errors(__entry->error)
+ "task:%u@%d xid=0x%08x error=%ld (%s) operation=%u",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ -__entry->error, show_nfsv4_errors(__entry->error),
+ __entry->op
)
);
@@ -559,7 +608,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event,
TP_ARGS(ctx, flags, error),
TP_STRUCT__entry(
- __field(int, error)
+ __field(unsigned long, error)
__field(unsigned int, flags)
__field(unsigned int, fmode)
__field(dev_t, dev)
@@ -577,7 +626,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event,
const struct nfs4_state *state = ctx->state;
const struct inode *inode = NULL;
- __entry->error = error;
+ __entry->error = -error;
__entry->flags = flags;
__entry->fmode = (__force unsigned int)ctx->mode;
__entry->dev = ctx->dentry->d_sb->s_dev;
@@ -609,11 +658,11 @@ DECLARE_EVENT_CLASS(nfs4_open_event,
),
TP_printk(
- "error=%d (%s) flags=%d (%s) fmode=%s "
+ "error=%ld (%s) flags=%d (%s) fmode=%s "
"fileid=%02x:%02x:%llu fhandle=0x%08x "
"name=%02x:%02x:%llu/%s stateid=%d:0x%08x "
"openstateid=%d:0x%08x",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
__entry->flags,
show_open_flags(__entry->flags),
@@ -695,7 +744,7 @@ TRACE_EVENT(nfs4_close,
__field(u32, fhandle)
__field(u64, fileid)
__field(unsigned int, fmode)
- __field(int, error)
+ __field(unsigned long, error)
__field(int, stateid_seq)
__field(u32, stateid_hash)
),
@@ -715,9 +764,9 @@ TRACE_EVENT(nfs4_close,
),
TP_printk(
- "error=%d (%s) fmode=%s fileid=%02x:%02x:%llu "
+ "error=%ld (%s) fmode=%s fileid=%02x:%02x:%llu "
"fhandle=0x%08x openstateid=%d:0x%08x",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
__entry->fmode ? show_fmode_flags(__entry->fmode) :
"closed",
@@ -757,7 +806,7 @@ DECLARE_EVENT_CLASS(nfs4_lock_event,
TP_ARGS(request, state, cmd, error),
TP_STRUCT__entry(
- __field(int, error)
+ __field(unsigned long, error)
__field(int, cmd)
__field(char, type)
__field(loff_t, start)
@@ -787,10 +836,10 @@ DECLARE_EVENT_CLASS(nfs4_lock_event,
),
TP_printk(
- "error=%d (%s) cmd=%s:%s range=%lld:%lld "
+ "error=%ld (%s) cmd=%s:%s range=%lld:%lld "
"fileid=%02x:%02x:%llu fhandle=0x%08x "
"stateid=%d:0x%08x",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
show_lock_cmd(__entry->cmd),
show_lock_type(__entry->type),
@@ -827,7 +876,7 @@ TRACE_EVENT(nfs4_set_lock,
TP_ARGS(request, state, lockstateid, cmd, error),
TP_STRUCT__entry(
- __field(int, error)
+ __field(unsigned long, error)
__field(int, cmd)
__field(char, type)
__field(loff_t, start)
@@ -863,10 +912,10 @@ TRACE_EVENT(nfs4_set_lock,
),
TP_printk(
- "error=%d (%s) cmd=%s:%s range=%lld:%lld "
+ "error=%ld (%s) cmd=%s:%s range=%lld:%lld "
"fileid=%02x:%02x:%llu fhandle=0x%08x "
"stateid=%d:0x%08x lockstateid=%d:0x%08x",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
show_lock_cmd(__entry->cmd),
show_lock_type(__entry->type),
@@ -932,7 +981,7 @@ TRACE_EVENT(nfs4_delegreturn_exit,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(u32, fhandle)
- __field(int, error)
+ __field(unsigned long, error)
__field(int, stateid_seq)
__field(u32, stateid_hash)
),
@@ -948,9 +997,9 @@ TRACE_EVENT(nfs4_delegreturn_exit,
),
TP_printk(
- "error=%d (%s) dev=%02x:%02x fhandle=0x%08x "
+ "error=%ld (%s) dev=%02x:%02x fhandle=0x%08x "
"stateid=%d:0x%08x",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->fhandle,
@@ -969,7 +1018,7 @@ DECLARE_EVENT_CLASS(nfs4_test_stateid_event,
TP_ARGS(state, lsp, error),
TP_STRUCT__entry(
- __field(int, error)
+ __field(unsigned long, error)
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
@@ -991,9 +1040,9 @@ DECLARE_EVENT_CLASS(nfs4_test_stateid_event,
),
TP_printk(
- "error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"stateid=%d:0x%08x",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
@@ -1026,7 +1075,7 @@ DECLARE_EVENT_CLASS(nfs4_lookup_event,
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(int, error)
+ __field(unsigned long, error)
__field(u64, dir)
__string(name, name->name)
),
@@ -1034,13 +1083,13 @@ DECLARE_EVENT_CLASS(nfs4_lookup_event,
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir);
- __entry->error = error;
+ __entry->error = -error;
__assign_str(name, name->name);
),
TP_printk(
- "error=%d (%s) name=%02x:%02x:%llu/%s",
- __entry->error,
+ "error=%ld (%s) name=%02x:%02x:%llu/%s",
+ -__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->dir,
@@ -1076,7 +1125,7 @@ TRACE_EVENT(nfs4_lookupp,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(u64, ino)
- __field(int, error)
+ __field(unsigned long, error)
),
TP_fast_assign(
@@ -1086,8 +1135,8 @@ TRACE_EVENT(nfs4_lookupp,
),
TP_printk(
- "error=%d (%s) inode=%02x:%02x:%llu",
- __entry->error,
+ "error=%ld (%s) inode=%02x:%02x:%llu",
+ -__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->ino
@@ -1107,7 +1156,7 @@ TRACE_EVENT(nfs4_rename,
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(int, error)
+ __field(unsigned long, error)
__field(u64, olddir)
__string(oldname, oldname->name)
__field(u64, newdir)
@@ -1124,9 +1173,9 @@ TRACE_EVENT(nfs4_rename,
),
TP_printk(
- "error=%d (%s) oldname=%02x:%02x:%llu/%s "
+ "error=%ld (%s) oldname=%02x:%02x:%llu/%s "
"newname=%02x:%02x:%llu/%s",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->olddir,
@@ -1149,19 +1198,19 @@ DECLARE_EVENT_CLASS(nfs4_inode_event,
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
- __field(int, error)
+ __field(unsigned long, error)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = NFS_FILEID(inode);
__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
),
TP_printk(
- "error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x",
- __entry->error,
+ "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x",
+ -__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
@@ -1200,7 +1249,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_event,
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
- __field(int, error)
+ __field(unsigned long, error)
__field(int, stateid_seq)
__field(u32, stateid_hash)
),
@@ -1217,9 +1266,9 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_event,
),
TP_printk(
- "error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"stateid=%d:0x%08x",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
@@ -1257,7 +1306,7 @@ DECLARE_EVENT_CLASS(nfs4_getattr_event,
__field(u32, fhandle)
__field(u64, fileid)
__field(unsigned int, valid)
- __field(int, error)
+ __field(unsigned long, error)
),
TP_fast_assign(
@@ -1269,9 +1318,9 @@ DECLARE_EVENT_CLASS(nfs4_getattr_event,
),
TP_printk(
- "error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"valid=%s",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
@@ -1304,7 +1353,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
TP_ARGS(clp, fhandle, inode, error),
TP_STRUCT__entry(
- __field(int, error)
+ __field(unsigned long, error)
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
@@ -1325,9 +1374,9 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
),
TP_printk(
- "error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"dstaddr=%s",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
@@ -1359,7 +1408,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
TP_ARGS(clp, fhandle, inode, stateid, error),
TP_STRUCT__entry(
- __field(int, error)
+ __field(unsigned long, error)
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
@@ -1386,9 +1435,9 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
),
TP_printk(
- "error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"stateid=%d:0x%08x dstaddr=%s",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
@@ -1422,7 +1471,7 @@ DECLARE_EVENT_CLASS(nfs4_idmap_event,
TP_ARGS(name, len, id, error),
TP_STRUCT__entry(
- __field(int, error)
+ __field(unsigned long, error)
__field(u32, id)
__dynamic_array(char, name, len > 0 ? len + 1 : 1)
),
@@ -1437,8 +1486,8 @@ DECLARE_EVENT_CLASS(nfs4_idmap_event,
),
TP_printk(
- "error=%d id=%u name=%s",
- __entry->error,
+ "error=%ld (%s) id=%u name=%s",
+ -__entry->error, show_nfsv4_errors(__entry->error),
__entry->id,
__get_str(name)
)
@@ -1471,7 +1520,7 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
__field(u64, fileid)
__field(loff_t, offset)
__field(size_t, count)
- __field(int, error)
+ __field(unsigned long, error)
__field(int, stateid_seq)
__field(u32, stateid_hash)
),
@@ -1485,7 +1534,7 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
__entry->offset = hdr->args.offset;
__entry->count = hdr->args.count;
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__entry->stateid_seq =
be32_to_cpu(state->stateid.seqid);
__entry->stateid_hash =
@@ -1493,9 +1542,9 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
),
TP_printk(
- "error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"offset=%lld count=%zu stateid=%d:0x%08x",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
@@ -1531,7 +1580,7 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
__field(u64, fileid)
__field(loff_t, offset)
__field(size_t, count)
- __field(int, error)
+ __field(unsigned long, error)
__field(int, stateid_seq)
__field(u32, stateid_hash)
),
@@ -1545,7 +1594,7 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
__entry->offset = hdr->args.offset;
__entry->count = hdr->args.count;
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__entry->stateid_seq =
be32_to_cpu(state->stateid.seqid);
__entry->stateid_hash =
@@ -1553,9 +1602,9 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
),
TP_printk(
- "error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"offset=%lld count=%zu stateid=%d:0x%08x",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
@@ -1592,7 +1641,7 @@ DECLARE_EVENT_CLASS(nfs4_commit_event,
__field(u64, fileid)
__field(loff_t, offset)
__field(size_t, count)
- __field(int, error)
+ __field(unsigned long, error)
),
TP_fast_assign(
@@ -1606,9 +1655,9 @@ DECLARE_EVENT_CLASS(nfs4_commit_event,
),
TP_printk(
- "error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"offset=%lld count=%zu",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
@@ -1656,7 +1705,7 @@ TRACE_EVENT(nfs4_layoutget,
__field(u32, iomode)
__field(u64, offset)
__field(u64, count)
- __field(int, error)
+ __field(unsigned long, error)
__field(int, stateid_seq)
__field(u32, stateid_hash)
__field(int, layoutstateid_seq)
@@ -1689,10 +1738,10 @@ TRACE_EVENT(nfs4_layoutget,
),
TP_printk(
- "error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"iomode=%s offset=%llu count=%llu stateid=%d:0x%08x "
"layoutstateid=%d:0x%08x",
- __entry->error,
+ -__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
@@ -1722,6 +1771,7 @@ TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_BLOCKED);
TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_INVALID_OPEN);
TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_RETRY);
TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
+TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_EXIT);
#define show_pnfs_update_layout_reason(reason) \
__print_symbolic(reason, \
@@ -1737,7 +1787,8 @@ TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
{ PNFS_UPDATE_LAYOUT_BLOCKED, "layouts blocked" }, \
{ PNFS_UPDATE_LAYOUT_INVALID_OPEN, "invalid open" }, \
{ PNFS_UPDATE_LAYOUT_RETRY, "retrying" }, \
- { PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET, "sent layoutget" })
+ { PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET, "sent layoutget" }, \
+ { PNFS_UPDATE_LAYOUT_EXIT, "exit" })
TRACE_EVENT(pnfs_update_layout,
TP_PROTO(struct inode *inode,
@@ -1796,6 +1847,78 @@ TRACE_EVENT(pnfs_update_layout,
)
);
+DECLARE_EVENT_CLASS(pnfs_layout_event,
+ TP_PROTO(struct inode *inode,
+ loff_t pos,
+ u64 count,
+ enum pnfs_iomode iomode,
+ struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_segment *lseg
+ ),
+ TP_ARGS(inode, pos, count, iomode, lo, lseg),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, fileid)
+ __field(u32, fhandle)
+ __field(loff_t, pos)
+ __field(u64, count)
+ __field(enum pnfs_iomode, iomode)
+ __field(int, layoutstateid_seq)
+ __field(u32, layoutstateid_hash)
+ __field(long, lseg)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = NFS_FILEID(inode);
+ __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
+ __entry->pos = pos;
+ __entry->count = count;
+ __entry->iomode = iomode;
+ if (lo != NULL) {
+ __entry->layoutstateid_seq =
+ be32_to_cpu(lo->plh_stateid.seqid);
+ __entry->layoutstateid_hash =
+ nfs_stateid_hash(&lo->plh_stateid);
+ } else {
+ __entry->layoutstateid_seq = 0;
+ __entry->layoutstateid_hash = 0;
+ }
+ __entry->lseg = (long)lseg;
+ ),
+ TP_printk(
+ "fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "iomode=%s pos=%llu count=%llu "
+ "layoutstateid=%d:0x%08x lseg=0x%lx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle,
+ show_pnfs_iomode(__entry->iomode),
+ (unsigned long long)__entry->pos,
+ (unsigned long long)__entry->count,
+ __entry->layoutstateid_seq, __entry->layoutstateid_hash,
+ __entry->lseg
+ )
+);
+
+#define DEFINE_PNFS_LAYOUT_EVENT(name) \
+ DEFINE_EVENT(pnfs_layout_event, name, \
+ TP_PROTO(struct inode *inode, \
+ loff_t pos, \
+ u64 count, \
+ enum pnfs_iomode iomode, \
+ struct pnfs_layout_hdr *lo, \
+ struct pnfs_layout_segment *lseg \
+ ), \
+ TP_ARGS(inode, pos, count, iomode, lo, lseg))
+
+DEFINE_PNFS_LAYOUT_EVENT(pnfs_mds_fallback_pg_init_read);
+DEFINE_PNFS_LAYOUT_EVENT(pnfs_mds_fallback_pg_init_write);
+DEFINE_PNFS_LAYOUT_EVENT(pnfs_mds_fallback_pg_get_mirror_count);
+DEFINE_PNFS_LAYOUT_EVENT(pnfs_mds_fallback_read_done);
+DEFINE_PNFS_LAYOUT_EVENT(pnfs_mds_fallback_write_done);
+DEFINE_PNFS_LAYOUT_EVENT(pnfs_mds_fallback_read_pagelist);
+DEFINE_PNFS_LAYOUT_EVENT(pnfs_mds_fallback_write_pagelist);
+
#endif /* CONFIG_NFS_V4_1 */
#endif /* _TRACE_NFS4_H */
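
The pattern repeated through these nfs4trace.h hunks is a single convention change: the negated errno is stored in an unsigned long field so that show_nfsv4_errors() sees a positive NFS status code, while TP_printk() negates it back so the log still reads as a familiar -errno. A minimal userspace model of that convention (a sketch, not kernel code; the two error values are illustrative):

#include <stdio.h>

static const char *show_nfsv4_errors(unsigned long err)
{
	switch (err) {
	case 0:  return "OK";
	case 2:  return "NOENT";	/* NFS4ERR_NOENT shares ENOENT's value */
	case 13: return "ACCESS";	/* NFS4ERR_ACCESS shares EACCES's value */
	default: return "UNKNOWN";
	}
}

int main(void)
{
	int error = -13;			/* what the NFS operation returned */
	unsigned long entry_error = error < 0 ? -error : 0;

	/* matches TP_printk("error=%ld (%s) ...", -__entry->error, ...);
	 * the kernel lets %ld reinterpret the unsigned value, this sketch
	 * does the well-defined equivalent */
	printf("error=%ld (%s)\n", -(long)entry_error,
	       show_nfsv4_errors(entry_error));
	return 0;
}
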
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 602446158bfb..46a8d636d151 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -837,6 +837,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
#define NFS4_dec_sequence_sz \
(compound_decode_hdr_maxsz + \
decode_sequence_maxsz)
+#endif
#define NFS4_enc_get_lease_time_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putrootfh_maxsz + \
@@ -845,6 +846,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
decode_sequence_maxsz + \
decode_putrootfh_maxsz + \
decode_fsinfo_maxsz)
+#if defined(CONFIG_NFS_V4_1)
#define NFS4_enc_reclaim_complete_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_reclaim_complete_maxsz)
@@ -2957,6 +2959,8 @@ static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_nops(&hdr);
}
+#endif
+
/*
* a GET_LEASE_TIME request
*/
@@ -2977,6 +2981,8 @@ static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req,
encode_nops(&hdr);
}
+#ifdef CONFIG_NFS_V4_1
+
/*
* a RECLAIM_COMPLETE request
*/
@@ -3187,7 +3193,7 @@ static bool __decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected,
return true;
out_status:
nfserr = be32_to_cpup(p);
- trace_nfs4_xdr_status(opnum, nfserr);
+ trace_nfs4_xdr_status(xdr, opnum, nfserr);
*nfs_retval = nfs4_stat_to_errno(nfserr);
return true;
out_bad_operation:
@@ -3427,7 +3433,7 @@ static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_LEASE_TIME;
}
- dprintk("%s: file size=%u\n", __func__, (unsigned int)*res);
+ dprintk("%s: lease time=%u\n", __func__, (unsigned int)*res);
return 0;
}
@@ -7122,6 +7128,8 @@ static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
return status;
}
+#endif
+
/*
* Decode GET_LEASE_TIME response
*/
@@ -7143,6 +7151,8 @@ static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp,
return status;
}
+#ifdef CONFIG_NFS_V4_1
+
/*
* Decode RECLAIM_COMPLETE response
*/
@@ -7551,7 +7561,7 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC41(CREATE_SESSION, enc_create_session, dec_create_session),
PROC41(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
PROC41(SEQUENCE, enc_sequence, dec_sequence),
- PROC41(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
+ PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
PROC41(RECLAIM_COMPLETE,enc_reclaim_complete, dec_reclaim_complete),
PROC41(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
PROC41(LAYOUTGET, enc_layoutget, dec_layoutget),
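
Besides the trace_nfs4_xdr_status() signature change and the corrected dprintk wording, the nfs4xdr.c hunks reshuffle the preprocessor guards so that GET_LEASE_TIME is built for every NFSv4 minor version rather than only under CONFIG_NFS_V4_1, which is why its procedure entry drops from PROC41 to PROC. The resulting guard shape, in outline (illustrative stubs only; the real encode/decode helpers live in this file):

#if defined(CONFIG_NFS_V4_1)
static void nfs4_xdr_enc_sequence(void)	{ /* v4.1+ only */ }
#endif

static void nfs4_xdr_enc_get_lease_time(void)	{ /* now built for all of v4 */ }

#ifdef CONFIG_NFS_V4_1
static void nfs4_xdr_enc_reclaim_complete(void)	{ /* v4.1+ only */ }
#endif
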
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index a0d6910aa03a..976d4089e267 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -11,6 +11,16 @@
#include <linux/tracepoint.h>
#include <linux/iversion.h>
+TRACE_DEFINE_ENUM(DT_UNKNOWN);
+TRACE_DEFINE_ENUM(DT_FIFO);
+TRACE_DEFINE_ENUM(DT_CHR);
+TRACE_DEFINE_ENUM(DT_DIR);
+TRACE_DEFINE_ENUM(DT_BLK);
+TRACE_DEFINE_ENUM(DT_REG);
+TRACE_DEFINE_ENUM(DT_LNK);
+TRACE_DEFINE_ENUM(DT_SOCK);
+TRACE_DEFINE_ENUM(DT_WHT);
+
#define nfs_show_file_type(ftype) \
__print_symbolic(ftype, \
{ DT_UNKNOWN, "UNKNOWN" }, \
@@ -23,25 +33,57 @@
{ DT_SOCK, "SOCK" }, \
{ DT_WHT, "WHT" })
+TRACE_DEFINE_ENUM(NFS_INO_INVALID_DATA);
+TRACE_DEFINE_ENUM(NFS_INO_INVALID_ATIME);
+TRACE_DEFINE_ENUM(NFS_INO_INVALID_ACCESS);
+TRACE_DEFINE_ENUM(NFS_INO_INVALID_ACL);
+TRACE_DEFINE_ENUM(NFS_INO_REVAL_PAGECACHE);
+TRACE_DEFINE_ENUM(NFS_INO_REVAL_FORCED);
+TRACE_DEFINE_ENUM(NFS_INO_INVALID_LABEL);
+TRACE_DEFINE_ENUM(NFS_INO_INVALID_CHANGE);
+TRACE_DEFINE_ENUM(NFS_INO_INVALID_CTIME);
+TRACE_DEFINE_ENUM(NFS_INO_INVALID_MTIME);
+TRACE_DEFINE_ENUM(NFS_INO_INVALID_SIZE);
+TRACE_DEFINE_ENUM(NFS_INO_INVALID_OTHER);
+
#define nfs_show_cache_validity(v) \
__print_flags(v, "|", \
- { NFS_INO_INVALID_ATTR, "INVALID_ATTR" }, \
{ NFS_INO_INVALID_DATA, "INVALID_DATA" }, \
{ NFS_INO_INVALID_ATIME, "INVALID_ATIME" }, \
{ NFS_INO_INVALID_ACCESS, "INVALID_ACCESS" }, \
{ NFS_INO_INVALID_ACL, "INVALID_ACL" }, \
{ NFS_INO_REVAL_PAGECACHE, "REVAL_PAGECACHE" }, \
{ NFS_INO_REVAL_FORCED, "REVAL_FORCED" }, \
- { NFS_INO_INVALID_LABEL, "INVALID_LABEL" })
+ { NFS_INO_INVALID_LABEL, "INVALID_LABEL" }, \
+ { NFS_INO_INVALID_CHANGE, "INVALID_CHANGE" }, \
+ { NFS_INO_INVALID_CTIME, "INVALID_CTIME" }, \
+ { NFS_INO_INVALID_MTIME, "INVALID_MTIME" }, \
+ { NFS_INO_INVALID_SIZE, "INVALID_SIZE" }, \
+ { NFS_INO_INVALID_OTHER, "INVALID_OTHER" })
+
+TRACE_DEFINE_ENUM(NFS_INO_ADVISE_RDPLUS);
+TRACE_DEFINE_ENUM(NFS_INO_STALE);
+TRACE_DEFINE_ENUM(NFS_INO_ACL_LRU_SET);
+TRACE_DEFINE_ENUM(NFS_INO_INVALIDATING);
+TRACE_DEFINE_ENUM(NFS_INO_FSCACHE);
+TRACE_DEFINE_ENUM(NFS_INO_FSCACHE_LOCK);
+TRACE_DEFINE_ENUM(NFS_INO_LAYOUTCOMMIT);
+TRACE_DEFINE_ENUM(NFS_INO_LAYOUTCOMMITTING);
+TRACE_DEFINE_ENUM(NFS_INO_LAYOUTSTATS);
+TRACE_DEFINE_ENUM(NFS_INO_ODIRECT);
#define nfs_show_nfsi_flags(v) \
__print_flags(v, "|", \
- { 1 << NFS_INO_ADVISE_RDPLUS, "ADVISE_RDPLUS" }, \
- { 1 << NFS_INO_STALE, "STALE" }, \
- { 1 << NFS_INO_INVALIDATING, "INVALIDATING" }, \
- { 1 << NFS_INO_FSCACHE, "FSCACHE" }, \
- { 1 << NFS_INO_LAYOUTCOMMIT, "NEED_LAYOUTCOMMIT" }, \
- { 1 << NFS_INO_LAYOUTCOMMITTING, "LAYOUTCOMMIT" })
+ { BIT(NFS_INO_ADVISE_RDPLUS), "ADVISE_RDPLUS" }, \
+ { BIT(NFS_INO_STALE), "STALE" }, \
+ { BIT(NFS_INO_ACL_LRU_SET), "ACL_LRU_SET" }, \
+ { BIT(NFS_INO_INVALIDATING), "INVALIDATING" }, \
+ { BIT(NFS_INO_FSCACHE), "FSCACHE" }, \
+ { BIT(NFS_INO_FSCACHE_LOCK), "FSCACHE_LOCK" }, \
+ { BIT(NFS_INO_LAYOUTCOMMIT), "NEED_LAYOUTCOMMIT" }, \
+ { BIT(NFS_INO_LAYOUTCOMMITTING), "LAYOUTCOMMIT" }, \
+ { BIT(NFS_INO_LAYOUTSTATS), "LAYOUTSTATS" }, \
+ { BIT(NFS_INO_ODIRECT), "ODIRECT" })
DECLARE_EVENT_CLASS(nfs_inode_event,
TP_PROTO(
@@ -83,7 +125,7 @@ DECLARE_EVENT_CLASS(nfs_inode_event_done,
TP_ARGS(inode, error),
TP_STRUCT__entry(
- __field(int, error)
+ __field(unsigned long, error)
__field(dev_t, dev)
__field(u32, fhandle)
__field(unsigned char, type)
@@ -96,7 +138,7 @@ DECLARE_EVENT_CLASS(nfs_inode_event_done,
TP_fast_assign(
const struct nfs_inode *nfsi = NFS_I(inode);
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = nfsi->fileid;
__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
@@ -108,10 +150,10 @@ DECLARE_EVENT_CLASS(nfs_inode_event_done,
),
TP_printk(
- "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"type=%u (%s) version=%llu size=%lld "
- "cache_validity=%lu (%s) nfs_flags=%ld (%s)",
- __entry->error,
+ "cache_validity=0x%lx (%s) nfs_flags=0x%lx (%s)",
+ -__entry->error, nfs_show_status(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
@@ -158,13 +200,41 @@ DEFINE_NFS_INODE_EVENT_DONE(nfs_fsync_exit);
DEFINE_NFS_INODE_EVENT(nfs_access_enter);
DEFINE_NFS_INODE_EVENT_DONE(nfs_access_exit);
+TRACE_DEFINE_ENUM(LOOKUP_FOLLOW);
+TRACE_DEFINE_ENUM(LOOKUP_DIRECTORY);
+TRACE_DEFINE_ENUM(LOOKUP_AUTOMOUNT);
+TRACE_DEFINE_ENUM(LOOKUP_PARENT);
+TRACE_DEFINE_ENUM(LOOKUP_REVAL);
+TRACE_DEFINE_ENUM(LOOKUP_RCU);
+TRACE_DEFINE_ENUM(LOOKUP_NO_REVAL);
+TRACE_DEFINE_ENUM(LOOKUP_NO_EVAL);
+TRACE_DEFINE_ENUM(LOOKUP_OPEN);
+TRACE_DEFINE_ENUM(LOOKUP_CREATE);
+TRACE_DEFINE_ENUM(LOOKUP_EXCL);
+TRACE_DEFINE_ENUM(LOOKUP_RENAME_TARGET);
+TRACE_DEFINE_ENUM(LOOKUP_JUMPED);
+TRACE_DEFINE_ENUM(LOOKUP_ROOT);
+TRACE_DEFINE_ENUM(LOOKUP_EMPTY);
+TRACE_DEFINE_ENUM(LOOKUP_DOWN);
+
#define show_lookup_flags(flags) \
- __print_flags((unsigned long)flags, "|", \
- { LOOKUP_AUTOMOUNT, "AUTOMOUNT" }, \
+ __print_flags(flags, "|", \
+ { LOOKUP_FOLLOW, "FOLLOW" }, \
{ LOOKUP_DIRECTORY, "DIRECTORY" }, \
+ { LOOKUP_AUTOMOUNT, "AUTOMOUNT" }, \
+ { LOOKUP_PARENT, "PARENT" }, \
+ { LOOKUP_REVAL, "REVAL" }, \
+ { LOOKUP_RCU, "RCU" }, \
+ { LOOKUP_NO_REVAL, "NO_REVAL" }, \
+ { LOOKUP_NO_EVAL, "NO_EVAL" }, \
{ LOOKUP_OPEN, "OPEN" }, \
{ LOOKUP_CREATE, "CREATE" }, \
- { LOOKUP_EXCL, "EXCL" })
+ { LOOKUP_EXCL, "EXCL" }, \
+ { LOOKUP_RENAME_TARGET, "RENAME_TARGET" }, \
+ { LOOKUP_JUMPED, "JUMPED" }, \
+ { LOOKUP_ROOT, "ROOT" }, \
+ { LOOKUP_EMPTY, "EMPTY" }, \
+ { LOOKUP_DOWN, "DOWN" })
DECLARE_EVENT_CLASS(nfs_lookup_event,
TP_PROTO(
@@ -176,7 +246,7 @@ DECLARE_EVENT_CLASS(nfs_lookup_event,
TP_ARGS(dir, dentry, flags),
TP_STRUCT__entry(
- __field(unsigned int, flags)
+ __field(unsigned long, flags)
__field(dev_t, dev)
__field(u64, dir)
__string(name, dentry->d_name.name)
@@ -190,7 +260,7 @@ DECLARE_EVENT_CLASS(nfs_lookup_event,
),
TP_printk(
- "flags=%u (%s) name=%02x:%02x:%llu/%s",
+ "flags=0x%lx (%s) name=%02x:%02x:%llu/%s",
__entry->flags,
show_lookup_flags(__entry->flags),
MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -219,8 +289,8 @@ DECLARE_EVENT_CLASS(nfs_lookup_event_done,
TP_ARGS(dir, dentry, flags, error),
TP_STRUCT__entry(
- __field(int, error)
- __field(unsigned int, flags)
+ __field(unsigned long, error)
+ __field(unsigned long, flags)
__field(dev_t, dev)
__field(u64, dir)
__string(name, dentry->d_name.name)
@@ -229,14 +299,14 @@ DECLARE_EVENT_CLASS(nfs_lookup_event_done,
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir);
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__entry->flags = flags;
__assign_str(name, dentry->d_name.name);
),
TP_printk(
- "error=%d flags=%u (%s) name=%02x:%02x:%llu/%s",
- __entry->error,
+ "error=%ld (%s) flags=0x%lx (%s) name=%02x:%02x:%llu/%s",
+ -__entry->error, nfs_show_status(__entry->error),
__entry->flags,
show_lookup_flags(__entry->flags),
MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -260,15 +330,43 @@ DEFINE_NFS_LOOKUP_EVENT_DONE(nfs_lookup_exit);
DEFINE_NFS_LOOKUP_EVENT(nfs_lookup_revalidate_enter);
DEFINE_NFS_LOOKUP_EVENT_DONE(nfs_lookup_revalidate_exit);
+TRACE_DEFINE_ENUM(O_WRONLY);
+TRACE_DEFINE_ENUM(O_RDWR);
+TRACE_DEFINE_ENUM(O_CREAT);
+TRACE_DEFINE_ENUM(O_EXCL);
+TRACE_DEFINE_ENUM(O_NOCTTY);
+TRACE_DEFINE_ENUM(O_TRUNC);
+TRACE_DEFINE_ENUM(O_APPEND);
+TRACE_DEFINE_ENUM(O_NONBLOCK);
+TRACE_DEFINE_ENUM(O_DSYNC);
+TRACE_DEFINE_ENUM(O_DIRECT);
+TRACE_DEFINE_ENUM(O_LARGEFILE);
+TRACE_DEFINE_ENUM(O_DIRECTORY);
+TRACE_DEFINE_ENUM(O_NOFOLLOW);
+TRACE_DEFINE_ENUM(O_NOATIME);
+TRACE_DEFINE_ENUM(O_CLOEXEC);
+
#define show_open_flags(flags) \
- __print_flags((unsigned long)flags, "|", \
+ __print_flags(flags, "|", \
+ { O_WRONLY, "O_WRONLY" }, \
+ { O_RDWR, "O_RDWR" }, \
{ O_CREAT, "O_CREAT" }, \
{ O_EXCL, "O_EXCL" }, \
+ { O_NOCTTY, "O_NOCTTY" }, \
{ O_TRUNC, "O_TRUNC" }, \
{ O_APPEND, "O_APPEND" }, \
+ { O_NONBLOCK, "O_NONBLOCK" }, \
{ O_DSYNC, "O_DSYNC" }, \
{ O_DIRECT, "O_DIRECT" }, \
- { O_DIRECTORY, "O_DIRECTORY" })
+ { O_LARGEFILE, "O_LARGEFILE" }, \
+ { O_DIRECTORY, "O_DIRECTORY" }, \
+ { O_NOFOLLOW, "O_NOFOLLOW" }, \
+ { O_NOATIME, "O_NOATIME" }, \
+ { O_CLOEXEC, "O_CLOEXEC" })
+
+TRACE_DEFINE_ENUM(FMODE_READ);
+TRACE_DEFINE_ENUM(FMODE_WRITE);
+TRACE_DEFINE_ENUM(FMODE_EXEC);
#define show_fmode_flags(mode) \
__print_flags(mode, "|", \
@@ -286,7 +384,7 @@ TRACE_EVENT(nfs_atomic_open_enter,
TP_ARGS(dir, ctx, flags),
TP_STRUCT__entry(
- __field(unsigned int, flags)
+ __field(unsigned long, flags)
__field(unsigned int, fmode)
__field(dev_t, dev)
__field(u64, dir)
@@ -302,7 +400,7 @@ TRACE_EVENT(nfs_atomic_open_enter,
),
TP_printk(
- "flags=%u (%s) fmode=%s name=%02x:%02x:%llu/%s",
+ "flags=0x%lx (%s) fmode=%s name=%02x:%02x:%llu/%s",
__entry->flags,
show_open_flags(__entry->flags),
show_fmode_flags(__entry->fmode),
@@ -323,8 +421,8 @@ TRACE_EVENT(nfs_atomic_open_exit,
TP_ARGS(dir, ctx, flags, error),
TP_STRUCT__entry(
- __field(int, error)
- __field(unsigned int, flags)
+ __field(unsigned long, error)
+ __field(unsigned long, flags)
__field(unsigned int, fmode)
__field(dev_t, dev)
__field(u64, dir)
@@ -332,7 +430,7 @@ TRACE_EVENT(nfs_atomic_open_exit,
),
TP_fast_assign(
- __entry->error = error;
+ __entry->error = -error;
__entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir);
__entry->flags = flags;
@@ -341,9 +439,9 @@ TRACE_EVENT(nfs_atomic_open_exit,
),
TP_printk(
- "error=%d flags=%u (%s) fmode=%s "
+ "error=%ld (%s) flags=0x%lx (%s) fmode=%s "
"name=%02x:%02x:%llu/%s",
- __entry->error,
+ -__entry->error, nfs_show_status(__entry->error),
__entry->flags,
show_open_flags(__entry->flags),
show_fmode_flags(__entry->fmode),
@@ -363,7 +461,7 @@ TRACE_EVENT(nfs_create_enter,
TP_ARGS(dir, dentry, flags),
TP_STRUCT__entry(
- __field(unsigned int, flags)
+ __field(unsigned long, flags)
__field(dev_t, dev)
__field(u64, dir)
__string(name, dentry->d_name.name)
@@ -377,7 +475,7 @@ TRACE_EVENT(nfs_create_enter,
),
TP_printk(
- "flags=%u (%s) name=%02x:%02x:%llu/%s",
+ "flags=0x%lx (%s) name=%02x:%02x:%llu/%s",
__entry->flags,
show_open_flags(__entry->flags),
MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -397,15 +495,15 @@ TRACE_EVENT(nfs_create_exit,
TP_ARGS(dir, dentry, flags, error),
TP_STRUCT__entry(
- __field(int, error)
- __field(unsigned int, flags)
+ __field(unsigned long, error)
+ __field(unsigned long, flags)
__field(dev_t, dev)
__field(u64, dir)
__string(name, dentry->d_name.name)
),
TP_fast_assign(
- __entry->error = error;
+ __entry->error = -error;
__entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir);
__entry->flags = flags;
@@ -413,8 +511,8 @@ TRACE_EVENT(nfs_create_exit,
),
TP_printk(
- "error=%d flags=%u (%s) name=%02x:%02x:%llu/%s",
- __entry->error,
+ "error=%ld (%s) flags=0x%lx (%s) name=%02x:%02x:%llu/%s",
+ -__entry->error, nfs_show_status(__entry->error),
__entry->flags,
show_open_flags(__entry->flags),
MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -469,7 +567,7 @@ DECLARE_EVENT_CLASS(nfs_directory_event_done,
TP_ARGS(dir, dentry, error),
TP_STRUCT__entry(
- __field(int, error)
+ __field(unsigned long, error)
__field(dev_t, dev)
__field(u64, dir)
__string(name, dentry->d_name.name)
@@ -478,13 +576,13 @@ DECLARE_EVENT_CLASS(nfs_directory_event_done,
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir);
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__assign_str(name, dentry->d_name.name);
),
TP_printk(
- "error=%d name=%02x:%02x:%llu/%s",
- __entry->error,
+ "error=%ld (%s) name=%02x:%02x:%llu/%s",
+ -__entry->error, nfs_show_status(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->dir,
__get_str(name)
@@ -557,7 +655,7 @@ TRACE_EVENT(nfs_link_exit,
TP_ARGS(inode, dir, dentry, error),
TP_STRUCT__entry(
- __field(int, error)
+ __field(unsigned long, error)
__field(dev_t, dev)
__field(u64, fileid)
__field(u64, dir)
@@ -568,13 +666,13 @@ TRACE_EVENT(nfs_link_exit,
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = NFS_FILEID(inode);
__entry->dir = NFS_FILEID(dir);
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__assign_str(name, dentry->d_name.name);
),
TP_printk(
- "error=%d fileid=%02x:%02x:%llu name=%02x:%02x:%llu/%s",
- __entry->error,
+ "error=%ld (%s) fileid=%02x:%02x:%llu name=%02x:%02x:%llu/%s",
+ -__entry->error, nfs_show_status(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->fileid,
MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -642,7 +740,7 @@ DECLARE_EVENT_CLASS(nfs_rename_event_done,
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(int, error)
+ __field(unsigned long, error)
__field(u64, old_dir)
__string(old_name, old_dentry->d_name.name)
__field(u64, new_dir)
@@ -651,17 +749,17 @@ DECLARE_EVENT_CLASS(nfs_rename_event_done,
TP_fast_assign(
__entry->dev = old_dir->i_sb->s_dev;
+ __entry->error = -error;
__entry->old_dir = NFS_FILEID(old_dir);
__entry->new_dir = NFS_FILEID(new_dir);
- __entry->error = error;
__assign_str(old_name, old_dentry->d_name.name);
__assign_str(new_name, new_dentry->d_name.name);
),
TP_printk(
- "error=%d old_name=%02x:%02x:%llu/%s "
+ "error=%ld (%s) old_name=%02x:%02x:%llu/%s "
"new_name=%02x:%02x:%llu/%s",
- __entry->error,
+ -__entry->error, nfs_show_status(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->old_dir,
__get_str(old_name),
@@ -697,7 +795,7 @@ TRACE_EVENT(nfs_sillyrename_unlink,
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(int, error)
+ __field(unsigned long, error)
__field(u64, dir)
__dynamic_array(char, name, data->args.name.len + 1)
),
@@ -707,15 +805,15 @@ TRACE_EVENT(nfs_sillyrename_unlink,
size_t len = data->args.name.len;
__entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir);
- __entry->error = error;
+ __entry->error = -error;
memcpy(__get_str(name),
data->args.name.name, len);
__get_str(name)[len] = 0;
),
TP_printk(
- "error=%d name=%02x:%02x:%llu/%s",
- __entry->error,
+ "error=%ld (%s) name=%02x:%02x:%llu/%s",
+ -__entry->error, nfs_show_status(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->dir,
__get_str(name)
@@ -974,6 +1072,8 @@ TRACE_DEFINE_ENUM(NFSERR_PERM);
TRACE_DEFINE_ENUM(NFSERR_NOENT);
TRACE_DEFINE_ENUM(NFSERR_IO);
TRACE_DEFINE_ENUM(NFSERR_NXIO);
+TRACE_DEFINE_ENUM(ECHILD);
+TRACE_DEFINE_ENUM(NFSERR_EAGAIN);
TRACE_DEFINE_ENUM(NFSERR_ACCES);
TRACE_DEFINE_ENUM(NFSERR_EXIST);
TRACE_DEFINE_ENUM(NFSERR_XDEV);
@@ -985,6 +1085,7 @@ TRACE_DEFINE_ENUM(NFSERR_FBIG);
TRACE_DEFINE_ENUM(NFSERR_NOSPC);
TRACE_DEFINE_ENUM(NFSERR_ROFS);
TRACE_DEFINE_ENUM(NFSERR_MLINK);
+TRACE_DEFINE_ENUM(NFSERR_OPNOTSUPP);
TRACE_DEFINE_ENUM(NFSERR_NAMETOOLONG);
TRACE_DEFINE_ENUM(NFSERR_NOTEMPTY);
TRACE_DEFINE_ENUM(NFSERR_DQUOT);
@@ -1007,6 +1108,8 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
{ NFSERR_NOENT, "NOENT" }, \
{ NFSERR_IO, "IO" }, \
{ NFSERR_NXIO, "NXIO" }, \
+ { ECHILD, "CHILD" }, \
+ { NFSERR_EAGAIN, "AGAIN" }, \
{ NFSERR_ACCES, "ACCES" }, \
{ NFSERR_EXIST, "EXIST" }, \
{ NFSERR_XDEV, "XDEV" }, \
@@ -1018,6 +1121,7 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
{ NFSERR_NOSPC, "NOSPC" }, \
{ NFSERR_ROFS, "ROFS" }, \
{ NFSERR_MLINK, "MLINK" }, \
+ { NFSERR_OPNOTSUPP, "OPNOTSUPP" }, \
{ NFSERR_NAMETOOLONG, "NAMETOOLONG" }, \
{ NFSERR_NOTEMPTY, "NOTEMPTY" }, \
{ NFSERR_DQUOT, "DQUOT" }, \
@@ -1035,22 +1139,33 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
TRACE_EVENT(nfs_xdr_status,
TP_PROTO(
+ const struct xdr_stream *xdr,
int error
),
- TP_ARGS(error),
+ TP_ARGS(xdr, error),
TP_STRUCT__entry(
- __field(int, error)
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(unsigned long, error)
),
TP_fast_assign(
+ const struct rpc_rqst *rqstp = xdr->rqst;
+ const struct rpc_task *task = rqstp->rq_task;
+
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
__entry->error = error;
),
TP_printk(
- "error=%d (%s)",
- __entry->error, nfs_show_status(__entry->error)
+ "task:%u@%d xid=0x%08x error=%ld (%s)",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ -__entry->error, nfs_show_status(__entry->error)
)
);
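
Two things happen throughout these nfstrace.h hunks: every symbolic constant used by __print_symbolic()/__print_flags() gains a TRACE_DEFINE_ENUM() so userspace trace tools can resolve it, and the nfsi flag table switches from open-coded 1 << shifts to BIT(). The second matters because the NFS_INO_* values are bit numbers, not masks. A userspace model of the decoding (a sketch; the two bit numbers are illustrative, not the kernel's actual values):

#include <stdio.h>

#define BIT(nr)	(1UL << (nr))

struct flag_name { unsigned long mask; const char *name; };

static void print_flags(unsigned long v, const struct flag_name *t, int n)
{
	const char *sep = "";
	int i;

	printf("0x%lx (", v);
	for (i = 0; i < n; i++) {
		if (v & t[i].mask) {
			printf("%s%s", sep, t[i].name);
			sep = "|";
		}
	}
	printf(")\n");
}

int main(void)
{
	enum { NFS_INO_STALE = 1, NFS_INO_FSCACHE = 3 };	/* bit numbers */
	const struct flag_name table[] = {
		{ BIT(NFS_INO_STALE),	"STALE" },
		{ BIT(NFS_INO_FSCACHE),	"FSCACHE" },
	};

	print_flags(BIT(NFS_INO_STALE) | BIT(NFS_INO_FSCACHE), table, 2);
	return 0;	/* prints: 0xa (STALE|FSCACHE) */
}
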
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 6ef5278326b6..ed4e1b07447b 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -77,7 +77,7 @@ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
static inline struct nfs_page *
nfs_page_alloc(void)
{
- struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
+ struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
if (p)
INIT_LIST_HEAD(&p->wb_list);
return p;
@@ -775,8 +775,6 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
if (pagecount <= ARRAY_SIZE(pg_array->page_array))
pg_array->pagevec = pg_array->page_array;
else {
- if (hdr->rw_mode == FMODE_WRITE)
- gfp_flags = GFP_NOIO;
pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
if (!pg_array->pagevec) {
pg_array->npages = 0;
@@ -851,7 +849,7 @@ nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
desc->pg_mirrors_dynamic = NULL;
if (mirror_count == 1)
return desc->pg_mirrors_static;
- ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_NOFS);
+ ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_KERNEL);
if (ret != NULL) {
for (i = 0; i < mirror_count; i++)
nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
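
The pagelist.c hunks replace GFP_NOIO and GFP_NOFS with plain GFP_KERNEL. The modern way to express "no reclaim recursion into fs/IO here" is not per-allocation GFP flags but the caller-scoped memalloc API, which implicitly restricts every GFP_KERNEL allocation inside the region. A sketch of that pattern, assuming the kernel API from <linux/sched/mm.h> (not code from this patch):

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_in_writeback_path(size_t size)
{
	unsigned int flags = memalloc_noio_save();	/* or memalloc_nofs_save() */
	void *p = kmalloc(size, GFP_KERNEL);		/* behaves as GFP_NOIO here */

	memalloc_noio_restore(flags);
	return p;
}
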
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 83722e936b4a..75bd5b552ba4 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1890,7 +1890,7 @@ lookup_again:
spin_unlock(&ino->i_lock);
lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
!atomic_read(&lo->plh_outstanding)));
- if (IS_ERR(lseg) || !list_empty(&lo->plh_segs))
+ if (IS_ERR(lseg))
goto out_put_layout_hdr;
pnfs_put_layout_hdr(lo);
goto lookup_again;
@@ -1915,6 +1915,7 @@ lookup_again:
* stateid.
*/
if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
+ int status;
/*
* The first layoutget for the file. Need to serialize per
@@ -1934,13 +1935,20 @@ lookup_again:
}
first = true;
- if (nfs4_select_rw_stateid(ctx->state,
+ status = nfs4_select_rw_stateid(ctx->state,
iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ,
- NULL, &stateid, NULL) != 0) {
+ NULL, &stateid, NULL);
+ if (status != 0) {
trace_pnfs_update_layout(ino, pos, count,
iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_INVALID_OPEN);
- goto out_unlock;
+ if (status != -EAGAIN)
+ goto out_unlock;
+ spin_unlock(&ino->i_lock);
+ nfs4_schedule_stateid_recovery(server, ctx->state);
+ pnfs_clear_first_layoutget(lo);
+ pnfs_put_layout_hdr(lo);
+ goto lookup_again;
}
} else {
nfs4_stateid_copy(&stateid, &lo->plh_stateid);
@@ -2029,6 +2037,8 @@ lookup_again:
out_put_layout_hdr:
if (first)
pnfs_clear_first_layoutget(lo);
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
+ PNFS_UPDATE_LAYOUT_EXIT);
pnfs_put_layout_hdr(lo);
out:
dprintk("%s: inode %s/%llu pNFS layout segment %s for "
@@ -2468,7 +2478,7 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
wb_size,
IOMODE_RW,
false,
- GFP_NOFS);
+ GFP_KERNEL);
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
pgio->pg_lseg = NULL;
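
The key behavioral change in pnfs.c is that a failed nfs4_select_rw_stateid() no longer always aborts the layoutget: -EAGAIN now means the open stateid needs recovery, so the code drops the lock, schedules recovery, releases the layout header and retries from lookup_again. The shape of that branch, as a compilable sketch with hypothetical stubs in place of the real helpers:

#include <errno.h>

enum { RETRY_LOOKUP = 1 };

/* Hypothetical stubs standing in for the real NFS helpers. */
static int nfs4_select_rw_stateid_stub(void) { return -EAGAIN; }
static void unlock_inode(void) {}
static void schedule_stateid_recovery(void) {}
static void put_layout_hdr(void) {}

int select_stateid_or_retry(void)
{
	int status = nfs4_select_rw_stateid_stub();

	if (status == 0)
		return 0;			/* stateid is usable */
	if (status != -EAGAIN)
		return status;			/* hard failure: give up */
	unlock_inode();				/* drop i_lock before sleeping */
	schedule_stateid_recovery();		/* kick the state manager */
	put_layout_hdr();
	return RETRY_LOOKUP;			/* caller jumps to lookup_again */
}
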
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index f88ddac2dcdf..628631e2e34f 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -77,6 +77,8 @@
#define NFS_DEFAULT_VERSION 2
#endif
+#define NFS_MAX_CONNECTIONS 16
+
enum {
/* Mount options that take no arguments */
Opt_soft, Opt_softerr, Opt_hard,
@@ -108,6 +110,7 @@ enum {
Opt_nfsvers,
Opt_sec, Opt_proto, Opt_mountproto, Opt_mounthost,
Opt_addr, Opt_mountaddr, Opt_clientaddr,
+ Opt_nconnect,
Opt_lookupcache,
Opt_fscache_uniq,
Opt_local_lock,
@@ -181,6 +184,8 @@ static const match_table_t nfs_mount_option_tokens = {
{ Opt_mounthost, "mounthost=%s" },
{ Opt_mountaddr, "mountaddr=%s" },
+ { Opt_nconnect, "nconnect=%s" },
+
{ Opt_lookupcache, "lookupcache=%s" },
{ Opt_fscache_uniq, "fsc=%s" },
{ Opt_local_lock, "local_lock=%s" },
@@ -452,10 +457,8 @@ int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
struct dentry *pd_dentry;
pd_dentry = dget_parent(dentry);
- if (pd_dentry != NULL) {
- nfs_zap_caches(d_inode(pd_dentry));
- dput(pd_dentry);
- }
+ nfs_zap_caches(d_inode(pd_dentry));
+ dput(pd_dentry);
}
nfs_free_fattr(res.fattr);
if (error < 0)
@@ -582,7 +585,7 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
}
default:
if (showdefaults)
- seq_printf(m, ",mountaddr=unspecified");
+ seq_puts(m, ",mountaddr=unspecified");
}
if (nfss->mountd_version || showdefaults)
@@ -673,6 +676,8 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
seq_printf(m, ",proto=%s",
rpc_peeraddr2str(nfss->client, RPC_DISPLAY_NETID));
rcu_read_unlock();
+ if (clp->cl_nconnect > 0)
+ seq_printf(m, ",nconnect=%u", clp->cl_nconnect);
if (version == 4) {
if (nfss->port != NFS_PORT)
seq_printf(m, ",port=%u", nfss->port);
@@ -690,29 +695,29 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
nfs_show_nfsv4_options(m, nfss, showdefaults);
if (nfss->options & NFS_OPTION_FSCACHE)
- seq_printf(m, ",fsc");
+ seq_puts(m, ",fsc");
if (nfss->options & NFS_OPTION_MIGRATION)
- seq_printf(m, ",migration");
+ seq_puts(m, ",migration");
if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) {
if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
- seq_printf(m, ",lookupcache=none");
+ seq_puts(m, ",lookupcache=none");
else
- seq_printf(m, ",lookupcache=pos");
+ seq_puts(m, ",lookupcache=pos");
}
local_flock = nfss->flags & NFS_MOUNT_LOCAL_FLOCK;
local_fcntl = nfss->flags & NFS_MOUNT_LOCAL_FCNTL;
if (!local_flock && !local_fcntl)
- seq_printf(m, ",local_lock=none");
+ seq_puts(m, ",local_lock=none");
else if (local_flock && local_fcntl)
- seq_printf(m, ",local_lock=all");
+ seq_puts(m, ",local_lock=all");
else if (local_flock)
- seq_printf(m, ",local_lock=flock");
+ seq_puts(m, ",local_lock=flock");
else
- seq_printf(m, ",local_lock=posix");
+ seq_puts(m, ",local_lock=posix");
}
/*
@@ -735,11 +740,21 @@ int nfs_show_options(struct seq_file *m, struct dentry *root)
EXPORT_SYMBOL_GPL(nfs_show_options);
#if IS_ENABLED(CONFIG_NFS_V4)
+static void show_lease(struct seq_file *m, struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+ unsigned long expire;
+
+ seq_printf(m, ",lease_time=%ld", clp->cl_lease_time / HZ);
+ expire = clp->cl_last_renewal + clp->cl_lease_time;
+ seq_printf(m, ",lease_expired=%ld",
+ time_after(expire, jiffies) ? 0 : (jiffies - expire) / HZ);
+}
#ifdef CONFIG_NFS_V4_1
static void show_sessions(struct seq_file *m, struct nfs_server *server)
{
if (nfs4_has_session(server->nfs_client))
- seq_printf(m, ",sessions");
+ seq_puts(m, ",sessions");
}
#else
static void show_sessions(struct seq_file *m, struct nfs_server *server) {}
@@ -816,7 +831,7 @@ int nfs_show_stats(struct seq_file *m, struct dentry *root)
/*
* Display all mount option settings
*/
- seq_printf(m, "\n\topts:\t");
+ seq_puts(m, "\n\topts:\t");
seq_puts(m, sb_rdonly(root->d_sb) ? "ro" : "rw");
seq_puts(m, root->d_sb->s_flags & SB_SYNCHRONOUS ? ",sync" : "");
seq_puts(m, root->d_sb->s_flags & SB_NOATIME ? ",noatime" : "");
@@ -827,7 +842,7 @@ int nfs_show_stats(struct seq_file *m, struct dentry *root)
show_implementation_id(m, nfss);
- seq_printf(m, "\n\tcaps:\t");
+ seq_puts(m, "\n\tcaps:\t");
seq_printf(m, "caps=0x%x", nfss->caps);
seq_printf(m, ",wtmult=%u", nfss->wtmult);
seq_printf(m, ",dtsize=%u", nfss->dtsize);
@@ -836,13 +851,14 @@ int nfs_show_stats(struct seq_file *m, struct dentry *root)
#if IS_ENABLED(CONFIG_NFS_V4)
if (nfss->nfs_client->rpc_ops->version == 4) {
- seq_printf(m, "\n\tnfsv4:\t");
+ seq_puts(m, "\n\tnfsv4:\t");
seq_printf(m, "bm0=0x%x", nfss->attr_bitmask[0]);
seq_printf(m, ",bm1=0x%x", nfss->attr_bitmask[1]);
seq_printf(m, ",bm2=0x%x", nfss->attr_bitmask[2]);
seq_printf(m, ",acl=0x%x", nfss->acl_bitmask);
show_sessions(m, nfss);
show_pnfs(m, nfss);
+ show_lease(m, nfss);
}
#endif
@@ -874,20 +890,20 @@ int nfs_show_stats(struct seq_file *m, struct dentry *root)
preempt_enable();
}
- seq_printf(m, "\n\tevents:\t");
+ seq_puts(m, "\n\tevents:\t");
for (i = 0; i < __NFSIOS_COUNTSMAX; i++)
seq_printf(m, "%lu ", totals.events[i]);
- seq_printf(m, "\n\tbytes:\t");
+ seq_puts(m, "\n\tbytes:\t");
for (i = 0; i < __NFSIOS_BYTESMAX; i++)
seq_printf(m, "%Lu ", totals.bytes[i]);
#ifdef CONFIG_NFS_FSCACHE
if (nfss->options & NFS_OPTION_FSCACHE) {
- seq_printf(m, "\n\tfsc:\t");
+ seq_puts(m, "\n\tfsc:\t");
for (i = 0; i < __NFSIOS_FSCACHEMAX; i++)
seq_printf(m, "%Lu ", totals.fscache[i]);
}
#endif
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
rpc_clnt_show_stats(m, nfss->client);
@@ -1549,6 +1565,11 @@ static int nfs_parse_mount_options(char *raw,
if (mnt->mount_server.addrlen == 0)
goto out_invalid_address;
break;
+ case Opt_nconnect:
+ if (nfs_get_option_ul_bound(args, &option, 1, NFS_MAX_CONNECTIONS))
+ goto out_invalid_value;
+ mnt->nfs_server.nconnect = option;
+ break;
case Opt_lookupcache:
string = match_strdup(args);
if (string == NULL)
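
The super.c hunks add the nconnect= mount option, bounded to 1..NFS_MAX_CONNECTIONS (16) transport connections per server, report it from nfs_show_mount_options(), add per-mount lease_time/lease_expired reporting via the new show_lease(), and convert constant-string seq_printf() calls to seq_puts(). A userspace sketch of the bounds check (the real parser goes through nfs_get_option_ul_bound()):

#include <stdio.h>
#include <stdlib.h>

#define NFS_MAX_CONNECTIONS 16

static int parse_nconnect(const char *arg, unsigned long *out)
{
	char *end;
	unsigned long v = strtoul(arg, &end, 10);

	if (*arg == '\0' || *end != '\0')
		return -1;			/* not a plain number */
	if (v < 1 || v > NFS_MAX_CONNECTIONS)
		return -1;			/* rejected as an invalid value */
	*out = v;
	return 0;
}

int main(void)
{
	unsigned long n;

	printf("nconnect=8  -> %d\n", parse_nconnect("8", &n));		/* 0 */
	printf("nconnect=99 -> %d\n", parse_nconnect("99", &n));	/* -1 */
	return 0;
}

A mount would opt in with something like: mount -t nfs -o nconnect=8 server:/export /mnt
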
diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
new file mode 100644
index 000000000000..4f3390b20239
--- /dev/null
+++ b/fs/nfs/sysfs.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Hammerspace Inc
+ */
+
+#include <linux/module.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/nfs_fs.h>
+#include <linux/rcupdate.h>
+
+#include "nfs4_fs.h"
+#include "netns.h"
+#include "sysfs.h"
+
+struct kobject *nfs_client_kobj;
+static struct kset *nfs_client_kset;
+
+static void nfs_netns_object_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static const struct kobj_ns_type_operations *nfs_netns_object_child_ns_type(
+ struct kobject *kobj)
+{
+ return &net_ns_type_operations;
+}
+
+static struct kobj_type nfs_netns_object_type = {
+ .release = nfs_netns_object_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .child_ns_type = nfs_netns_object_child_ns_type,
+};
+
+static struct kobject *nfs_netns_object_alloc(const char *name,
+ struct kset *kset, struct kobject *parent)
+{
+ struct kobject *kobj;
+
+ kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
+ if (kobj) {
+ kobj->kset = kset;
+ if (kobject_init_and_add(kobj, &nfs_netns_object_type,
+ parent, "%s", name) == 0)
+ return kobj;
+ kobject_put(kobj);
+ }
+ return NULL;
+}
+
+int nfs_sysfs_init(void)
+{
+ nfs_client_kset = kset_create_and_add("nfs", NULL, fs_kobj);
+ if (!nfs_client_kset)
+ return -ENOMEM;
+ nfs_client_kobj = nfs_netns_object_alloc("net", nfs_client_kset, NULL);
+ if (!nfs_client_kobj) {
+ kset_unregister(nfs_client_kset);
+ nfs_client_kset = NULL;
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void nfs_sysfs_exit(void)
+{
+ kobject_put(nfs_client_kobj);
+ kset_unregister(nfs_client_kset);
+}
+
+static ssize_t nfs_netns_identifier_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct nfs_netns_client *c = container_of(kobj,
+ struct nfs_netns_client,
+ kobject);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", c->identifier);
+}
+
+/* Strip trailing '\n' */
+static size_t nfs_string_strip(const char *c, size_t len)
+{
+ while (len > 0 && c[len-1] == '\n')
+ --len;
+ return len;
+}
+
+static ssize_t nfs_netns_identifier_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nfs_netns_client *c = container_of(kobj,
+ struct nfs_netns_client,
+ kobject);
+ const char *old;
+ char *p;
+ size_t len;
+
+ len = nfs_string_strip(buf, min_t(size_t, count, CONTAINER_ID_MAXLEN));
+ if (!len)
+ return 0;
+ p = kmemdup_nul(buf, len, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ old = xchg(&c->identifier, p);
+ if (old) {
+ synchronize_rcu();
+ kfree(old);
+ }
+ return count;
+}
+
+static void nfs_netns_client_release(struct kobject *kobj)
+{
+ struct nfs_netns_client *c = container_of(kobj,
+ struct nfs_netns_client,
+ kobject);
+
+	kfree(c->identifier);
+ kfree(c);
+}
+
+static const void *nfs_netns_client_namespace(struct kobject *kobj)
+{
+ return container_of(kobj, struct nfs_netns_client, kobject)->net;
+}
+
+static struct kobj_attribute nfs_netns_client_id = __ATTR(identifier,
+ 0644, nfs_netns_identifier_show, nfs_netns_identifier_store);
+
+static struct attribute *nfs_netns_client_attrs[] = {
+ &nfs_netns_client_id.attr,
+ NULL,
+};
+
+static struct kobj_type nfs_netns_client_type = {
+ .release = nfs_netns_client_release,
+ .default_attrs = nfs_netns_client_attrs,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .namespace = nfs_netns_client_namespace,
+};
+
+static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent,
+ struct net *net)
+{
+ struct nfs_netns_client *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (p) {
+ p->net = net;
+ p->kobject.kset = nfs_client_kset;
+ if (kobject_init_and_add(&p->kobject, &nfs_netns_client_type,
+ parent, "nfs_client") == 0)
+ return p;
+ kobject_put(&p->kobject);
+ }
+ return NULL;
+}
+
+void nfs_netns_sysfs_setup(struct nfs_net *netns, struct net *net)
+{
+ struct nfs_netns_client *clp;
+
+ clp = nfs_netns_client_alloc(nfs_client_kobj, net);
+ if (clp) {
+ netns->nfs_client = clp;
+ kobject_uevent(&clp->kobject, KOBJ_ADD);
+ }
+}
+
+void nfs_netns_sysfs_destroy(struct nfs_net *netns)
+{
+ struct nfs_netns_client *clp = netns->nfs_client;
+
+ if (clp) {
+ kobject_uevent(&clp->kobject, KOBJ_REMOVE);
+ kobject_del(&clp->kobject);
+ kobject_put(&clp->kobject);
+ netns->nfs_client = NULL;
+ }
+}
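
The new fs/nfs/sysfs.c builds a small hierarchy: an "nfs" kset under /sys/fs, a network-namespace-aware "net" kobject, and a per-netns "nfs_client" object carrying a writable identifier attribute (trailing newline stripped, old value swapped out with xchg() and freed after an RCU grace period). Assuming the resulting path is /sys/fs/nfs/net/nfs_client/identifier (inferred from the names above), userspace could use it like this sketch:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/fs/nfs/net/nfs_client/identifier";
	char buf[128];
	FILE *f = fopen(path, "w");

	if (f) {				/* label this client instance */
		fputs("container-42\n", f);	/* trailing '\n' is stripped */
		fclose(f);
	}
	f = fopen(path, "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("identifier: %s", buf);
		fclose(f);
	}
	return 0;
}
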
diff --git a/fs/nfs/sysfs.h b/fs/nfs/sysfs.h
new file mode 100644
index 000000000000..f1b27411dcc0
--- /dev/null
+++ b/fs/nfs/sysfs.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Hammerspace Inc
+ */
+
+#ifndef __NFS_SYSFS_H
+#define __NFS_SYSFS_H
+
+#define CONTAINER_ID_MAXLEN (64)
+
+struct nfs_netns_client {
+ struct kobject kobject;
+ struct net *net;
+ const char *identifier;
+};
+
+extern struct kobject *nfs_client_kobj;
+
+extern int nfs_sysfs_init(void);
+extern void nfs_sysfs_exit(void);
+
+void nfs_netns_sysfs_setup(struct nfs_net *netns, struct net *net);
+void nfs_netns_sysfs_destroy(struct nfs_net *netns);
+
+#endif
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 059a7c38bc4f..92d9cadc6102 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -103,7 +103,7 @@ EXPORT_SYMBOL_GPL(nfs_commit_free);
static struct nfs_pgio_header *nfs_writehdr_alloc(void)
{
- struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);
+ struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_KERNEL);
memset(p, 0, sizeof(*p));
p->rw_mode = FMODE_WRITE;
@@ -721,12 +721,11 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
struct inode *inode = mapping->host;
struct nfs_pageio_descriptor pgio;
struct nfs_io_completion *ioc;
- unsigned int pflags = memalloc_nofs_save();
int err;
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
- ioc = nfs_io_completion_alloc(GFP_NOFS);
+ ioc = nfs_io_completion_alloc(GFP_KERNEL);
if (ioc)
nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);
@@ -737,8 +736,6 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
nfs_pageio_complete(&pgio);
nfs_io_completion_put(ioc);
- memalloc_nofs_restore(pflags);
-
if (err < 0)
goto out_err;
err = pgio.pg_error;
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 0a9a49ded546..13c548733860 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/ctype.h>
+#include <linux/fs_context.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/lockd/lockd.h>
@@ -1337,7 +1338,7 @@ void nfsd_client_rmdir(struct dentry *dentry)
inode_unlock(dir);
}
-static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
+static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
@@ -1372,7 +1373,7 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
#endif
/* last one */ {""}
};
- get_net(sb->s_fs_info);
+
ret = simple_fill_super(sb, 0x6e667364, nfsd_files);
if (ret)
return ret;
@@ -1381,14 +1382,31 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
return PTR_ERR(dentry);
nn->nfsd_client_dir = dentry;
return 0;
+}
+static int nfsd_fs_get_tree(struct fs_context *fc)
+{
+ fc->s_fs_info = get_net(fc->net_ns);
+ return vfs_get_super(fc, vfs_get_keyed_super, nfsd_fill_super);
}
-static struct dentry *nfsd_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static void nfsd_fs_free_fc(struct fs_context *fc)
{
- struct net *net = current->nsproxy->net_ns;
- return mount_ns(fs_type, flags, data, net, net->user_ns, nfsd_fill_super);
+ if (fc->s_fs_info)
+ put_net(fc->s_fs_info);
+}
+
+static const struct fs_context_operations nfsd_fs_context_ops = {
+ .free = nfsd_fs_free_fc,
+ .get_tree = nfsd_fs_get_tree,
+};
+
+static int nfsd_init_fs_context(struct fs_context *fc)
+{
+ put_user_ns(fc->user_ns);
+ fc->user_ns = get_user_ns(fc->net_ns->user_ns);
+ fc->ops = &nfsd_fs_context_ops;
+ return 0;
}
static void nfsd_umount(struct super_block *sb)
@@ -1402,7 +1420,7 @@ static void nfsd_umount(struct super_block *sb)
static struct file_system_type nfsd_fs_type = {
.owner = THIS_MODULE,
.name = "nfsd",
- .mount = nfsd_mount,
+ .init_fs_context = nfsd_init_fs_context,
.kill_sb = nfsd_umount,
};
MODULE_ALIAS_FS("nfsd");
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index cce8de32779f..0b815178126e 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -45,8 +45,6 @@ struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
#include <linux/sysctl.h>
-static int zero;
-
struct ctl_table inotify_table[] = {
{
.procname = "max_user_instances",
@@ -54,7 +52,7 @@ struct ctl_table inotify_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .extra1 = SYSCTL_ZERO,
},
{
.procname = "max_user_watches",
@@ -62,7 +60,7 @@ struct ctl_table inotify_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .extra1 = SYSCTL_ZERO,
},
{
.procname = "max_queued_events",
@@ -70,7 +68,7 @@ struct ctl_table inotify_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero
+ .extra1 = SYSCTL_ZERO
},
{ }
};
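
The inotify hunks are part of a tree-wide cleanup: instead of each file carrying a static int zero just so a ctl_table can point extra1 at it, the entries now reference shared read-only constants. The proc_sysctl.c hunk further down defines the backing array; the matching macros (reproduced here for illustration, as defined in <linux/sysctl.h> in this series) look like:

#include <limits.h>

const int sysctl_vals[] = { 0, 1, INT_MAX };	/* fs/proc/proc_sysctl.c */

#define SYSCTL_ZERO	((void *)&sysctl_vals[0])
#define SYSCTL_ONE	((void *)&sysctl_vals[1])
#define SYSCTL_INT_MAX	((void *)&sysctl_vals[2])
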
diff --git a/fs/nsfs.c b/fs/nsfs.c
index e3bf08c5af41..a0431642c6b5 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
@@ -258,15 +259,20 @@ static const struct super_operations nsfs_ops = {
.evict_inode = nsfs_evict,
.show_path = nsfs_show_path,
};
-static struct dentry *nsfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+
+static int nsfs_init_fs_context(struct fs_context *fc)
{
- return mount_pseudo(fs_type, "nsfs:", &nsfs_ops,
- &ns_dentry_operations, NSFS_MAGIC);
+ struct pseudo_fs_context *ctx = init_pseudo(fc, NSFS_MAGIC);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->ops = &nsfs_ops;
+ ctx->dops = &ns_dentry_operations;
+ return 0;
}
+
static struct file_system_type nsfs = {
.name = "nsfs",
- .mount = nsfs_mount,
+ .init_fs_context = nsfs_init_fs_context,
.kill_sb = kill_anon_super,
};
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index e6cb7689fec4..40c8c2e32fa3 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
@@ -375,7 +376,7 @@ static const struct super_operations openprom_sops = {
.remount_fs = openprom_remount,
};
-static int openprom_fill_super(struct super_block *s, void *data, int silent)
+static int openprom_fill_super(struct super_block *s, struct fs_context *fc)
{
struct inode *root_inode;
struct op_inode_info *oi;
@@ -409,16 +410,25 @@ out_no_root:
return ret;
}
-static struct dentry *openprom_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int openpromfs_get_tree(struct fs_context *fc)
{
- return mount_single(fs_type, flags, data, openprom_fill_super);
+ return get_tree_single(fc, openprom_fill_super);
+}
+
+static const struct fs_context_operations openpromfs_context_ops = {
+ .get_tree = openpromfs_get_tree,
+};
+
+static int openpromfs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &openpromfs_context_ops;
+ return 0;
}
static struct file_system_type openprom_fs_type = {
.owner = THIS_MODULE,
.name = "openpromfs",
- .mount = openprom_mount,
+ .init_fs_context = openpromfs_init_fs_context,
.kill_sb = kill_anon_super,
};
MODULE_ALIAS_FS("openpromfs");
diff --git a/fs/pipe.c b/fs/pipe.c
index 41065901106b..8a2ab2f974bd 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -14,6 +14,7 @@
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
@@ -1182,16 +1183,20 @@ static const struct super_operations pipefs_ops = {
* any operations on the root directory. However, we need a non-trivial
* d_name - pipe: will go nicely and kill the special-casing in procfs.
*/
-static struct dentry *pipefs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+
+static int pipefs_init_fs_context(struct fs_context *fc)
{
- return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
- &pipefs_dentry_operations, PIPEFS_MAGIC);
+ struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->ops = &pipefs_ops;
+ ctx->dops = &pipefs_dentry_operations;
+ return 0;
}
static struct file_system_type pipe_fs_type = {
.name = "pipefs",
- .mount = pipefs_mount,
+ .init_fs_context = pipefs_init_fs_context,
.kill_sb = kill_anon_super,
};
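
nsfs and pipefs follow the same recipe for retiring mount_pseudo(): an init_fs_context hook calls init_pseudo() and fills in the super and dentry operations. The generic shape, as a sketch (examplefs and its magic number are hypothetical; the API is <linux/pseudo_fs.h>):

#include <linux/fs.h>
#include <linux/pseudo_fs.h>

#define EXAMPLEFS_MAGIC	0x45584d50	/* hypothetical magic */

static int examplefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, EXAMPLEFS_MAGIC);

	if (!ctx)
		return -ENOMEM;
	/* ctx->ops / ctx->dops are optional, as nsfs and pipefs show */
	return 0;
}

static struct file_system_type example_fs_type = {
	.name		 = "examplefs",
	.init_fs_context = examplefs_init_fs_context,
	.kill_sb	 = kill_anon_super,
};
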
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 36ad1b0d6259..d80989b6c344 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -22,6 +22,10 @@ static const struct inode_operations proc_sys_inode_operations;
static const struct file_operations proc_sys_dir_file_operations;
static const struct inode_operations proc_sys_dir_operations;
+/* shared constants to be used in various sysctls */
+const int sysctl_vals[] = { 0, 1, INT_MAX };
+EXPORT_SYMBOL(sysctl_vals);
+
/* Support for permanently empty directories */
struct ctl_table sysctl_mount_point[] = {
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 522199e9525e..33f72d1b92cc 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -157,8 +157,6 @@ static int proc_get_tree(struct fs_context *fc)
{
struct proc_fs_context *ctx = fc->fs_private;
- put_user_ns(fc->user_ns);
- fc->user_ns = get_user_ns(ctx->pid_ns->user_ns);
fc->s_fs_info = ctx->pid_ns;
return vfs_get_super(fc, vfs_get_keyed_super, proc_fill_super);
}
@@ -167,8 +165,7 @@ static void proc_fs_context_free(struct fs_context *fc)
{
struct proc_fs_context *ctx = fc->fs_private;
- if (ctx->pid_ns)
- put_pid_ns(ctx->pid_ns);
+ put_pid_ns(ctx->pid_ns);
kfree(ctx);
}
@@ -188,6 +185,8 @@ static int proc_init_fs_context(struct fs_context *fc)
return -ENOMEM;
ctx->pid_ns = get_pid_ns(task_active_pid_ns(current));
+ put_user_ns(fc->user_ns);
+ fc->user_ns = get_user_ns(ctx->pid_ns->user_ns);
fc->fs_private = ctx;
fc->ops = &proc_fs_context_ops;
return 0;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 818cedbed95f..731642e0f5a0 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -832,7 +832,8 @@ static int show_smap(struct seq_file *m, void *v)
__show_smap(m, &mss, false);
- seq_printf(m, "THPeligible: %d\n", transparent_hugepage_enabled(vma));
+ seq_printf(m, "THPeligible: %d\n",
+ transparent_hugepage_enabled(vma));
if (arch_pkeys_enabled())
seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 11201b2d06b9..733c6b4193dc 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -266,12 +266,8 @@ static struct file_system_type ramfs_fs_type = {
.fs_flags = FS_USERNS_MOUNT,
};
-int __init init_ramfs_fs(void)
+static int __init init_ramfs_fs(void)
{
- static unsigned long once;
-
- if (test_and_set_bit(0, &once))
- return 0;
return register_filesystem(&ramfs_fs_type);
}
fs_initcall(init_ramfs_fs);
diff --git a/fs/super.c b/fs/super.c
index 2739f57515f8..113c58f19425 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -476,6 +476,17 @@ void generic_shutdown_super(struct super_block *sb)
EXPORT_SYMBOL(generic_shutdown_super);
+bool mount_capable(struct fs_context *fc)
+{
+ struct user_namespace *user_ns = fc->global ? &init_user_ns
+ : fc->user_ns;
+
+ if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
+ return capable(CAP_SYS_ADMIN);
+ else
+ return ns_capable(user_ns, CAP_SYS_ADMIN);
+}
+
/**
* sget_fc - Find or create a superblock
* @fc: Filesystem context.
@@ -503,20 +514,6 @@ struct super_block *sget_fc(struct fs_context *fc,
struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
int err;
- if (!(fc->sb_flags & SB_KERNMOUNT) &&
- fc->purpose != FS_CONTEXT_FOR_SUBMOUNT) {
- /* Don't allow mounting unless the caller has CAP_SYS_ADMIN
- * over the namespace.
- */
- if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) {
- if (!capable(CAP_SYS_ADMIN))
- return ERR_PTR(-EPERM);
- } else {
- if (!ns_capable(fc->user_ns, CAP_SYS_ADMIN))
- return ERR_PTR(-EPERM);
- }
- }
-
retry:
spin_lock(&sb_lock);
if (test) {
@@ -543,6 +540,7 @@ retry:
}
fc->s_fs_info = NULL;
s->s_type = fc->fs_type;
+ s->s_iflags |= fc->s_iflags;
strlcpy(s->s_id, s->s_type->name, sizeof(s->s_id));
list_add_tail(&s->s_list, &super_blocks);
hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
@@ -565,28 +563,31 @@ share_extant_sb:
EXPORT_SYMBOL(sget_fc);
/**
- * sget_userns - find or create a superblock
- * @type: filesystem type superblock should belong to
- * @test: comparison callback
- * @set: setup callback
- * @flags: mount flags
- * @user_ns: User namespace for the super_block
- * @data: argument to each of them
+ * sget - find or create a superblock
+ * @type: filesystem type superblock should belong to
+ * @test: comparison callback
+ * @set: setup callback
+ * @flags: mount flags
+ * @data: argument to each of them
*/
-struct super_block *sget_userns(struct file_system_type *type,
+struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
- int flags, struct user_namespace *user_ns,
+ int flags,
void *data)
{
+ struct user_namespace *user_ns = current_user_ns();
struct super_block *s = NULL;
struct super_block *old;
int err;
- if (!(flags & (SB_KERNMOUNT|SB_SUBMOUNT)) &&
- !(type->fs_flags & FS_USERNS_MOUNT) &&
- !capable(CAP_SYS_ADMIN))
- return ERR_PTR(-EPERM);
+ /* We don't yet pass the user namespace of the parent
+ * mount through to here so always use &init_user_ns
+ * until that changes.
+ */
+ if (flags & SB_SUBMOUNT)
+ user_ns = &init_user_ns;
+
retry:
spin_lock(&sb_lock);
if (test) {
@@ -627,39 +628,6 @@ retry:
register_shrinker_prepared(&s->s_shrink);
return s;
}
-
-EXPORT_SYMBOL(sget_userns);
-
-/**
- * sget - find or create a superblock
- * @type: filesystem type superblock should belong to
- * @test: comparison callback
- * @set: setup callback
- * @flags: mount flags
- * @data: argument to each of them
- */
-struct super_block *sget(struct file_system_type *type,
- int (*test)(struct super_block *,void *),
- int (*set)(struct super_block *,void *),
- int flags,
- void *data)
-{
- struct user_namespace *user_ns = current_user_ns();
-
- /* We don't yet pass the user namespace of the parent
- * mount through to here so always use &init_user_ns
- * until that changes.
- */
- if (flags & SB_SUBMOUNT)
- user_ns = &init_user_ns;
-
- /* Ensure the requestor has permissions over the target filesystem */
- if (!(flags & (SB_KERNMOUNT|SB_SUBMOUNT)) && !ns_capable(user_ns, CAP_SYS_ADMIN))
- return ERR_PTR(-EPERM);
-
- return sget_userns(type, test, set, flags, user_ns, data);
-}
-
EXPORT_SYMBOL(sget);
void drop_super(struct super_block *sb)
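
sget() keeps its classic shape for legacy ->mount() implementations; only the in-function capability check has moved out. A minimal single-superblock sketch, mirroring what the removed wrapper's callers looked like (examplefs and example_fill_super() are hypothetical):

static int example_set_super(struct super_block *sb, void *data)
{
	return set_anon_super(sb, NULL);
}

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
				      int flags, const char *dev_name,
				      void *data)
{
	struct super_block *sb;
	int err;

	sb = sget(fs_type, NULL, example_set_super, flags, NULL);
	if (IS_ERR(sb))
		return ERR_CAST(sb);

	if (!sb->s_root) {
		err = example_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
		if (err) {
			deactivate_locked_super(sb);
			return ERR_PTR(err);
		}
		sb->s_flags |= SB_ACTIVE;
	}
	return dget(sb->s_root);
}
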
@@ -1147,50 +1115,6 @@ void kill_litter_super(struct super_block *sb)
}
EXPORT_SYMBOL(kill_litter_super);
-static int ns_test_super(struct super_block *sb, void *data)
-{
- return sb->s_fs_info == data;
-}
-
-static int ns_set_super(struct super_block *sb, void *data)
-{
- sb->s_fs_info = data;
- return set_anon_super(sb, NULL);
-}
-
-struct dentry *mount_ns(struct file_system_type *fs_type,
- int flags, void *data, void *ns, struct user_namespace *user_ns,
- int (*fill_super)(struct super_block *, void *, int))
-{
- struct super_block *sb;
-
- /* Don't allow mounting unless the caller has CAP_SYS_ADMIN
- * over the namespace.
- */
- if (!(flags & SB_KERNMOUNT) && !ns_capable(user_ns, CAP_SYS_ADMIN))
- return ERR_PTR(-EPERM);
-
- sb = sget_userns(fs_type, ns_test_super, ns_set_super, flags,
- user_ns, ns);
- if (IS_ERR(sb))
- return ERR_CAST(sb);
-
- if (!sb->s_root) {
- int err;
- err = fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
- if (err) {
- deactivate_locked_super(sb);
- return ERR_PTR(err);
- }
-
- sb->s_flags |= SB_ACTIVE;
- }
-
- return dget(sb->s_root);
-}
-
-EXPORT_SYMBOL(mount_ns);
-
int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
return set_anon_super(sb, NULL);
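
What mount_ns() expressed, one superblock per namespace pointer, is now written against the fs_context instead: stash the key in fc->s_fs_info and use a keyed superblock lookup, which compares sb->s_fs_info just as the removed ns_test_super() did. A sketch, assuming the vfs_get_keyed_super keying mode and a hypothetical example_fill_super(); refcounting of the key is elided:

static int examplefs_get_tree(struct fs_context *fc)
{
	fc->s_fs_info = current->nsproxy->net_ns;	/* the comparison key */
	return vfs_get_super(fc, vfs_get_keyed_super, example_fill_super);
}
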
@@ -1274,6 +1198,22 @@ int vfs_get_super(struct fs_context *fc,
}
EXPORT_SYMBOL(vfs_get_super);
+int get_tree_nodev(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc))
+{
+ return vfs_get_super(fc, vfs_get_independent_super, fill_super);
+}
+EXPORT_SYMBOL(get_tree_nodev);
+
+int get_tree_single(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc))
+{
+ return vfs_get_super(fc, vfs_get_single_super, fill_super);
+}
+EXPORT_SYMBOL(get_tree_single);
+
#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
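
Taken together, the two helpers shrink a device-less pseudo-filesystem's fs_context plumbing to a few lines. A self-contained sketch for a hypothetical examplefs; the magic number is made up, and simple_fill_super() is the stock libfs helper:

static const struct tree_descr examplefs_files[] = { {""} };

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	return simple_fill_super(sb, 0x53464d45 /* hypothetical magic */,
				 examplefs_files);
}

static int examplefs_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, examplefs_fill_super);
}

static const struct fs_context_operations examplefs_context_ops = {
	.get_tree	= examplefs_get_tree,
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &examplefs_context_ops;
	return 0;
}

get_tree_single() differs only in its keying: it reuses the one existing superblock of the type when it is already mounted, rather than creating an independent instance per mount.
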
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 1b56686ab178..db81cfbab9d6 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -72,8 +72,7 @@ static int sysfs_init_fs_context(struct fs_context *fc)
fc->fs_private = kfc;
fc->ops = &sysfs_fs_context_ops;
if (netns) {
- if (fc->user_ns)
- put_user_ns(fc->user_ns);
+ put_user_ns(fc->user_ns);
fc->user_ns = get_user_ns(netns->user_ns);
}
fc->global = true;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index e5f8de62fc51..400970d740bb 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1470,7 +1470,7 @@ static int ubifs_migrate_page(struct address_space *mapping,
{
int rc;
- rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
+ rc = migrate_page_move_mapping(mapping, newpage, page, 0);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index b74a47169297..06b68b6115bc 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -49,6 +49,7 @@ xfs-y += $(addprefix libxfs/, \
xfs_refcount_btree.o \
xfs_sb.o \
xfs_symlink_remote.o \
+ xfs_trans_inode.o \
xfs_trans_resv.o \
xfs_types.o \
)
@@ -107,8 +108,7 @@ xfs-y += xfs_log.o \
xfs_rmap_item.o \
xfs_log_recover.o \
xfs_trans_ail.o \
- xfs_trans_buf.o \
- xfs_trans_inode.o
+ xfs_trans_buf.o
# optional features
xfs-$(CONFIG_XFS_QUOTA) += xfs_dquot.o \
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
index 93d14e47269d..a9ad90926b87 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
@@ -66,6 +66,10 @@ xfs_trans_ichgtime(
inode->i_mtime = tv;
if (flags & XFS_ICHGTIME_CHG)
inode->i_ctime = tv;
+ if (flags & XFS_ICHGTIME_CREATE) {
+ ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
+ ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec;
+ }
}
/*
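
With the XFS_ICHGTIME_CREATE case added above, a caller stamping a freshly allocated inode can set the creation time (di_crtime) together with mtime and ctime in one call. The wrapper below is illustrative only; the flag combination is the point:

static void example_stamp_new_inode(struct xfs_trans *tp,
				    struct xfs_inode *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG |
				   XFS_ICHGTIME_CREATE);
}
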