From 5bff0386305461021bbef2d958fa0f0151f56a6f Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 8 Nov 2011 15:09:19 +0300 Subject: SUNRPC: remove non-exclusive pipe creation from RPC pipefs This patch-set was created in context of clone of git branch: git://git.linux-nfs.org/projects/trondmy/nfs-2.6.git. v2: 1) Rebased of current repo state (i.e. all commits were pulled before apply) I feel it is ready for inclusion if no objections will appear. SUNRPC pipefs non-exclusive pipe creation code looks obsolete. IOW, as I see it, all pipes are creating with unique full path and only once. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- net/sunrpc/rpc_pipe.c | 44 ++++++-------------------------------------- 1 file changed, 6 insertions(+), 38 deletions(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 63a7a7add21e..8e3397fc0a7d 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -554,7 +554,6 @@ static int __rpc_mkpipe(struct inode *dir, struct dentry *dentry, if (err) return err; rpci = RPC_I(dentry->d_inode); - rpci->nkern_readwriters = 1; rpci->private = private; rpci->flags = flags; rpci->ops = ops; @@ -587,16 +586,12 @@ static int __rpc_unlink(struct inode *dir, struct dentry *dentry) static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; - struct rpc_inode *rpci = RPC_I(inode); - rpci->nkern_readwriters--; - if (rpci->nkern_readwriters != 0) - return 0; rpc_close_pipes(inode); return __rpc_unlink(dir, dentry); } -static struct dentry *__rpc_lookup_create(struct dentry *parent, +static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent, struct qstr *name) { struct dentry *dentry; @@ -604,27 +599,13 @@ static struct dentry *__rpc_lookup_create(struct dentry *parent, dentry = d_lookup(parent, name); if (!dentry) { dentry = d_alloc(parent, name); - if (!dentry) { - dentry = ERR_PTR(-ENOMEM); - goto out_err; - } + if (!dentry) + return ERR_PTR(-ENOMEM); } - if (!dentry->d_inode) + if (dentry->d_inode == NULL) { d_set_d_op(dentry, &rpc_dentry_operations); -out_err: - return dentry; -} - -static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent, - struct qstr *name) -{ - struct dentry *dentry; - - dentry = __rpc_lookup_create(parent, name); - if (IS_ERR(dentry)) - return dentry; - if (dentry->d_inode == NULL) return dentry; + } dput(dentry); return ERR_PTR(-EEXIST); } @@ -812,22 +793,9 @@ struct dentry *rpc_mkpipe(struct dentry *parent, const char *name, q.hash = full_name_hash(q.name, q.len), mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); - dentry = __rpc_lookup_create(parent, &q); + dentry = __rpc_lookup_create_exclusive(parent, &q); if (IS_ERR(dentry)) goto out; - if (dentry->d_inode) { - struct rpc_inode *rpci = RPC_I(dentry->d_inode); - if (rpci->private != private || - rpci->ops != ops || - rpci->flags != flags) { - dput (dentry); - err = -EBUSY; - goto out_err; - } - rpci->nkern_readwriters++; - goto out; - } - err = __rpc_mkpipe(dir, dentry, umode, &rpc_pipe_fops, private, ops, flags); if (err) -- cgit v1.2.3-55-g7522 From 38b0da7522c086f1dcdeda39a2d1849c6a31f518 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 26 Dec 2011 15:38:56 +0300 Subject: SUNRPC: create RPC pipefs superblock per network namespace context This is the initial step of RPC pipefs virtualization. 
It changes nothing to current pipefs behaviour except that mount of pipefs in other than init_net network namespace context will provide only root tree. No other dentries will be visible. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- net/sunrpc/rpc_pipe.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 8e3397fc0a7d..e32e6b8c006d 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -993,6 +993,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; struct dentry *root; + struct net *net = data; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; @@ -1017,7 +1018,7 @@ static struct dentry * rpc_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - return mount_single(fs_type, flags, data, rpc_fill_super); + return mount_ns(fs_type, flags, current->nsproxy->net_ns, rpc_fill_super); } static struct file_system_type rpc_pipe_fs_type = { -- cgit v1.2.3-55-g7522 From 021c68dec8c04c44cb82eb5bbee77028fafe22e8 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 26 Dec 2011 15:39:04 +0300 Subject: SUNRPC: hold current network namespace while pipefs superblock is active We want to be sure that network namespace is still alive while we have pipefs mounted. This will be required later, when RPC pipefs will be mounting only from user-space context. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- net/sunrpc/rpc_pipe.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index e32e6b8c006d..f628b0f48a87 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -27,6 +27,9 @@ #include #include #include +#include + +#include "netns.h" static struct vfsmount *rpc_mnt __read_mostly; static int rpc_mount_count; @@ -1011,6 +1014,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent) } if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL)) return -ENOMEM; + sb->s_fs_info = get_net(net); return 0; } @@ -1021,11 +1025,19 @@ rpc_mount(struct file_system_type *fs_type, return mount_ns(fs_type, flags, current->nsproxy->net_ns, rpc_fill_super); } +void rpc_kill_sb(struct super_block *sb) +{ + struct net *net = sb->s_fs_info; + + put_net(net); + kill_litter_super(sb); +} + static struct file_system_type rpc_pipe_fs_type = { .owner = THIS_MODULE, .name = "rpc_pipefs", .mount = rpc_mount, - .kill_sb = kill_litter_super, + .kill_sb = rpc_kill_sb, }; static void -- cgit v1.2.3-55-g7522 From 2d00131acc641b2cb6f0bdefb8c7bdd8fdf7410b Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 26 Dec 2011 15:39:13 +0300 Subject: SUNRPC: send notification events on pipefs sb creation and destruction They will be used to notify subscribers about pipefs superblock creation and destruction. Subcribers will have to create their dentries on passed superblock on mount event and destroy otherwise. 
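For illustration only, a subscriber might look like the sketch below; my_pipefs_event(), my_create_dentries() and my_destroy_dentries() are hypothetical names, not part of this series. The callback receives the event id and the pipefs superblock, and returns 0 or a negative errno (which rpc_fill_super() below treats as a mount failure):

#include <linux/notifier.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

static int my_pipefs_event(struct notifier_block *nb, unsigned long event,
			   void *ptr)
{
	struct super_block *sb = ptr;	/* pipefs superblock being (u)mounted */
	int err = 0;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		err = my_create_dentries(sb);	/* create this subsystem's pipes */
		break;
	case RPC_PIPEFS_UMOUNT:
		my_destroy_dentries(sb);	/* tear them down on umount */
		break;
	}
	return err;
}

static struct notifier_block my_pipefs_block = {
	.notifier_call	= my_pipefs_event,
};

static int __init my_module_init(void)
{
	return rpc_pipefs_notifier_register(&my_pipefs_block);
}

static void __exit my_module_exit(void)
{
	rpc_pipefs_notifier_unregister(&my_pipefs_block);
}
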
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- include/linux/sunrpc/rpc_pipe_fs.h | 8 ++++++++ net/sunrpc/rpc_pipe.c | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index edadc3acf949..d39782ce6c67 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -43,6 +43,14 @@ RPC_I(struct inode *inode) return container_of(inode, struct rpc_inode, vfs_inode); } +extern int rpc_pipefs_notifier_register(struct notifier_block *); +extern void rpc_pipefs_notifier_unregister(struct notifier_block *); + +enum { + RPC_PIPEFS_MOUNT, + RPC_PIPEFS_UMOUNT, +}; + extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *, char __user *, size_t); extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *); diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index f628b0f48a87..58a5062df260 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -28,8 +28,10 @@ #include #include #include +#include #include "netns.h" +#include "sunrpc.h" static struct vfsmount *rpc_mnt __read_mostly; static int rpc_mount_count; @@ -41,6 +43,20 @@ static struct kmem_cache *rpc_inode_cachep __read_mostly; #define RPC_UPCALL_TIMEOUT (30*HZ) +static BLOCKING_NOTIFIER_HEAD(rpc_pipefs_notifier_list); + +int rpc_pipefs_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_cond_register(&rpc_pipefs_notifier_list, nb); +} +EXPORT_SYMBOL_GPL(rpc_pipefs_notifier_register); + +void rpc_pipefs_notifier_unregister(struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&rpc_pipefs_notifier_list, nb); +} +EXPORT_SYMBOL_GPL(rpc_pipefs_notifier_unregister); + static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, void (*destroy_msg)(struct rpc_pipe_msg *), int err) { @@ -997,6 +1013,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent) struct inode *inode; struct dentry *root; struct net *net = data; + int err; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; @@ -1014,8 +1031,20 @@ rpc_fill_super(struct super_block *sb, void *data, int silent) } if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL)) return -ENOMEM; + err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list, + RPC_PIPEFS_MOUNT, + sb); + if (err) + goto err_depopulate; sb->s_fs_info = get_net(net); return 0; + +err_depopulate: + blocking_notifier_call_chain(&rpc_pipefs_notifier_list, + RPC_PIPEFS_UMOUNT, + sb); + __rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF); + return err; } static struct dentry * @@ -1030,6 +1059,9 @@ void rpc_kill_sb(struct super_block *sb) struct net *net = sb->s_fs_info; put_net(net); + blocking_notifier_call_chain(&rpc_pipefs_notifier_list, + RPC_PIPEFS_UMOUNT, + sb); kill_litter_super(sb); } -- cgit v1.2.3-55-g7522 From 432eb1a5fb380477ae759041bac2bb305977e436 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 26 Dec 2011 15:39:22 +0300 Subject: SUNRPC: pipefs dentry lookup helper introduced In all places, where pipefs dentries are created, only directory inode is actually required to create new dentry. And all this directories has root pipefs dentry as their parent. So we actually don't need this pipefs mount point at all if some pipefs lookup method will be provided. 
IOW, all we really need is just superblock and simple lookup method to find root's child dentry with appropriate name. And this patch introduces this method. Note, that no locking implemented in rpc_d_lookup_sb(). So it can be used only in case of assurance, that pipefs superblock still exist. IOW, we can use this method only in pipefs mount-umount notification subscribers callbacks. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- include/linux/sunrpc/rpc_pipe_fs.h | 3 +++ net/sunrpc/rpc_pipe.c | 16 ++++++++++++++++ 2 files changed, 19 insertions(+) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index d39782ce6c67..2f3382230141 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -51,6 +51,9 @@ enum { RPC_PIPEFS_UMOUNT, }; +extern struct dentry *rpc_d_lookup_sb(const struct super_block *sb, + const unsigned char *dir_name); + extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *, char __user *, size_t); extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *); diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 58a5062df260..6f295e6c12a0 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1007,6 +1007,22 @@ static const struct rpc_filelist files[] = { }, }; +/* + * This call can be used only in RPC pipefs mount notification hooks. + */ +struct dentry *rpc_d_lookup_sb(const struct super_block *sb, + const unsigned char *dir_name) +{ + struct qstr dir = { + .name = dir_name, + .len = strlen(dir_name), + .hash = full_name_hash(dir_name, strlen(dir_name)), + }; + + return d_lookup(sb->s_root, &dir); +} +EXPORT_SYMBOL_GPL(rpc_d_lookup_sb); + static int rpc_fill_super(struct super_block *sb, void *data, int silent) { -- cgit v1.2.3-55-g7522 From 90c4e82999c517e0cd00d0782c68d186cb18b784 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 26 Dec 2011 15:39:30 +0300 Subject: SUNRPC: put pipefs superblock link on network namespace We have modules (like, pNFS blocklayout module) which creates pipes on rpc_pipefs. Thus we need per-net operations for them. To make it possible we require appropriate super block. So we have to put sb link on network namespace context. Note, that it's not strongly required to create pipes in per-net operations. IOW, if pipefs wasn't mounted yet, that no sb link reference will present on network namespace and in this case we need just need to pass through pipe creation. Pipe dentry will be created during pipefs mount notification. 
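As a sketch of how a MOUNT-notification subscriber could use the lookup helper to create its pipe dentry (illustration only; "my_dir", "my_pipe", my_pipe_dentry and my_pipe_ops are hypothetical, and my_pipe_ops is assumed to be a populated struct rpc_pipe_ops):

static struct dentry *my_pipe_dentry;	/* remembered for the UMOUNT event */

static int my_pipefs_mount_event(struct super_block *sb)
{
	struct dentry *dir, *pipe;

	dir = rpc_d_lookup_sb(sb, "my_dir");	/* placeholder directory name */
	if (dir == NULL)
		return -ENOENT;
	pipe = rpc_mkpipe(dir, "my_pipe", NULL, &my_pipe_ops, 0);
	dput(dir);			/* rpc_d_lookup_sb() returned a referenced dentry */
	if (IS_ERR(pipe))
		return PTR_ERR(pipe);
	my_pipe_dentry = pipe;
	return 0;
}

static void my_pipefs_umount_event(struct super_block *sb)
{
	rpc_unlink(my_pipe_dentry);
	my_pipe_dentry = NULL;
}
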
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- net/sunrpc/netns.h | 2 ++ net/sunrpc/rpc_pipe.c | 4 ++++ 2 files changed, 6 insertions(+) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h index d013bf211cae..b3842529aec9 100644 --- a/net/sunrpc/netns.h +++ b/net/sunrpc/netns.h @@ -9,6 +9,8 @@ struct cache_detail; struct sunrpc_net { struct proc_dir_entry *proc_net_rpc; struct cache_detail *ip_map_cache; + + struct super_block *pipefs_sb; }; extern int sunrpc_net_id; diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 6f295e6c12a0..e5e1f357b561 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1029,6 +1029,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent) struct inode *inode; struct dentry *root; struct net *net = data; + struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); int err; sb->s_blocksize = PAGE_CACHE_SIZE; @@ -1053,6 +1054,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent) if (err) goto err_depopulate; sb->s_fs_info = get_net(net); + sn->pipefs_sb = sb; return 0; err_depopulate: @@ -1073,7 +1075,9 @@ rpc_mount(struct file_system_type *fs_type, void rpc_kill_sb(struct super_block *sb) { struct net *net = sb->s_fs_info; + struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); + sn->pipefs_sb = NULL; put_net(net); blocking_notifier_call_chain(&rpc_pipefs_notifier_list, RPC_PIPEFS_UMOUNT, -- cgit v1.2.3-55-g7522 From c21a588f35b1c50304e505fad542b3aab0814266 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 26 Dec 2011 15:39:39 +0300 Subject: SUNRPC: pipefs per-net operations helper introduced During per-net pipes creation and destruction we have to make sure, that pipefs sb exists for the whole creation/destruction cycle. This is done by using special mutex which controls pipefs sb reference on network namespace context. Helper consists of two parts: first of them (rpc_get_dentry_net) searches for dentry with specified name and returns with mutex taken on success. When pipe creation or destructions is completed, caller should release this mutex by rpc_put_dentry_net call. 
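A per-net caller of the helper pair added below (rpc_get_sb_net()/rpc_put_sb_net()) might look like this sketch; the function name and dentry are hypothetical and only illustrate the intended locking pattern:

static void my_pernet_pipe_destroy(struct net *net, struct dentry *pipe_dentry)
{
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);	/* NULL if pipefs is not mounted in this net */
	if (pipefs_sb == NULL)
		return;				/* nothing to remove: the dentry was never created */
	rpc_unlink(pipe_dentry);
	rpc_put_sb_net(net);			/* drops pipefs_sb_lock taken above */
}
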
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- include/linux/sunrpc/rpc_pipe_fs.h | 3 +++ net/sunrpc/netns.h | 1 + net/sunrpc/rpc_pipe.c | 36 ++++++++++++++++++++++++++++++++++++ net/sunrpc/sunrpc_syms.c | 1 + 4 files changed, 41 insertions(+) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index 2f3382230141..c13fca34dc9c 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -53,6 +53,9 @@ enum { extern struct dentry *rpc_d_lookup_sb(const struct super_block *sb, const unsigned char *dir_name); +extern void rpc_pipefs_init_net(struct net *net); +extern struct super_block *rpc_get_sb_net(const struct net *net); +extern void rpc_put_sb_net(const struct net *net); extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *, char __user *, size_t); diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h index b3842529aec9..11d2f4863403 100644 --- a/net/sunrpc/netns.h +++ b/net/sunrpc/netns.h @@ -11,6 +11,7 @@ struct sunrpc_net { struct cache_detail *ip_map_cache; struct super_block *pipefs_sb; + struct mutex pipefs_sb_lock; }; extern int sunrpc_net_id; diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index e5e1f357b561..f075f8817773 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1023,6 +1023,40 @@ struct dentry *rpc_d_lookup_sb(const struct super_block *sb, } EXPORT_SYMBOL_GPL(rpc_d_lookup_sb); +void rpc_pipefs_init_net(struct net *net) +{ + struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); + + mutex_init(&sn->pipefs_sb_lock); +} + +/* + * This call will be used for per network namespace operations calls. + * Note: Function will be returned with pipefs_sb_lock taken if superblock was + * found. This lock have to be released by rpc_put_sb_net() when all operations + * will be completed. + */ +struct super_block *rpc_get_sb_net(const struct net *net) +{ + struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); + + mutex_lock(&sn->pipefs_sb_lock); + if (sn->pipefs_sb) + return sn->pipefs_sb; + mutex_unlock(&sn->pipefs_sb_lock); + return NULL; +} +EXPORT_SYMBOL_GPL(rpc_get_sb_net); + +void rpc_put_sb_net(const struct net *net) +{ + struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); + + BUG_ON(sn->pipefs_sb == NULL); + mutex_unlock(&sn->pipefs_sb_lock); +} +EXPORT_SYMBOL_GPL(rpc_put_sb_net); + static int rpc_fill_super(struct super_block *sb, void *data, int silent) { @@ -1077,7 +1111,9 @@ void rpc_kill_sb(struct super_block *sb) struct net *net = sb->s_fs_info; struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); + mutex_lock(&sn->pipefs_sb_lock); sn->pipefs_sb = NULL; + mutex_unlock(&sn->pipefs_sb_lock); put_net(net); blocking_notifier_call_chain(&rpc_pipefs_notifier_list, RPC_PIPEFS_UMOUNT, diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index 8ec9778c3f4a..7086d11589c8 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c @@ -38,6 +38,7 @@ static __net_init int sunrpc_init_net(struct net *net) if (err) goto err_ipmap; + rpc_pipefs_init_net(net); return 0; err_ipmap: -- cgit v1.2.3-55-g7522 From efc46bf2b2893132e07628ad8c06e915f3281fdc Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 26 Dec 2011 15:39:47 +0300 Subject: SUNRPC: added debug messages to RPC pipefs This patch adds debug messages for notification events. 
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- net/sunrpc/rpc_pipe.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index f075f8817773..9a881138fead 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -33,6 +33,10 @@ #include "netns.h" #include "sunrpc.h" +#define RPCDBG_FACILITY RPCDBG_DEBUG + +#define NET_NAME(net) ((net == &init_net) ? " (init_net)" : "") + static struct vfsmount *rpc_mnt __read_mostly; static int rpc_mount_count; @@ -1082,6 +1086,8 @@ rpc_fill_super(struct super_block *sb, void *data, int silent) } if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL)) return -ENOMEM; + dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", net, + NET_NAME(net)); err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list, RPC_PIPEFS_MOUNT, sb); @@ -1115,6 +1121,8 @@ void rpc_kill_sb(struct super_block *sb) sn->pipefs_sb = NULL; mutex_unlock(&sn->pipefs_sb_lock); put_net(net); + dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n", net, + NET_NAME(net)); blocking_notifier_call_chain(&rpc_pipefs_notifier_list, RPC_PIPEFS_UMOUNT, sb); -- cgit v1.2.3-55-g7522 From 766347bec3490111e1c4482af7c7394868c2aed1 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 26 Dec 2011 15:43:23 +0300 Subject: SUNRPC: replace inode lock with pipe lock for RPC PipeFS operations Currenly, inode i_lock is used to provide concurrent access to SUNPRC PipeFS pipes. It looks redundant, since now other use of inode is present in most of these places and thus can be easely replaced, which will allow to remove most of inode references from PipeFS code. This is a first step towards to removing PipeFS inode references from kernel code other than PipeFS itself. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- include/linux/sunrpc/rpc_pipe_fs.h | 1 + net/sunrpc/auth_gss/auth_gss.c | 57 +++++++++++++++++++------------------- net/sunrpc/rpc_pipe.c | 38 ++++++++++++------------- 3 files changed, 48 insertions(+), 48 deletions(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index c13fca34dc9c..c93ea8689bc8 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -35,6 +35,7 @@ struct rpc_inode { int flags; struct delayed_work queue_timeout; const struct rpc_pipe_ops *ops; + spinlock_t lock; }; static inline struct rpc_inode * diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index affa631ac1ab..a0844f92a447 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -112,7 +112,7 @@ gss_put_ctx(struct gss_cl_ctx *ctx) /* gss_cred_set_ctx: * called by gss_upcall_callback and gss_create_upcall in order * to set the gss context. The actual exchange of an old context - * and a new one is protected by the inode->i_lock. + * and a new one is protected by the rpci->lock. 
*/ static void gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) @@ -316,17 +316,16 @@ static inline struct gss_upcall_msg * gss_add_msg(struct gss_upcall_msg *gss_msg) { struct rpc_inode *rpci = gss_msg->inode; - struct inode *inode = &rpci->vfs_inode; struct gss_upcall_msg *old; - spin_lock(&inode->i_lock); + spin_lock(&rpci->lock); old = __gss_find_upcall(rpci, gss_msg->uid); if (old == NULL) { atomic_inc(&gss_msg->count); list_add(&gss_msg->list, &rpci->in_downcall); } else gss_msg = old; - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); return gss_msg; } @@ -342,14 +341,14 @@ __gss_unhash_msg(struct gss_upcall_msg *gss_msg) static void gss_unhash_msg(struct gss_upcall_msg *gss_msg) { - struct inode *inode = &gss_msg->inode->vfs_inode; + struct rpc_inode *rpci = gss_msg->inode; if (list_empty(&gss_msg->list)) return; - spin_lock(&inode->i_lock); + spin_lock(&rpci->lock); if (!list_empty(&gss_msg->list)) __gss_unhash_msg(gss_msg); - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); } static void @@ -376,11 +375,11 @@ gss_upcall_callback(struct rpc_task *task) struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred, struct gss_cred, gc_base); struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall; - struct inode *inode = &gss_msg->inode->vfs_inode; + struct rpc_inode *rpci = gss_msg->inode; - spin_lock(&inode->i_lock); + spin_lock(&rpci->lock); gss_handle_downcall_result(gss_cred, gss_msg); - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); task->tk_status = gss_msg->msg.errno; gss_release_msg(gss_msg); } @@ -506,7 +505,7 @@ gss_refresh_upcall(struct rpc_task *task) struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); struct gss_upcall_msg *gss_msg; - struct inode *inode; + struct rpc_inode *rpci; int err = 0; dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, @@ -524,8 +523,8 @@ gss_refresh_upcall(struct rpc_task *task) err = PTR_ERR(gss_msg); goto out; } - inode = &gss_msg->inode->vfs_inode; - spin_lock(&inode->i_lock); + rpci = gss_msg->inode; + spin_lock(&rpci->lock); if (gss_cred->gc_upcall != NULL) rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL); else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { @@ -538,7 +537,7 @@ gss_refresh_upcall(struct rpc_task *task) gss_handle_downcall_result(gss_cred, gss_msg); err = gss_msg->msg.errno; } - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); gss_release_msg(gss_msg); out: dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n", @@ -549,7 +548,7 @@ out: static inline int gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) { - struct inode *inode; + struct rpc_inode *rpci; struct rpc_cred *cred = &gss_cred->gc_base; struct gss_upcall_msg *gss_msg; DEFINE_WAIT(wait); @@ -573,14 +572,14 @@ retry: err = PTR_ERR(gss_msg); goto out; } - inode = &gss_msg->inode->vfs_inode; + rpci = gss_msg->inode; for (;;) { prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE); - spin_lock(&inode->i_lock); + spin_lock(&rpci->lock); if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { break; } - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); if (fatal_signal_pending(current)) { err = -ERESTARTSYS; goto out_intr; @@ -591,7 +590,7 @@ retry: gss_cred_set_ctx(cred, gss_msg->ctx); else err = gss_msg->msg.errno; - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); out_intr: finish_wait(&gss_msg->waitqueue, &wait); gss_release_msg(gss_msg); @@ -609,7 +608,7 @@ gss_pipe_downcall(struct file *filp, const char __user 
*src, size_t mlen) const void *p, *end; void *buf; struct gss_upcall_msg *gss_msg; - struct inode *inode = filp->f_path.dentry->d_inode; + struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode); struct gss_cl_ctx *ctx; uid_t uid; ssize_t err = -EFBIG; @@ -639,14 +638,14 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) err = -ENOENT; /* Find a matching upcall */ - spin_lock(&inode->i_lock); - gss_msg = __gss_find_upcall(RPC_I(inode), uid); + spin_lock(&rpci->lock); + gss_msg = __gss_find_upcall(rpci, uid); if (gss_msg == NULL) { - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); goto err_put_ctx; } list_del_init(&gss_msg->list); - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); p = gss_fill_context(p, end, ctx, gss_msg->auth->mech); if (IS_ERR(p)) { @@ -674,9 +673,9 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) err = mlen; err_release_msg: - spin_lock(&inode->i_lock); + spin_lock(&rpci->lock); __gss_unhash_msg(gss_msg); - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); gss_release_msg(gss_msg); err_put_ctx: gss_put_ctx(ctx); @@ -726,7 +725,7 @@ gss_pipe_release(struct inode *inode) struct gss_upcall_msg *gss_msg; restart: - spin_lock(&inode->i_lock); + spin_lock(&rpci->lock); list_for_each_entry(gss_msg, &rpci->in_downcall, list) { if (!list_empty(&gss_msg->msg.list)) @@ -734,11 +733,11 @@ restart: gss_msg->msg.errno = -EPIPE; atomic_inc(&gss_msg->count); __gss_unhash_msg(gss_msg); - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); gss_release_msg(gss_msg); goto restart; } - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); put_pipe_version(); } diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 9a881138fead..16d9b9a701a4 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -83,12 +83,11 @@ rpc_timeout_upcall_queue(struct work_struct *work) LIST_HEAD(free_list); struct rpc_inode *rpci = container_of(work, struct rpc_inode, queue_timeout.work); - struct inode *inode = &rpci->vfs_inode; void (*destroy_msg)(struct rpc_pipe_msg *); - spin_lock(&inode->i_lock); + spin_lock(&rpci->lock); if (rpci->ops == NULL) { - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); return; } destroy_msg = rpci->ops->destroy_msg; @@ -96,7 +95,7 @@ rpc_timeout_upcall_queue(struct work_struct *work) list_splice_init(&rpci->pipe, &free_list); rpci->pipelen = 0; } - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT); } @@ -136,7 +135,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) struct rpc_inode *rpci = RPC_I(inode); int res = -EPIPE; - spin_lock(&inode->i_lock); + spin_lock(&rpci->lock); if (rpci->ops == NULL) goto out; if (rpci->nreaders) { @@ -153,7 +152,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) res = 0; } out: - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); wake_up(&rpci->waitq); return res; } @@ -176,14 +175,14 @@ rpc_close_pipes(struct inode *inode) ops = rpci->ops; if (ops != NULL) { LIST_HEAD(free_list); - spin_lock(&inode->i_lock); + spin_lock(&rpci->lock); need_release = rpci->nreaders != 0 || rpci->nwriters != 0; rpci->nreaders = 0; list_splice_init(&rpci->in_upcall, &free_list); list_splice_init(&rpci->pipe, &free_list); rpci->pipelen = 0; rpci->ops = NULL; - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE); rpci->nwriters = 0; if (need_release && ops->release_pipe) @@ -255,10 +254,10 @@ 
rpc_pipe_release(struct inode *inode, struct file *filp) goto out; msg = filp->private_data; if (msg != NULL) { - spin_lock(&inode->i_lock); + spin_lock(&rpci->lock); msg->errno = -EAGAIN; list_del_init(&msg->list); - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); rpci->ops->destroy_msg(msg); } if (filp->f_mode & FMODE_WRITE) @@ -267,10 +266,10 @@ rpc_pipe_release(struct inode *inode, struct file *filp) rpci->nreaders --; if (rpci->nreaders == 0) { LIST_HEAD(free_list); - spin_lock(&inode->i_lock); + spin_lock(&rpci->lock); list_splice_init(&rpci->pipe, &free_list); rpci->pipelen = 0; - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); rpc_purge_list(rpci, &free_list, rpci->ops->destroy_msg, -EAGAIN); } @@ -298,7 +297,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) } msg = filp->private_data; if (msg == NULL) { - spin_lock(&inode->i_lock); + spin_lock(&rpci->lock); if (!list_empty(&rpci->pipe)) { msg = list_entry(rpci->pipe.next, struct rpc_pipe_msg, @@ -308,7 +307,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) filp->private_data = msg; msg->copied = 0; } - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); if (msg == NULL) goto out_unlock; } @@ -316,9 +315,9 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) res = rpci->ops->upcall(filp, msg, buf, len); if (res < 0 || msg->len == msg->copied) { filp->private_data = NULL; - spin_lock(&inode->i_lock); + spin_lock(&rpci->lock); list_del_init(&msg->list); - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); rpci->ops->destroy_msg(msg); } out_unlock: @@ -367,9 +366,9 @@ rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) switch (cmd) { case FIONREAD: - spin_lock(&inode->i_lock); + spin_lock(&rpci->lock); if (rpci->ops == NULL) { - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); return -EPIPE; } len = rpci->pipelen; @@ -378,7 +377,7 @@ rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) msg = filp->private_data; len += msg->len - msg->copied; } - spin_unlock(&inode->i_lock); + spin_unlock(&rpci->lock); return put_user(len, (int __user *)arg); default: return -EINVAL; @@ -1153,6 +1152,7 @@ init_once(void *foo) INIT_DELAYED_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue); rpci->ops = NULL; + spin_lock_init(&rpci->lock); } int register_rpc_pipefs(void) -- cgit v1.2.3-55-g7522 From ba9e097593f371ebd102580a0c5b1b2cf55636a0 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 26 Dec 2011 15:43:32 +0300 Subject: SUNRPC: split SUNPRC PipeFS pipe data and inode creation Generally, pipe data is used only for pipes, and thus allocating space for it on every RPC inode allocation is redundant. This patch splits private SUNRPC PipeFS pipe data and inode, makes pipe data allocated only for pipe inodes. This patch is also is a next step towards to to removing PipeFS inode references from kernel code other than PipeFS itself. 
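A minimal sketch of the access pattern after the split (hypothetical helper; it mirrors what the FIONREAD path in the diff below does, and RPC_I(inode)->pipe is only allocated for pipe inodes):

static int my_pipe_queue_len(struct inode *inode)
{
	struct rpc_pipe *pipe = RPC_I(inode)->pipe;	/* set by __rpc_mkpipe() */
	int len;

	spin_lock(&pipe->lock);
	len = pipe->pipelen;		/* bytes currently queued for userspace */
	spin_unlock(&pipe->lock);
	return len;
}
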
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- include/linux/sunrpc/rpc_pipe_fs.h | 10 +- net/sunrpc/auth_gss/auth_gss.c | 46 ++++---- net/sunrpc/rpc_pipe.c | 208 ++++++++++++++++++++----------------- 3 files changed, 142 insertions(+), 122 deletions(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index c93ea8689bc8..57512c23d34c 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -21,9 +21,7 @@ struct rpc_pipe_ops { void (*destroy_msg)(struct rpc_pipe_msg *); }; -struct rpc_inode { - struct inode vfs_inode; - void *private; +struct rpc_pipe { struct list_head pipe; struct list_head in_upcall; struct list_head in_downcall; @@ -38,6 +36,12 @@ struct rpc_inode { spinlock_t lock; }; +struct rpc_inode { + struct inode vfs_inode; + void *private; + struct rpc_pipe *pipe; +}; + static inline struct rpc_inode * RPC_I(struct inode *inode) { diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index a0844f92a447..e933484e55ef 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -112,7 +112,7 @@ gss_put_ctx(struct gss_cl_ctx *ctx) /* gss_cred_set_ctx: * called by gss_upcall_callback and gss_create_upcall in order * to set the gss context. The actual exchange of an old context - * and a new one is protected by the rpci->lock. + * and a new one is protected by the rpci->pipe->lock. */ static void gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) @@ -297,7 +297,7 @@ static struct gss_upcall_msg * __gss_find_upcall(struct rpc_inode *rpci, uid_t uid) { struct gss_upcall_msg *pos; - list_for_each_entry(pos, &rpci->in_downcall, list) { + list_for_each_entry(pos, &rpci->pipe->in_downcall, list) { if (pos->uid != uid) continue; atomic_inc(&pos->count); @@ -318,14 +318,14 @@ gss_add_msg(struct gss_upcall_msg *gss_msg) struct rpc_inode *rpci = gss_msg->inode; struct gss_upcall_msg *old; - spin_lock(&rpci->lock); + spin_lock(&rpci->pipe->lock); old = __gss_find_upcall(rpci, gss_msg->uid); if (old == NULL) { atomic_inc(&gss_msg->count); - list_add(&gss_msg->list, &rpci->in_downcall); + list_add(&gss_msg->list, &rpci->pipe->in_downcall); } else gss_msg = old; - spin_unlock(&rpci->lock); + spin_unlock(&rpci->pipe->lock); return gss_msg; } @@ -345,10 +345,10 @@ gss_unhash_msg(struct gss_upcall_msg *gss_msg) if (list_empty(&gss_msg->list)) return; - spin_lock(&rpci->lock); + spin_lock(&rpci->pipe->lock); if (!list_empty(&gss_msg->list)) __gss_unhash_msg(gss_msg); - spin_unlock(&rpci->lock); + spin_unlock(&rpci->pipe->lock); } static void @@ -377,9 +377,9 @@ gss_upcall_callback(struct rpc_task *task) struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall; struct rpc_inode *rpci = gss_msg->inode; - spin_lock(&rpci->lock); + spin_lock(&rpci->pipe->lock); gss_handle_downcall_result(gss_cred, gss_msg); - spin_unlock(&rpci->lock); + spin_unlock(&rpci->pipe->lock); task->tk_status = gss_msg->msg.errno; gss_release_msg(gss_msg); } @@ -524,7 +524,7 @@ gss_refresh_upcall(struct rpc_task *task) goto out; } rpci = gss_msg->inode; - spin_lock(&rpci->lock); + spin_lock(&rpci->pipe->lock); if (gss_cred->gc_upcall != NULL) rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL); else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { @@ -537,7 +537,7 @@ gss_refresh_upcall(struct rpc_task *task) gss_handle_downcall_result(gss_cred, gss_msg); err = gss_msg->msg.errno; } - spin_unlock(&rpci->lock); + 
spin_unlock(&rpci->pipe->lock); gss_release_msg(gss_msg); out: dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n", @@ -575,11 +575,11 @@ retry: rpci = gss_msg->inode; for (;;) { prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE); - spin_lock(&rpci->lock); + spin_lock(&rpci->pipe->lock); if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { break; } - spin_unlock(&rpci->lock); + spin_unlock(&rpci->pipe->lock); if (fatal_signal_pending(current)) { err = -ERESTARTSYS; goto out_intr; @@ -590,7 +590,7 @@ retry: gss_cred_set_ctx(cred, gss_msg->ctx); else err = gss_msg->msg.errno; - spin_unlock(&rpci->lock); + spin_unlock(&rpci->pipe->lock); out_intr: finish_wait(&gss_msg->waitqueue, &wait); gss_release_msg(gss_msg); @@ -638,14 +638,14 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) err = -ENOENT; /* Find a matching upcall */ - spin_lock(&rpci->lock); + spin_lock(&rpci->pipe->lock); gss_msg = __gss_find_upcall(rpci, uid); if (gss_msg == NULL) { - spin_unlock(&rpci->lock); + spin_unlock(&rpci->pipe->lock); goto err_put_ctx; } list_del_init(&gss_msg->list); - spin_unlock(&rpci->lock); + spin_unlock(&rpci->pipe->lock); p = gss_fill_context(p, end, ctx, gss_msg->auth->mech); if (IS_ERR(p)) { @@ -673,9 +673,9 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) err = mlen; err_release_msg: - spin_lock(&rpci->lock); + spin_lock(&rpci->pipe->lock); __gss_unhash_msg(gss_msg); - spin_unlock(&rpci->lock); + spin_unlock(&rpci->pipe->lock); gss_release_msg(gss_msg); err_put_ctx: gss_put_ctx(ctx); @@ -725,19 +725,19 @@ gss_pipe_release(struct inode *inode) struct gss_upcall_msg *gss_msg; restart: - spin_lock(&rpci->lock); - list_for_each_entry(gss_msg, &rpci->in_downcall, list) { + spin_lock(&rpci->pipe->lock); + list_for_each_entry(gss_msg, &rpci->pipe->in_downcall, list) { if (!list_empty(&gss_msg->msg.list)) continue; gss_msg->msg.errno = -EPIPE; atomic_inc(&gss_msg->count); __gss_unhash_msg(gss_msg); - spin_unlock(&rpci->lock); + spin_unlock(&rpci->pipe->lock); gss_release_msg(gss_msg); goto restart; } - spin_unlock(&rpci->lock); + spin_unlock(&rpci->pipe->lock); put_pipe_version(); } diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 16d9b9a701a4..b6f6555128db 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -61,7 +61,7 @@ void rpc_pipefs_notifier_unregister(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(rpc_pipefs_notifier_unregister); -static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, +static void rpc_purge_list(struct rpc_pipe *pipe, struct list_head *head, void (*destroy_msg)(struct rpc_pipe_msg *), int err) { struct rpc_pipe_msg *msg; @@ -74,29 +74,29 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, msg->errno = err; destroy_msg(msg); } while (!list_empty(head)); - wake_up(&rpci->waitq); + wake_up(&pipe->waitq); } static void rpc_timeout_upcall_queue(struct work_struct *work) { LIST_HEAD(free_list); - struct rpc_inode *rpci = - container_of(work, struct rpc_inode, queue_timeout.work); + struct rpc_pipe *pipe = + container_of(work, struct rpc_pipe, queue_timeout.work); void (*destroy_msg)(struct rpc_pipe_msg *); - spin_lock(&rpci->lock); - if (rpci->ops == NULL) { - spin_unlock(&rpci->lock); + spin_lock(&pipe->lock); + if (pipe->ops == NULL) { + spin_unlock(&pipe->lock); return; } - destroy_msg = rpci->ops->destroy_msg; - if (rpci->nreaders == 0) { - list_splice_init(&rpci->pipe, &free_list); - rpci->pipelen = 0; + destroy_msg = 
pipe->ops->destroy_msg; + if (pipe->nreaders == 0) { + list_splice_init(&pipe->pipe, &free_list); + pipe->pipelen = 0; } - spin_unlock(&rpci->lock); - rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT); + spin_unlock(&pipe->lock); + rpc_purge_list(pipe, &free_list, destroy_msg, -ETIMEDOUT); } ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg, @@ -135,25 +135,25 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) struct rpc_inode *rpci = RPC_I(inode); int res = -EPIPE; - spin_lock(&rpci->lock); - if (rpci->ops == NULL) + spin_lock(&rpci->pipe->lock); + if (rpci->pipe->ops == NULL) goto out; - if (rpci->nreaders) { - list_add_tail(&msg->list, &rpci->pipe); - rpci->pipelen += msg->len; + if (rpci->pipe->nreaders) { + list_add_tail(&msg->list, &rpci->pipe->pipe); + rpci->pipe->pipelen += msg->len; res = 0; - } else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) { - if (list_empty(&rpci->pipe)) + } else if (rpci->pipe->flags & RPC_PIPE_WAIT_FOR_OPEN) { + if (list_empty(&rpci->pipe->pipe)) queue_delayed_work(rpciod_workqueue, - &rpci->queue_timeout, + &rpci->pipe->queue_timeout, RPC_UPCALL_TIMEOUT); - list_add_tail(&msg->list, &rpci->pipe); - rpci->pipelen += msg->len; + list_add_tail(&msg->list, &rpci->pipe->pipe); + rpci->pipe->pipelen += msg->len; res = 0; } out: - spin_unlock(&rpci->lock); - wake_up(&rpci->waitq); + spin_unlock(&rpci->pipe->lock); + wake_up(&rpci->pipe->waitq); return res; } EXPORT_SYMBOL_GPL(rpc_queue_upcall); @@ -167,27 +167,27 @@ rpc_inode_setowner(struct inode *inode, void *private) static void rpc_close_pipes(struct inode *inode) { - struct rpc_inode *rpci = RPC_I(inode); + struct rpc_pipe *pipe = RPC_I(inode)->pipe; const struct rpc_pipe_ops *ops; int need_release; mutex_lock(&inode->i_mutex); - ops = rpci->ops; + ops = pipe->ops; if (ops != NULL) { LIST_HEAD(free_list); - spin_lock(&rpci->lock); - need_release = rpci->nreaders != 0 || rpci->nwriters != 0; - rpci->nreaders = 0; - list_splice_init(&rpci->in_upcall, &free_list); - list_splice_init(&rpci->pipe, &free_list); - rpci->pipelen = 0; - rpci->ops = NULL; - spin_unlock(&rpci->lock); - rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE); - rpci->nwriters = 0; + spin_lock(&pipe->lock); + need_release = pipe->nreaders != 0 || pipe->nwriters != 0; + pipe->nreaders = 0; + list_splice_init(&pipe->in_upcall, &free_list); + list_splice_init(&pipe->pipe, &free_list); + pipe->pipelen = 0; + pipe->ops = NULL; + spin_unlock(&pipe->lock); + rpc_purge_list(pipe, &free_list, ops->destroy_msg, -EPIPE); + pipe->nwriters = 0; if (need_release && ops->release_pipe) ops->release_pipe(inode); - cancel_delayed_work_sync(&rpci->queue_timeout); + cancel_delayed_work_sync(&pipe->queue_timeout); } rpc_inode_setowner(inode, NULL); mutex_unlock(&inode->i_mutex); @@ -207,6 +207,7 @@ static void rpc_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); + kfree(RPC_I(inode)->pipe); kmem_cache_free(rpc_inode_cachep, RPC_I(inode)); } @@ -224,18 +225,18 @@ rpc_pipe_open(struct inode *inode, struct file *filp) int res = -ENXIO; mutex_lock(&inode->i_mutex); - if (rpci->ops == NULL) + if (rpci->pipe->ops == NULL) goto out; - first_open = rpci->nreaders == 0 && rpci->nwriters == 0; - if (first_open && rpci->ops->open_pipe) { - res = rpci->ops->open_pipe(inode); + first_open = rpci->pipe->nreaders == 0 && rpci->pipe->nwriters == 0; + if (first_open && rpci->pipe->ops->open_pipe) { + res = rpci->pipe->ops->open_pipe(inode); if (res) goto out; } if 
(filp->f_mode & FMODE_READ) - rpci->nreaders++; + rpci->pipe->nreaders++; if (filp->f_mode & FMODE_WRITE) - rpci->nwriters++; + rpci->pipe->nwriters++; res = 0; out: mutex_unlock(&inode->i_mutex); @@ -245,38 +246,38 @@ out: static int rpc_pipe_release(struct inode *inode, struct file *filp) { - struct rpc_inode *rpci = RPC_I(inode); + struct rpc_pipe *pipe = RPC_I(inode)->pipe; struct rpc_pipe_msg *msg; int last_close; mutex_lock(&inode->i_mutex); - if (rpci->ops == NULL) + if (pipe->ops == NULL) goto out; msg = filp->private_data; if (msg != NULL) { - spin_lock(&rpci->lock); + spin_lock(&pipe->lock); msg->errno = -EAGAIN; list_del_init(&msg->list); - spin_unlock(&rpci->lock); - rpci->ops->destroy_msg(msg); + spin_unlock(&pipe->lock); + pipe->ops->destroy_msg(msg); } if (filp->f_mode & FMODE_WRITE) - rpci->nwriters --; + pipe->nwriters --; if (filp->f_mode & FMODE_READ) { - rpci->nreaders --; - if (rpci->nreaders == 0) { + pipe->nreaders --; + if (pipe->nreaders == 0) { LIST_HEAD(free_list); - spin_lock(&rpci->lock); - list_splice_init(&rpci->pipe, &free_list); - rpci->pipelen = 0; - spin_unlock(&rpci->lock); - rpc_purge_list(rpci, &free_list, - rpci->ops->destroy_msg, -EAGAIN); + spin_lock(&pipe->lock); + list_splice_init(&pipe->pipe, &free_list); + pipe->pipelen = 0; + spin_unlock(&pipe->lock); + rpc_purge_list(pipe, &free_list, + pipe->ops->destroy_msg, -EAGAIN); } } - last_close = rpci->nwriters == 0 && rpci->nreaders == 0; - if (last_close && rpci->ops->release_pipe) - rpci->ops->release_pipe(inode); + last_close = pipe->nwriters == 0 && pipe->nreaders == 0; + if (last_close && pipe->ops->release_pipe) + pipe->ops->release_pipe(inode); out: mutex_unlock(&inode->i_mutex); return 0; @@ -291,34 +292,34 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) int res = 0; mutex_lock(&inode->i_mutex); - if (rpci->ops == NULL) { + if (rpci->pipe->ops == NULL) { res = -EPIPE; goto out_unlock; } msg = filp->private_data; if (msg == NULL) { - spin_lock(&rpci->lock); - if (!list_empty(&rpci->pipe)) { - msg = list_entry(rpci->pipe.next, + spin_lock(&rpci->pipe->lock); + if (!list_empty(&rpci->pipe->pipe)) { + msg = list_entry(rpci->pipe->pipe.next, struct rpc_pipe_msg, list); - list_move(&msg->list, &rpci->in_upcall); - rpci->pipelen -= msg->len; + list_move(&msg->list, &rpci->pipe->in_upcall); + rpci->pipe->pipelen -= msg->len; filp->private_data = msg; msg->copied = 0; } - spin_unlock(&rpci->lock); + spin_unlock(&rpci->pipe->lock); if (msg == NULL) goto out_unlock; } /* NOTE: it is up to the callback to update msg->copied */ - res = rpci->ops->upcall(filp, msg, buf, len); + res = rpci->pipe->ops->upcall(filp, msg, buf, len); if (res < 0 || msg->len == msg->copied) { filp->private_data = NULL; - spin_lock(&rpci->lock); + spin_lock(&rpci->pipe->lock); list_del_init(&msg->list); - spin_unlock(&rpci->lock); - rpci->ops->destroy_msg(msg); + spin_unlock(&rpci->pipe->lock); + rpci->pipe->ops->destroy_msg(msg); } out_unlock: mutex_unlock(&inode->i_mutex); @@ -334,8 +335,8 @@ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *of mutex_lock(&inode->i_mutex); res = -EPIPE; - if (rpci->ops != NULL) - res = rpci->ops->downcall(filp, buf, len); + if (rpci->pipe->ops != NULL) + res = rpci->pipe->ops->downcall(filp, buf, len); mutex_unlock(&inode->i_mutex); return res; } @@ -347,12 +348,12 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait) unsigned int mask = 0; rpci = RPC_I(filp->f_path.dentry->d_inode); - poll_wait(filp, &rpci->waitq, 
wait); + poll_wait(filp, &rpci->pipe->waitq, wait); mask = POLLOUT | POLLWRNORM; - if (rpci->ops == NULL) + if (rpci->pipe->ops == NULL) mask |= POLLERR | POLLHUP; - if (filp->private_data || !list_empty(&rpci->pipe)) + if (filp->private_data || !list_empty(&rpci->pipe->pipe)) mask |= POLLIN | POLLRDNORM; return mask; } @@ -366,18 +367,18 @@ rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) switch (cmd) { case FIONREAD: - spin_lock(&rpci->lock); - if (rpci->ops == NULL) { - spin_unlock(&rpci->lock); + spin_lock(&rpci->pipe->lock); + if (rpci->pipe->ops == NULL) { + spin_unlock(&rpci->pipe->lock); return -EPIPE; } - len = rpci->pipelen; + len = rpci->pipe->pipelen; if (filp->private_data) { struct rpc_pipe_msg *msg; msg = filp->private_data; len += msg->len - msg->copied; } - spin_unlock(&rpci->lock); + spin_unlock(&rpci->pipe->lock); return put_user(len, (int __user *)arg); default: return -EINVAL; @@ -562,6 +563,23 @@ static int __rpc_mkdir(struct inode *dir, struct dentry *dentry, return 0; } +static void +init_pipe(struct rpc_pipe *pipe) +{ + pipe->nreaders = 0; + pipe->nwriters = 0; + INIT_LIST_HEAD(&pipe->in_upcall); + INIT_LIST_HEAD(&pipe->in_downcall); + INIT_LIST_HEAD(&pipe->pipe); + pipe->pipelen = 0; + init_waitqueue_head(&pipe->waitq); + INIT_DELAYED_WORK(&pipe->queue_timeout, + rpc_timeout_upcall_queue); + pipe->ops = NULL; + spin_lock_init(&pipe->lock); + +} + static int __rpc_mkpipe(struct inode *dir, struct dentry *dentry, umode_t mode, const struct file_operations *i_fop, @@ -569,16 +587,24 @@ static int __rpc_mkpipe(struct inode *dir, struct dentry *dentry, const struct rpc_pipe_ops *ops, int flags) { + struct rpc_pipe *pipe; struct rpc_inode *rpci; int err; + pipe = kzalloc(sizeof(struct rpc_pipe), GFP_KERNEL); + if (!pipe) + return -ENOMEM; + init_pipe(pipe); err = __rpc_create_common(dir, dentry, S_IFIFO | mode, i_fop, private); - if (err) + if (err) { + kfree(pipe); return err; + } rpci = RPC_I(dentry->d_inode); rpci->private = private; - rpci->flags = flags; - rpci->ops = ops; + rpci->pipe = pipe; + rpci->pipe->flags = flags; + rpci->pipe->ops = ops; fsnotify_create(dir, dentry); return 0; } @@ -1142,17 +1168,7 @@ init_once(void *foo) inode_init_once(&rpci->vfs_inode); rpci->private = NULL; - rpci->nreaders = 0; - rpci->nwriters = 0; - INIT_LIST_HEAD(&rpci->in_upcall); - INIT_LIST_HEAD(&rpci->in_downcall); - INIT_LIST_HEAD(&rpci->pipe); - rpci->pipelen = 0; - init_waitqueue_head(&rpci->waitq); - INIT_DELAYED_WORK(&rpci->queue_timeout, - rpc_timeout_upcall_queue); - rpci->ops = NULL; - spin_lock_init(&rpci->lock); + rpci->pipe = NULL; } int register_rpc_pipefs(void) -- cgit v1.2.3-55-g7522 From d0fe13ba9178d3bb78bbd8577bdedc00f76b7a66 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 26 Dec 2011 15:43:41 +0300 Subject: SUNRPC: cleanup PipeFS redundant RPC inode usage This patch removes redundant RPC inode references from PipeFS. These places are actually where pipes operations are performed. 
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- net/sunrpc/rpc_pipe.c | 93 +++++++++++++++++++++++++-------------------------- 1 file changed, 46 insertions(+), 47 deletions(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index b6f6555128db..299b1a3c3e49 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -132,28 +132,28 @@ EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall); int rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) { - struct rpc_inode *rpci = RPC_I(inode); + struct rpc_pipe *pipe = RPC_I(inode)->pipe; int res = -EPIPE; - spin_lock(&rpci->pipe->lock); - if (rpci->pipe->ops == NULL) + spin_lock(&pipe->lock); + if (pipe->ops == NULL) goto out; - if (rpci->pipe->nreaders) { - list_add_tail(&msg->list, &rpci->pipe->pipe); - rpci->pipe->pipelen += msg->len; + if (pipe->nreaders) { + list_add_tail(&msg->list, &pipe->pipe); + pipe->pipelen += msg->len; res = 0; - } else if (rpci->pipe->flags & RPC_PIPE_WAIT_FOR_OPEN) { - if (list_empty(&rpci->pipe->pipe)) + } else if (pipe->flags & RPC_PIPE_WAIT_FOR_OPEN) { + if (list_empty(&pipe->pipe)) queue_delayed_work(rpciod_workqueue, - &rpci->pipe->queue_timeout, + &pipe->queue_timeout, RPC_UPCALL_TIMEOUT); - list_add_tail(&msg->list, &rpci->pipe->pipe); - rpci->pipe->pipelen += msg->len; + list_add_tail(&msg->list, &pipe->pipe); + pipe->pipelen += msg->len; res = 0; } out: - spin_unlock(&rpci->pipe->lock); - wake_up(&rpci->pipe->waitq); + spin_unlock(&pipe->lock); + wake_up(&pipe->waitq); return res; } EXPORT_SYMBOL_GPL(rpc_queue_upcall); @@ -220,23 +220,23 @@ rpc_destroy_inode(struct inode *inode) static int rpc_pipe_open(struct inode *inode, struct file *filp) { - struct rpc_inode *rpci = RPC_I(inode); + struct rpc_pipe *pipe = RPC_I(inode)->pipe; int first_open; int res = -ENXIO; mutex_lock(&inode->i_mutex); - if (rpci->pipe->ops == NULL) + if (pipe->ops == NULL) goto out; - first_open = rpci->pipe->nreaders == 0 && rpci->pipe->nwriters == 0; - if (first_open && rpci->pipe->ops->open_pipe) { - res = rpci->pipe->ops->open_pipe(inode); + first_open = pipe->nreaders == 0 && pipe->nwriters == 0; + if (first_open && pipe->ops->open_pipe) { + res = pipe->ops->open_pipe(inode); if (res) goto out; } if (filp->f_mode & FMODE_READ) - rpci->pipe->nreaders++; + pipe->nreaders++; if (filp->f_mode & FMODE_WRITE) - rpci->pipe->nwriters++; + pipe->nwriters++; res = 0; out: mutex_unlock(&inode->i_mutex); @@ -287,39 +287,39 @@ static ssize_t rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) { struct inode *inode = filp->f_path.dentry->d_inode; - struct rpc_inode *rpci = RPC_I(inode); + struct rpc_pipe *pipe = RPC_I(inode)->pipe; struct rpc_pipe_msg *msg; int res = 0; mutex_lock(&inode->i_mutex); - if (rpci->pipe->ops == NULL) { + if (pipe->ops == NULL) { res = -EPIPE; goto out_unlock; } msg = filp->private_data; if (msg == NULL) { - spin_lock(&rpci->pipe->lock); - if (!list_empty(&rpci->pipe->pipe)) { - msg = list_entry(rpci->pipe->pipe.next, + spin_lock(&pipe->lock); + if (!list_empty(&pipe->pipe)) { + msg = list_entry(pipe->pipe.next, struct rpc_pipe_msg, list); - list_move(&msg->list, &rpci->pipe->in_upcall); - rpci->pipe->pipelen -= msg->len; + list_move(&msg->list, &pipe->in_upcall); + pipe->pipelen -= msg->len; filp->private_data = msg; msg->copied = 0; } - spin_unlock(&rpci->pipe->lock); + spin_unlock(&pipe->lock); if (msg == NULL) goto out_unlock; } /* NOTE: it is up to the callback to update msg->copied */ - res = 
rpci->pipe->ops->upcall(filp, msg, buf, len); + res = pipe->ops->upcall(filp, msg, buf, len); if (res < 0 || msg->len == msg->copied) { filp->private_data = NULL; - spin_lock(&rpci->pipe->lock); + spin_lock(&pipe->lock); list_del_init(&msg->list); - spin_unlock(&rpci->pipe->lock); - rpci->pipe->ops->destroy_msg(msg); + spin_unlock(&pipe->lock); + pipe->ops->destroy_msg(msg); } out_unlock: mutex_unlock(&inode->i_mutex); @@ -330,13 +330,13 @@ static ssize_t rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset) { struct inode *inode = filp->f_path.dentry->d_inode; - struct rpc_inode *rpci = RPC_I(inode); + struct rpc_pipe *pipe = RPC_I(inode)->pipe; int res; mutex_lock(&inode->i_mutex); res = -EPIPE; - if (rpci->pipe->ops != NULL) - res = rpci->pipe->ops->downcall(filp, buf, len); + if (pipe->ops != NULL) + res = pipe->ops->downcall(filp, buf, len); mutex_unlock(&inode->i_mutex); return res; } @@ -344,16 +344,15 @@ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *of static unsigned int rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait) { - struct rpc_inode *rpci; + struct rpc_pipe *pipe = RPC_I(filp->f_path.dentry->d_inode)->pipe; unsigned int mask = 0; - rpci = RPC_I(filp->f_path.dentry->d_inode); - poll_wait(filp, &rpci->pipe->waitq, wait); + poll_wait(filp, &pipe->waitq, wait); mask = POLLOUT | POLLWRNORM; - if (rpci->pipe->ops == NULL) + if (pipe->ops == NULL) mask |= POLLERR | POLLHUP; - if (filp->private_data || !list_empty(&rpci->pipe->pipe)) + if (filp->private_data || !list_empty(&pipe->pipe)) mask |= POLLIN | POLLRDNORM; return mask; } @@ -362,23 +361,23 @@ static long rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = filp->f_path.dentry->d_inode; - struct rpc_inode *rpci = RPC_I(inode); + struct rpc_pipe *pipe = RPC_I(inode)->pipe; int len; switch (cmd) { case FIONREAD: - spin_lock(&rpci->pipe->lock); - if (rpci->pipe->ops == NULL) { - spin_unlock(&rpci->pipe->lock); + spin_lock(&pipe->lock); + if (pipe->ops == NULL) { + spin_unlock(&pipe->lock); return -EPIPE; } - len = rpci->pipe->pipelen; + len = pipe->pipelen; if (filp->private_data) { struct rpc_pipe_msg *msg; msg = filp->private_data; len += msg->len - msg->copied; } - spin_unlock(&rpci->pipe->lock); + spin_unlock(&pipe->lock); return put_user(len, (int __user *)arg); default: return -EINVAL; @@ -808,7 +807,7 @@ static int rpc_rmdir_depopulate(struct dentry *dentry, * @private: private data to associate with the pipe, for the caller's use * @ops: operations defining the behavior of the pipe: upcall, downcall, * release_pipe, open_pipe, and destroy_msg. - * @flags: rpc_inode flags + * @flags: rpc_pipe flags * * Data is made available for userspace to read by calls to * rpc_queue_upcall(). The actual reads will result in calls to -- cgit v1.2.3-55-g7522 From d706ed1f50d3f7fae61a177183562179abe8e4bb Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 26 Dec 2011 15:43:49 +0300 Subject: SUNPRC: cleanup RPC PipeFS pipes upcall interface RPC pipe upcall doesn't requires only private pipe data. Thus RPC inode references in this code can be removed. 
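On the caller side this reduces to passing the private pipe data instead of the pipefs inode, as in this hypothetical wrapper (for illustration; the message must stay alive until the downcall or destroy_msg callback runs):

static int my_send_upcall(struct dentry *pipe_dentry, struct rpc_pipe_msg *msg)
{
	/* the pipefs inode is no longer part of the upcall interface */
	return rpc_queue_upcall(RPC_I(pipe_dentry->d_inode)->pipe, msg);
}
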
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/blocklayout/blocklayoutdev.c | 2 +- fs/nfs/blocklayout/blocklayoutdm.c | 2 +- fs/nfs/idmap.c | 4 ++-- include/linux/sunrpc/rpc_pipe_fs.h | 2 +- net/sunrpc/auth_gss/auth_gss.c | 3 +-- net/sunrpc/rpc_pipe.c | 3 +-- 6 files changed, 7 insertions(+), 9 deletions(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c index d08ba9107fde..81019190e46d 100644 --- a/fs/nfs/blocklayout/blocklayoutdev.c +++ b/fs/nfs/blocklayout/blocklayoutdev.c @@ -146,7 +146,7 @@ nfs4_blk_decode_device(struct nfs_server *server, dprintk("%s CALLING USERSPACE DAEMON\n", __func__); add_wait_queue(&bl_wq, &wq); - rc = rpc_queue_upcall(bl_device_pipe->d_inode, &msg); + rc = rpc_queue_upcall(RPC_I(bl_device_pipe->d_inode)->pipe, &msg); if (rc < 0) { remove_wait_queue(&bl_wq, &wq); rv = ERR_PTR(rc); diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c index d055c7558073..3c38244a8724 100644 --- a/fs/nfs/blocklayout/blocklayoutdm.c +++ b/fs/nfs/blocklayout/blocklayoutdm.c @@ -66,7 +66,7 @@ static void dev_remove(dev_t dev) msg.len = sizeof(bl_msg) + bl_msg.totallen; add_wait_queue(&bl_wq, &wq); - if (rpc_queue_upcall(bl_device_pipe->d_inode, &msg) < 0) { + if (rpc_queue_upcall(RPC_I(bl_device_pipe->d_inode)->pipe, &msg) < 0) { remove_wait_queue(&bl_wq, &wq); goto out; } diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 2c05f1991e1e..3c63c47c793d 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -589,7 +589,7 @@ nfs_idmap_id(struct idmap *idmap, struct idmap_hashtable *h, msg.len = sizeof(*im); add_wait_queue(&idmap->idmap_wq, &wq); - if (rpc_queue_upcall(idmap->idmap_dentry->d_inode, &msg) < 0) { + if (rpc_queue_upcall(RPC_I(idmap->idmap_dentry->d_inode)->pipe, &msg) < 0) { remove_wait_queue(&idmap->idmap_wq, &wq); goto out; } @@ -650,7 +650,7 @@ nfs_idmap_name(struct idmap *idmap, struct idmap_hashtable *h, add_wait_queue(&idmap->idmap_wq, &wq); - if (rpc_queue_upcall(idmap->idmap_dentry->d_inode, &msg) < 0) { + if (rpc_queue_upcall(RPC_I(idmap->idmap_dentry->d_inode)->pipe, &msg) < 0) { remove_wait_queue(&idmap->idmap_wq, &wq); goto out; } diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index 57512c23d34c..3ebc257e2b43 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -64,7 +64,7 @@ extern void rpc_put_sb_net(const struct net *net); extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *, char __user *, size_t); -extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *); +extern int rpc_queue_upcall(struct rpc_pipe *, struct rpc_pipe_msg *); struct rpc_clnt; extern struct dentry *rpc_create_client_dir(struct dentry *, struct qstr *, struct rpc_clnt *); diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index e933484e55ef..4157e3151581 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -473,8 +473,7 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr return gss_new; gss_msg = gss_add_msg(gss_new); if (gss_msg == gss_new) { - struct inode *inode = &gss_new->inode->vfs_inode; - int res = rpc_queue_upcall(inode, &gss_new->msg); + int res = rpc_queue_upcall(gss_new->inode->pipe, &gss_new->msg); if (res) { gss_unhash_msg(gss_new); gss_msg = ERR_PTR(res); diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 299b1a3c3e49..4093da79d512 
100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -130,9 +130,8 @@ EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall); * initialize the fields of @msg (other than @msg->list) appropriately. */ int -rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) +rpc_queue_upcall(struct rpc_pipe *pipe, struct rpc_pipe_msg *msg) { - struct rpc_pipe *pipe = RPC_I(inode)->pipe; int res = -EPIPE; spin_lock(&pipe->lock); -- cgit v1.2.3-55-g7522 From c239d83b9921b8a8005a3bcd23000cfe18acf5c2 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 26 Dec 2011 15:44:06 +0300 Subject: SUNRPC: split SUNPRC PipeFS dentry and private pipe data creation This patch is a final step towards to removing PipeFS inode references from kernel code other than PipeFS itself. It makes all kernel SUNRPC PipeFS users depends on pipe private data, which state depend on their specific operations, etc. This patch completes SUNRPC PipeFS preparations and allows to create pipe private data and PipeFS dentries independently. Next step will be making SUNPRC PipeFS dentries allocated by SUNRPC PipeFS network namespace aware routines. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/blocklayout/blocklayout.c | 16 ++++++++--- fs/nfs/blocklayout/blocklayout.h | 2 +- fs/nfs/blocklayout/blocklayoutdev.c | 2 +- fs/nfs/blocklayout/blocklayoutdm.c | 2 +- fs/nfs/idmap.c | 28 +++++++++++++------ include/linux/sunrpc/rpc_pipe_fs.h | 7 +++-- net/sunrpc/auth_gss/auth_gss.c | 54 ++++++++++++++++++++++++------------- net/sunrpc/rpc_pipe.c | 54 +++++++++++++++++++++---------------- 8 files changed, 107 insertions(+), 58 deletions(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 48cfac31f64c..848660fd58c4 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -46,7 +46,7 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Andy Adamson "); MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver"); -struct dentry *bl_device_pipe; +struct rpc_pipe *bl_device_pipe; wait_queue_head_t bl_wq; static void print_page(struct page *page) @@ -1051,16 +1051,23 @@ static int __init nfs4blocklayout_init(void) if (ret) goto out_putrpc; - bl_device_pipe = rpc_mkpipe(path.dentry, "blocklayout", NULL, - &bl_upcall_ops, 0); + bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0); path_put(&path); if (IS_ERR(bl_device_pipe)) { ret = PTR_ERR(bl_device_pipe); goto out_putrpc; } + bl_device_pipe->dentry = rpc_mkpipe_dentry(path.dentry, "blocklayout", + NULL, bl_device_pipe); + if (IS_ERR(bl_device_pipe->dentry)) { + ret = PTR_ERR(bl_device_pipe->dentry); + goto out_destroy_pipe; + } out: return ret; +out_destroy_pipe: + rpc_destroy_pipe_data(bl_device_pipe); out_putrpc: rpc_put_mount(); out_remove: @@ -1074,7 +1081,8 @@ static void __exit nfs4blocklayout_exit(void) __func__); pnfs_unregister_layoutdriver(&blocklayout_type); - rpc_unlink(bl_device_pipe); + rpc_unlink(bl_device_pipe->dentry); + rpc_destroy_pipe_data(bl_device_pipe); rpc_put_mount(); } diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h index e31a2df28e70..49c670b18a9e 100644 --- a/fs/nfs/blocklayout/blocklayout.h +++ b/fs/nfs/blocklayout/blocklayout.h @@ -161,7 +161,7 @@ struct bl_msg_hdr { u16 totallen; /* length of entire message, including hdr itself */ }; -extern struct dentry *bl_device_pipe; +extern struct rpc_pipe *bl_device_pipe; extern wait_queue_head_t bl_wq; #define BL_DEVICE_UMOUNT 0x0 /* Umount--delete devices */ diff 
--git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c index 81019190e46d..949b62478799 100644 --- a/fs/nfs/blocklayout/blocklayoutdev.c +++ b/fs/nfs/blocklayout/blocklayoutdev.c @@ -146,7 +146,7 @@ nfs4_blk_decode_device(struct nfs_server *server, dprintk("%s CALLING USERSPACE DAEMON\n", __func__); add_wait_queue(&bl_wq, &wq); - rc = rpc_queue_upcall(RPC_I(bl_device_pipe->d_inode)->pipe, &msg); + rc = rpc_queue_upcall(bl_device_pipe, &msg); if (rc < 0) { remove_wait_queue(&bl_wq, &wq); rv = ERR_PTR(rc); diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c index 3c38244a8724..631f254d12ab 100644 --- a/fs/nfs/blocklayout/blocklayoutdm.c +++ b/fs/nfs/blocklayout/blocklayoutdm.c @@ -66,7 +66,7 @@ static void dev_remove(dev_t dev) msg.len = sizeof(bl_msg) + bl_msg.totallen; add_wait_queue(&bl_wq, &wq); - if (rpc_queue_upcall(RPC_I(bl_device_pipe->d_inode)->pipe, &msg) < 0) { + if (rpc_queue_upcall(bl_device_pipe, &msg) < 0) { remove_wait_queue(&bl_wq, &wq); goto out; } diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 3c63c47c793d..2992cb854e12 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -410,7 +410,7 @@ struct idmap_hashtable { }; struct idmap { - struct dentry *idmap_dentry; + struct rpc_pipe *idmap_pipe; wait_queue_head_t idmap_wq; struct idmap_msg idmap_im; struct mutex idmap_lock; /* Serializes upcalls */ @@ -435,6 +435,7 @@ int nfs_idmap_new(struct nfs_client *clp) { struct idmap *idmap; + struct rpc_pipe *pipe; int error; BUG_ON(clp->cl_idmap != NULL); @@ -443,14 +444,23 @@ nfs_idmap_new(struct nfs_client *clp) if (idmap == NULL) return -ENOMEM; - idmap->idmap_dentry = rpc_mkpipe(clp->cl_rpcclient->cl_path.dentry, - "idmap", idmap, &idmap_upcall_ops, 0); - if (IS_ERR(idmap->idmap_dentry)) { - error = PTR_ERR(idmap->idmap_dentry); + pipe = rpc_mkpipe_data(&idmap_upcall_ops, 0); + if (IS_ERR(pipe)) { + error = PTR_ERR(pipe); kfree(idmap); return error; } + if (clp->cl_rpcclient->cl_path.dentry) + pipe->dentry = rpc_mkpipe_dentry(clp->cl_rpcclient->cl_path.dentry, + "idmap", idmap, pipe); + if (IS_ERR(pipe->dentry)) { + error = PTR_ERR(pipe->dentry); + rpc_destroy_pipe_data(pipe); + kfree(idmap); + return error; + } + idmap->idmap_pipe = pipe; mutex_init(&idmap->idmap_lock); mutex_init(&idmap->idmap_im_lock); init_waitqueue_head(&idmap->idmap_wq); @@ -468,7 +478,9 @@ nfs_idmap_delete(struct nfs_client *clp) if (!idmap) return; - rpc_unlink(idmap->idmap_dentry); + if (idmap->idmap_pipe->dentry) + rpc_unlink(idmap->idmap_pipe->dentry); + rpc_destroy_pipe_data(idmap->idmap_pipe); clp->cl_idmap = NULL; kfree(idmap); } @@ -589,7 +601,7 @@ nfs_idmap_id(struct idmap *idmap, struct idmap_hashtable *h, msg.len = sizeof(*im); add_wait_queue(&idmap->idmap_wq, &wq); - if (rpc_queue_upcall(RPC_I(idmap->idmap_dentry->d_inode)->pipe, &msg) < 0) { + if (rpc_queue_upcall(idmap->idmap_pipe, &msg) < 0) { remove_wait_queue(&idmap->idmap_wq, &wq); goto out; } @@ -650,7 +662,7 @@ nfs_idmap_name(struct idmap *idmap, struct idmap_hashtable *h, add_wait_queue(&idmap->idmap_wq, &wq); - if (rpc_queue_upcall(RPC_I(idmap->idmap_dentry->d_inode)->pipe, &msg) < 0) { + if (rpc_queue_upcall(idmap->idmap_pipe, &msg) < 0) { remove_wait_queue(&idmap->idmap_wq, &wq); goto out; } diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index 3ebc257e2b43..0d1f748f76da 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -34,6 +34,7 @@ struct rpc_pipe { struct delayed_work queue_timeout; const 
struct rpc_pipe_ops *ops; spinlock_t lock; + struct dentry *dentry; }; struct rpc_inode { @@ -77,8 +78,10 @@ extern struct dentry *rpc_create_cache_dir(struct dentry *, struct cache_detail *); extern void rpc_remove_cache_dir(struct dentry *); -extern struct dentry *rpc_mkpipe(struct dentry *, const char *, void *, - const struct rpc_pipe_ops *, int flags); +struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags); +void rpc_destroy_pipe_data(struct rpc_pipe *pipe); +extern struct dentry *rpc_mkpipe_dentry(struct dentry *, const char *, void *, + struct rpc_pipe *); extern int rpc_unlink(struct dentry *); extern struct vfsmount *rpc_get_mount(void); extern void rpc_put_mount(void); diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 304b8309f217..f684ce606667 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -81,7 +81,7 @@ struct gss_auth { * mechanism (for example, "krb5") and exists for * backwards-compatibility with older gssd's. */ - struct dentry *dentry[2]; + struct rpc_pipe *pipe[2]; }; /* pipe_version >= 0 if and only if someone has a pipe open. */ @@ -449,7 +449,7 @@ gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt, kfree(gss_msg); return ERR_PTR(vers); } - gss_msg->pipe = RPC_I(gss_auth->dentry[vers]->d_inode)->pipe; + gss_msg->pipe = gss_auth->pipe[vers]; INIT_LIST_HEAD(&gss_msg->list); rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq"); init_waitqueue_head(&gss_msg->waitqueue); @@ -799,21 +799,33 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) * that we supported only the old pipe. So we instead create * the new pipe first. */ - gss_auth->dentry[1] = rpc_mkpipe(clnt->cl_path.dentry, - "gssd", - clnt, &gss_upcall_ops_v1, - RPC_PIPE_WAIT_FOR_OPEN); - if (IS_ERR(gss_auth->dentry[1])) { - err = PTR_ERR(gss_auth->dentry[1]); + gss_auth->pipe[1] = rpc_mkpipe_data(&gss_upcall_ops_v1, + RPC_PIPE_WAIT_FOR_OPEN); + if (IS_ERR(gss_auth->pipe[1])) { + err = PTR_ERR(gss_auth->pipe[1]); goto err_put_mech; } - gss_auth->dentry[0] = rpc_mkpipe(clnt->cl_path.dentry, - gss_auth->mech->gm_name, - clnt, &gss_upcall_ops_v0, - RPC_PIPE_WAIT_FOR_OPEN); - if (IS_ERR(gss_auth->dentry[0])) { - err = PTR_ERR(gss_auth->dentry[0]); + gss_auth->pipe[0] = rpc_mkpipe_data(&gss_upcall_ops_v0, + RPC_PIPE_WAIT_FOR_OPEN); + if (IS_ERR(gss_auth->pipe[0])) { + err = PTR_ERR(gss_auth->pipe[0]); + goto err_destroy_pipe_1; + } + + gss_auth->pipe[1]->dentry = rpc_mkpipe_dentry(clnt->cl_path.dentry, + "gssd", + clnt, gss_auth->pipe[1]); + if (IS_ERR(gss_auth->pipe[1]->dentry)) { + err = PTR_ERR(gss_auth->pipe[1]->dentry); + goto err_destroy_pipe_0; + } + + gss_auth->pipe[0]->dentry = rpc_mkpipe_dentry(clnt->cl_path.dentry, + gss_auth->mech->gm_name, + clnt, gss_auth->pipe[0]); + if (IS_ERR(gss_auth->pipe[0]->dentry)) { + err = PTR_ERR(gss_auth->pipe[0]->dentry); goto err_unlink_pipe_1; } err = rpcauth_init_credcache(auth); @@ -822,9 +834,13 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) return auth; err_unlink_pipe_0: - rpc_unlink(gss_auth->dentry[0]); + rpc_unlink(gss_auth->pipe[0]->dentry); err_unlink_pipe_1: - rpc_unlink(gss_auth->dentry[1]); + rpc_unlink(gss_auth->pipe[1]->dentry); +err_destroy_pipe_0: + rpc_destroy_pipe_data(gss_auth->pipe[0]); +err_destroy_pipe_1: + rpc_destroy_pipe_data(gss_auth->pipe[1]); err_put_mech: gss_mech_put(gss_auth->mech); err_free: @@ -837,8 +853,10 @@ out_dec: static void gss_free(struct gss_auth *gss_auth) { - 
rpc_unlink(gss_auth->dentry[1]); - rpc_unlink(gss_auth->dentry[0]); + rpc_unlink(gss_auth->pipe[0]->dentry); + rpc_unlink(gss_auth->pipe[1]->dentry); + rpc_destroy_pipe_data(gss_auth->pipe[0]); + rpc_destroy_pipe_data(gss_auth->pipe[1]); gss_mech_put(gss_auth->mech); kfree(gss_auth); diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 4093da79d512..6dd8b96e8df7 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -206,7 +206,6 @@ static void rpc_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); - kfree(RPC_I(inode)->pipe); kmem_cache_free(rpc_inode_cachep, RPC_I(inode)); } @@ -575,34 +574,44 @@ init_pipe(struct rpc_pipe *pipe) rpc_timeout_upcall_queue); pipe->ops = NULL; spin_lock_init(&pipe->lock); + pipe->dentry = NULL; +} +void rpc_destroy_pipe_data(struct rpc_pipe *pipe) +{ + kfree(pipe); } +EXPORT_SYMBOL_GPL(rpc_destroy_pipe_data); -static int __rpc_mkpipe(struct inode *dir, struct dentry *dentry, - umode_t mode, - const struct file_operations *i_fop, - void *private, - const struct rpc_pipe_ops *ops, - int flags) +struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags) { struct rpc_pipe *pipe; - struct rpc_inode *rpci; - int err; pipe = kzalloc(sizeof(struct rpc_pipe), GFP_KERNEL); if (!pipe) - return -ENOMEM; + return ERR_PTR(-ENOMEM); init_pipe(pipe); + pipe->ops = ops; + pipe->flags = flags; + return pipe; +} +EXPORT_SYMBOL_GPL(rpc_mkpipe_data); + +static int __rpc_mkpipe_dentry(struct inode *dir, struct dentry *dentry, + umode_t mode, + const struct file_operations *i_fop, + void *private, + struct rpc_pipe *pipe) +{ + struct rpc_inode *rpci; + int err; + err = __rpc_create_common(dir, dentry, S_IFIFO | mode, i_fop, private); - if (err) { - kfree(pipe); + if (err) return err; - } rpci = RPC_I(dentry->d_inode); rpci->private = private; rpci->pipe = pipe; - rpci->pipe->flags = flags; - rpci->pipe->ops = ops; fsnotify_create(dir, dentry); return 0; } @@ -819,9 +828,8 @@ static int rpc_rmdir_depopulate(struct dentry *dentry, * The @private argument passed here will be available to all these methods * from the file pointer, via RPC_I(file->f_dentry->d_inode)->private. 
*/ -struct dentry *rpc_mkpipe(struct dentry *parent, const char *name, - void *private, const struct rpc_pipe_ops *ops, - int flags) +struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name, + void *private, struct rpc_pipe *pipe) { struct dentry *dentry; struct inode *dir = parent->d_inode; @@ -829,9 +837,9 @@ struct dentry *rpc_mkpipe(struct dentry *parent, const char *name, struct qstr q; int err; - if (ops->upcall == NULL) + if (pipe->ops->upcall == NULL) umode &= ~S_IRUGO; - if (ops->downcall == NULL) + if (pipe->ops->downcall == NULL) umode &= ~S_IWUGO; q.name = name; @@ -842,8 +850,8 @@ struct dentry *rpc_mkpipe(struct dentry *parent, const char *name, dentry = __rpc_lookup_create_exclusive(parent, &q); if (IS_ERR(dentry)) goto out; - err = __rpc_mkpipe(dir, dentry, umode, &rpc_pipe_fops, - private, ops, flags); + err = __rpc_mkpipe_dentry(dir, dentry, umode, &rpc_pipe_fops, + private, pipe); if (err) goto out_err; out: @@ -856,7 +864,7 @@ out_err: err); goto out; } -EXPORT_SYMBOL_GPL(rpc_mkpipe); +EXPORT_SYMBOL_GPL(rpc_mkpipe_dentry); /** * rpc_unlink - remove a pipe -- cgit v1.2.3-55-g7522 From 80df9d202255071c8ec610a6a3fdca5cac69f7bd Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Wed, 11 Jan 2012 19:18:17 +0400 Subject: SUNRPC: subscribe RPC clients to pipefs notifications This patch subscribes RPC clients to RPC pipefs notifications. RPC clients notifier block is registering with pipefs initialization during SUNRPC module init. This notifier callback is responsible for RPC client PipeFS directory and GSS pipes creation. For pipes creation and destruction two additional callbacks were added to struct rpc_authops. Note that no locking required in notifier callback because PipeFS superblock pointer is passed as an argument from it's creation or destruction routine and thus we can be sure about it's validity. 
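For illustration, a rough sketch (not taken from the patch) of how a pipefs consumer subscribes to these notifications. The example_* names are invented; rpc_pipefs_notifier_register(), RPC_PIPEFS_MOUNT and RPC_PIPEFS_UMOUNT are the interfaces used in the hunks below, and the ptr argument is the pipefs superblock being created or destroyed, which is why no extra locking is needed in the callback:

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

static int example_pipefs_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct super_block *sb = ptr;	/* pipefs superblock */

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		/* create this subsystem's dentries on sb */
		break;
	case RPC_PIPEFS_UMOUNT:
		/* remove this subsystem's dentries from sb */
		break;
	default:
		return -ENOTSUPP;
	}
	pr_debug("pipefs event %lu on sb %p\n", event, sb);
	return 0;
}

static struct notifier_block example_pipefs_block = {
	.notifier_call	= example_pipefs_event,
};

static int __init example_init(void)
{
	return rpc_pipefs_notifier_register(&example_pipefs_block);
}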
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- include/linux/sunrpc/auth.h | 2 ++ net/sunrpc/auth_gss/auth_gss.c | 10 ++++-- net/sunrpc/clnt.c | 69 +++++++++++++++++++++++++++++++++++++++++- net/sunrpc/rpc_pipe.c | 19 ++++++++---- net/sunrpc/sunrpc.h | 2 ++ 5 files changed, 92 insertions(+), 10 deletions(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 7874a8a56638..492a36d72829 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -99,6 +99,8 @@ struct rpc_authops { struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int); struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int); + int (*pipes_create)(struct rpc_auth *); + void (*pipes_destroy)(struct rpc_auth *); }; struct rpc_credops { diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 1a6fa9173519..9da2d837b512 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -762,8 +762,10 @@ static void gss_pipes_dentries_destroy(struct rpc_auth *auth) struct gss_auth *gss_auth; gss_auth = container_of(auth, struct gss_auth, rpc_auth); - rpc_unlink(gss_auth->pipe[0]->dentry); - rpc_unlink(gss_auth->pipe[1]->dentry); + if (gss_auth->pipe[0]->dentry) + rpc_unlink(gss_auth->pipe[0]->dentry); + if (gss_auth->pipe[1]->dentry) + rpc_unlink(gss_auth->pipe[1]->dentry); } static int gss_pipes_dentries_create(struct rpc_auth *auth) @@ -1614,7 +1616,9 @@ static const struct rpc_authops authgss_ops = { .create = gss_create, .destroy = gss_destroy, .lookup_cred = gss_lookup_cred, - .crcreate = gss_create_cred + .crcreate = gss_create_cred, + .pipes_create = gss_pipes_dentries_create, + .pipes_destroy = gss_pipes_dentries_destroy, }; static const struct rpc_credops gss_credops = { diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 90e82c5daeb6..417074500592 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -98,8 +98,11 @@ static void rpc_unregister_client(struct rpc_clnt *clnt) static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt) { - if (clnt->cl_path.dentry) + if (clnt->cl_path.dentry) { + if (clnt->cl_auth && clnt->cl_auth->au_ops->pipes_destroy) + clnt->cl_auth->au_ops->pipes_destroy(clnt->cl_auth); rpc_remove_client_dir(clnt->cl_path.dentry); + } clnt->cl_path.dentry = NULL; } @@ -181,6 +184,70 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) return 0; } +static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, + struct super_block *sb) +{ + struct dentry *dentry; + int err = 0; + + switch (event) { + case RPC_PIPEFS_MOUNT: + if (clnt->cl_program->pipe_dir_name == NULL) + break; + dentry = rpc_setup_pipedir_sb(sb, clnt, + clnt->cl_program->pipe_dir_name); + BUG_ON(dentry == NULL); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + clnt->cl_path.dentry = dentry; + if (clnt->cl_auth->au_ops->pipes_create) { + err = clnt->cl_auth->au_ops->pipes_create(clnt->cl_auth); + if (err) + __rpc_clnt_remove_pipedir(clnt); + } + break; + case RPC_PIPEFS_UMOUNT: + __rpc_clnt_remove_pipedir(clnt); + break; + default: + printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event); + return -ENOTSUPP; + } + return err; +} + +static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct super_block *sb = ptr; + struct rpc_clnt *clnt; + int error = 0; + struct sunrpc_net *sn = net_generic(sb->s_fs_info, sunrpc_net_id); + + spin_lock(&sn->rpc_client_lock); + 
list_for_each_entry(clnt, &sn->all_clients, cl_clients) { + error = __rpc_pipefs_event(clnt, event, sb); + if (error) + break; + } + spin_unlock(&sn->rpc_client_lock); + return error; +} + +static struct notifier_block rpc_clients_block = { + .notifier_call = rpc_pipefs_event, +}; + +int rpc_clients_notifier_register(void) +{ + return rpc_pipefs_notifier_register(&rpc_clients_block); +} + +void rpc_clients_notifier_unregister(void) +{ + return rpc_pipefs_notifier_unregister(&rpc_clients_block); +} + static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt) { struct rpc_program *program = args->program; diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 6dd8b96e8df7..910de4169a8d 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -937,7 +937,7 @@ struct dentry *rpc_create_client_dir(struct dentry *dentry, /** * rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir() - * @dentry: directory to remove + * @clnt: rpc client */ int rpc_remove_client_dir(struct dentry *dentry) { @@ -1188,17 +1188,24 @@ int register_rpc_pipefs(void) init_once); if (!rpc_inode_cachep) return -ENOMEM; + err = rpc_clients_notifier_register(); + if (err) + goto err_notifier; err = register_filesystem(&rpc_pipe_fs_type); - if (err) { - kmem_cache_destroy(rpc_inode_cachep); - return err; - } - + if (err) + goto err_register; return 0; + +err_register: + rpc_clients_notifier_unregister(); +err_notifier: + kmem_cache_destroy(rpc_inode_cachep); + return err; } void unregister_rpc_pipefs(void) { + rpc_clients_notifier_unregister(); kmem_cache_destroy(rpc_inode_cachep); unregister_filesystem(&rpc_pipe_fs_type); } diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h index 90c292e2738b..14c9f6d1c5ff 100644 --- a/net/sunrpc/sunrpc.h +++ b/net/sunrpc/sunrpc.h @@ -47,5 +47,7 @@ int svc_send_common(struct socket *sock, struct xdr_buf *xdr, struct page *headpage, unsigned long headoffset, struct page *tailpage, unsigned long tailoffset); +int rpc_clients_notifier_register(void); +void rpc_clients_notifier_unregister(void); #endif /* _NET_SUNRPC_SUNRPC_H */ -- cgit v1.2.3-55-g7522 From ad6b134008f4e765dd19976552b929273ae523bd Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 10 Jan 2012 16:12:38 +0400 Subject: SUNRPC: fix pipe->ops cleanup on pipe dentry unlink This patch looks late due to GSS AUTH patches sent already. But it fixes a flaw in RPC PipeFS pipes handling. I've added this patch in the series, because this series related to pipes. But it should be a part of previous series named "SUNPRC: cleanup PipeFS for network-namespace-aware users". Pipe dentry can be created and destroyed many times during pipe life cycle. This actually means, that we can't set pipe->ops to NULL in rpc_close_pipes() and use this variable as a flag, indicating, that pipe's dentry is unlinking. To follow this restriction, this patch replaces "pipe->ops = NULL" assignment and checks for NULL with "pipe->dentry = NULL" assignment and checks for NULL respectively. This patch also removes check for non-NULL pipe->ops (or pipe->dentry) in rpc_close_pipes() because it always non-NULL now. 
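To make the life-cycle asymmetry above concrete, here is a hedged sketch (not from the patch; the example_* names are invented, while rpc_mkpipe_data(), rpc_mkpipe_dentry(), rpc_unlink() and rpc_destroy_pipe_data() are the calls introduced earlier in this series) of pipe data that outlives any particular PipeFS dentry, which is exactly why pipe->ops can no longer double as an "unlinked" flag and pipe->dentry is checked instead:

#include <linux/err.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

static struct rpc_pipe *example_pipe;	/* lives for the module lifetime */

static int example_attach_dentry(struct dentry *parent, void *private,
				 const struct rpc_pipe_ops *ops)
{
	if (example_pipe == NULL) {
		example_pipe = rpc_mkpipe_data(ops, 0);
		if (IS_ERR(example_pipe))
			return PTR_ERR(example_pipe);
	}
	/* The dentry may be created and unlinked many times (e.g. on every
	 * pipefs mount/umount) while example_pipe stays around. */
	example_pipe->dentry = rpc_mkpipe_dentry(parent, "example",
						 private, example_pipe);
	if (IS_ERR(example_pipe->dentry)) {
		int err = PTR_ERR(example_pipe->dentry);

		example_pipe->dentry = NULL;
		return err;
	}
	return 0;
}

static void example_detach_dentry(void)
{
	if (example_pipe->dentry)
		rpc_unlink(example_pipe->dentry);	/* pipe data survives */
}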
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- net/sunrpc/rpc_pipe.c | 51 ++++++++++++++++++++------------------------------- 1 file changed, 20 insertions(+), 31 deletions(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 910de4169a8d..6b417fcabdbf 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -86,10 +86,6 @@ rpc_timeout_upcall_queue(struct work_struct *work) void (*destroy_msg)(struct rpc_pipe_msg *); spin_lock(&pipe->lock); - if (pipe->ops == NULL) { - spin_unlock(&pipe->lock); - return; - } destroy_msg = pipe->ops->destroy_msg; if (pipe->nreaders == 0) { list_splice_init(&pipe->pipe, &free_list); @@ -135,8 +131,6 @@ rpc_queue_upcall(struct rpc_pipe *pipe, struct rpc_pipe_msg *msg) int res = -EPIPE; spin_lock(&pipe->lock); - if (pipe->ops == NULL) - goto out; if (pipe->nreaders) { list_add_tail(&msg->list, &pipe->pipe); pipe->pipelen += msg->len; @@ -150,7 +144,6 @@ rpc_queue_upcall(struct rpc_pipe *pipe, struct rpc_pipe_msg *msg) pipe->pipelen += msg->len; res = 0; } -out: spin_unlock(&pipe->lock); wake_up(&pipe->waitq); return res; @@ -167,27 +160,23 @@ static void rpc_close_pipes(struct inode *inode) { struct rpc_pipe *pipe = RPC_I(inode)->pipe; - const struct rpc_pipe_ops *ops; int need_release; + LIST_HEAD(free_list); mutex_lock(&inode->i_mutex); - ops = pipe->ops; - if (ops != NULL) { - LIST_HEAD(free_list); - spin_lock(&pipe->lock); - need_release = pipe->nreaders != 0 || pipe->nwriters != 0; - pipe->nreaders = 0; - list_splice_init(&pipe->in_upcall, &free_list); - list_splice_init(&pipe->pipe, &free_list); - pipe->pipelen = 0; - pipe->ops = NULL; - spin_unlock(&pipe->lock); - rpc_purge_list(pipe, &free_list, ops->destroy_msg, -EPIPE); - pipe->nwriters = 0; - if (need_release && ops->release_pipe) - ops->release_pipe(inode); - cancel_delayed_work_sync(&pipe->queue_timeout); - } + spin_lock(&pipe->lock); + need_release = pipe->nreaders != 0 || pipe->nwriters != 0; + pipe->nreaders = 0; + list_splice_init(&pipe->in_upcall, &free_list); + list_splice_init(&pipe->pipe, &free_list); + pipe->pipelen = 0; + pipe->dentry = NULL; + spin_unlock(&pipe->lock); + rpc_purge_list(pipe, &free_list, pipe->ops->destroy_msg, -EPIPE); + pipe->nwriters = 0; + if (need_release && pipe->ops->release_pipe) + pipe->ops->release_pipe(inode); + cancel_delayed_work_sync(&pipe->queue_timeout); rpc_inode_setowner(inode, NULL); mutex_unlock(&inode->i_mutex); } @@ -223,7 +212,7 @@ rpc_pipe_open(struct inode *inode, struct file *filp) int res = -ENXIO; mutex_lock(&inode->i_mutex); - if (pipe->ops == NULL) + if (pipe->dentry == NULL) goto out; first_open = pipe->nreaders == 0 && pipe->nwriters == 0; if (first_open && pipe->ops->open_pipe) { @@ -249,7 +238,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp) int last_close; mutex_lock(&inode->i_mutex); - if (pipe->ops == NULL) + if (pipe->dentry == NULL) goto out; msg = filp->private_data; if (msg != NULL) { @@ -290,7 +279,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) int res = 0; mutex_lock(&inode->i_mutex); - if (pipe->ops == NULL) { + if (pipe->dentry == NULL) { res = -EPIPE; goto out_unlock; } @@ -333,7 +322,7 @@ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *of mutex_lock(&inode->i_mutex); res = -EPIPE; - if (pipe->ops != NULL) + if (pipe->dentry != NULL) res = pipe->ops->downcall(filp, buf, len); mutex_unlock(&inode->i_mutex); return res; @@ -348,7 +337,7 @@ rpc_pipe_poll(struct file 
*filp, struct poll_table_struct *wait) poll_wait(filp, &pipe->waitq, wait); mask = POLLOUT | POLLWRNORM; - if (pipe->ops == NULL) + if (pipe->dentry == NULL) mask |= POLLERR | POLLHUP; if (filp->private_data || !list_empty(&pipe->pipe)) mask |= POLLIN | POLLRDNORM; @@ -365,7 +354,7 @@ rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) switch (cmd) { case FIONREAD: spin_lock(&pipe->lock); - if (pipe->ops == NULL) { + if (pipe->dentry == NULL) { spin_unlock(&pipe->lock); return -EPIPE; } -- cgit v1.2.3-55-g7522 From eee17325f1dfbe004f1475743bab9e3d050d00f5 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 10 Jan 2012 16:13:19 +0400 Subject: NFS: idmap PipeFS notifier introduced v2: 1) Added "nfs_idmap_init" and "nfs_idmap_quit" definitions for kernels built without the CONFIG_NFS_V4 option set. This patch subscribes NFS clients to RPC pipefs notifications. The idmap notifier is registered on NFS module load. Its callback is responsible for creating and destroying the PipeFS idmap pipe dentry for NFSv4 clients. Since the idmap pipe is created in the RPC client's pipefs directory, we have to make sure that this directory has already been created, i.e. that the RPC client notifier callback has already run. To achieve this, PipeFS notifier priorities have been introduced (the RPC clients notifier priority is greater than the NFS idmap one). But this approach creates another problem: on the UMOUNT event, unlink of the RPC client directory is called before the NFS idmap pipe is unlinked and therefore fails, because the directory is not empty. The solution introduced in this patch is to try to remove the client directory once again after the idmap pipe has been unlinked. This is an ugly hack and should probably be replaced with something more elegant. Note that no locking is required in the notifier callback, because the PipeFS superblock pointer is passed as an argument from its creation or destruction routine, so we can be sure of its validity.
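A small sketch of the priority trick described above, assuming the usual Linux notifier-chain behaviour that callbacks run in descending .priority order; the example_* names are hypothetical, while the priority constants and the registration call are the ones added in the hunks below:

#include <linux/notifier.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

/* Because SUNRPC_PIPEFS_NFS_PRIO < SUNRPC_PIPEFS_RPC_PRIO, this callback
 * runs after the RPC client callback on MOUNT, i.e. only once the client
 * directory it wants to populate already exists. */
static int example_idmap_like_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	return 0;	/* create/destroy a pipe dentry here, as idmap does */
}

static struct notifier_block example_idmap_like_block = {
	.notifier_call	= example_idmap_like_event,
	.priority	= SUNRPC_PIPEFS_NFS_PRIO,
};

/* registered with rpc_pipefs_notifier_register(&example_idmap_like_block) */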
Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 4 +- fs/nfs/idmap.c | 75 ++++++++++++++++++++++++++++++++++++++ fs/nfs/internal.h | 4 ++ include/linux/nfs_idmap.h | 21 ++++++----- include/linux/sunrpc/rpc_pipe_fs.h | 7 ++++ net/sunrpc/clnt.c | 1 + net/sunrpc/rpc_pipe.c | 16 ++++++++ 7 files changed, 116 insertions(+), 12 deletions(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 64815b725409..ca9a4aa38dff 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -52,8 +52,8 @@ #define NFSDBG_FACILITY NFSDBG_CLIENT -static DEFINE_SPINLOCK(nfs_client_lock); -static LIST_HEAD(nfs_client_list); +DEFINE_SPINLOCK(nfs_client_lock); +LIST_HEAD(nfs_client_list); static LIST_HEAD(nfs_volume_list); static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq); #ifdef CONFIG_NFS_V4 diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 769274ed51c4..ff084d258c41 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -377,6 +377,7 @@ int nfs_map_gid_to_group(const struct nfs_server *server, __u32 gid, char *buf, #include #include "nfs4_fs.h" +#include "internal.h" #define IDMAP_HASH_SZ 128 @@ -530,6 +531,80 @@ nfs_idmap_delete(struct nfs_client *clp) kfree(idmap); } +static int __rpc_pipefs_event(struct nfs_client *clp, unsigned long event, + struct super_block *sb) +{ + int err = 0; + + switch (event) { + case RPC_PIPEFS_MOUNT: + BUG_ON(clp->cl_rpcclient->cl_dentry == NULL); + err = __nfs_idmap_register(clp->cl_rpcclient->cl_dentry, + clp->cl_idmap, + clp->cl_idmap->idmap_pipe); + break; + case RPC_PIPEFS_UMOUNT: + if (clp->cl_idmap->idmap_pipe) { + struct dentry *parent; + + parent = clp->cl_idmap->idmap_pipe->dentry->d_parent; + __nfs_idmap_unregister(clp->cl_idmap->idmap_pipe); + /* + * Note: This is a dirty hack. SUNRPC hook has been + * called already but simple_rmdir() call for the + * directory returned with error because of idmap pipe + * inside. Thus now we have to remove this directory + * here. 
+ */ + if (rpc_rmdir(parent)) + printk(KERN_ERR "%s: failed to remove clnt dir!\n", __func__); + } + break; + default: + printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event); + return -ENOTSUPP; + } + return err; +} + +static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct super_block *sb = ptr; + struct nfs_client *clp; + int error = 0; + + spin_lock(&nfs_client_lock); + list_for_each_entry(clp, &nfs_client_list, cl_share_link) { + if (clp->net != sb->s_fs_info) + continue; + if (clp->rpc_ops != &nfs_v4_clientops) + continue; + error = __rpc_pipefs_event(clp, event, sb); + if (error) + break; + } + spin_unlock(&nfs_client_lock); + return error; +} + +#define PIPEFS_NFS_PRIO 1 + +static struct notifier_block nfs_idmap_block = { + .notifier_call = rpc_pipefs_event, + .priority = SUNRPC_PIPEFS_NFS_PRIO, +}; + +int nfs_idmap_init(void) +{ + return rpc_pipefs_notifier_register(&nfs_idmap_block); +} + +void nfs_idmap_quit(void) +{ + rpc_pipefs_notifier_unregister(&nfs_idmap_block); +} + /* * Helper routines for manipulating the hashtable */ diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index d602188f889f..2b9836fe4434 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -182,6 +182,10 @@ static inline void nfs_fs_proc_exit(void) { } #endif +#ifdef CONFIG_NFS_V4 +extern spinlock_t nfs_client_lock; +extern struct list_head nfs_client_list; +#endif /* nfs4namespace.c */ #ifdef CONFIG_NFS_V4 diff --git a/include/linux/nfs_idmap.h b/include/linux/nfs_idmap.h index 308c18877018..3c9eeb7da646 100644 --- a/include/linux/nfs_idmap.h +++ b/include/linux/nfs_idmap.h @@ -69,31 +69,32 @@ struct nfs_server; struct nfs_fattr; struct nfs4_string; -#ifdef CONFIG_NFS_USE_NEW_IDMAPPER - +#ifdef CONFIG_NFS_V4 int nfs_idmap_init(void); void nfs_idmap_quit(void); - -static inline int nfs_idmap_new(struct nfs_client *clp) +#else +static inline int nfs_idmap_init(void) { return 0; } -static inline void nfs_idmap_delete(struct nfs_client *clp) -{ -} +static inline void nfs_idmap_quit(void) +{} +#endif -#else /* CONFIG_NFS_USE_NEW_IDMAPPER not set */ +#ifdef CONFIG_NFS_USE_NEW_IDMAPPER -static inline int nfs_idmap_init(void) +static inline int nfs_idmap_new(struct nfs_client *clp) { return 0; } -static inline void nfs_idmap_quit(void) +static inline void nfs_idmap_delete(struct nfs_client *clp) { } +#else /* CONFIG_NFS_USE_NEW_IDMAPPER not set */ + int nfs_idmap_new(struct nfs_client *); void nfs_idmap_delete(struct nfs_client *); diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index 0d1f748f76da..ca32ebd14c18 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -49,6 +49,11 @@ RPC_I(struct inode *inode) return container_of(inode, struct rpc_inode, vfs_inode); } +enum { + SUNRPC_PIPEFS_NFS_PRIO, + SUNRPC_PIPEFS_RPC_PRIO, +}; + extern int rpc_pipefs_notifier_register(struct notifier_block *); extern void rpc_pipefs_notifier_unregister(struct notifier_block *); @@ -78,6 +83,8 @@ extern struct dentry *rpc_create_cache_dir(struct dentry *, struct cache_detail *); extern void rpc_remove_cache_dir(struct dentry *); +extern int rpc_rmdir(struct dentry *dentry); + struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags); void rpc_destroy_pipe_data(struct rpc_pipe *pipe); extern struct dentry *rpc_mkpipe_dentry(struct dentry *, const char *, void *, diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index ed7c22de9319..4c6848017168 100644 --- a/net/sunrpc/clnt.c +++ 
b/net/sunrpc/clnt.c @@ -222,6 +222,7 @@ static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, static struct notifier_block rpc_clients_block = { .notifier_call = rpc_pipefs_event, + .priority = SUNRPC_PIPEFS_RPC_PRIO, }; int rpc_clients_notifier_register(void) diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 6b417fcabdbf..bae4e71d8663 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -616,6 +616,22 @@ static int __rpc_rmdir(struct inode *dir, struct dentry *dentry) return ret; } +int rpc_rmdir(struct dentry *dentry) +{ + struct dentry *parent; + struct inode *dir; + int error; + + parent = dget_parent(dentry); + dir = parent->d_inode; + mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); + error = __rpc_rmdir(dir, dentry); + mutex_unlock(&dir->i_mutex); + dput(parent); + return error; +} +EXPORT_SYMBOL_GPL(rpc_rmdir); + static int __rpc_unlink(struct inode *dir, struct dentry *dentry) { int ret; -- cgit v1.2.3-55-g7522 From 12bc372b96b35a2dc9245ec61369028932b82ea8 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Tue, 10 Jan 2012 17:04:48 +0400 Subject: SUNRPC: kernel PipeFS mount point creation routines removed This patch removes static rpc_mnt variable and its creation and destruction routines, because they are not used anymore. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- include/linux/sunrpc/rpc_pipe_fs.h | 2 -- net/sunrpc/rpc_pipe.c | 21 --------------------- 2 files changed, 23 deletions(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index ca32ebd14c18..426ce6eeee66 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -90,8 +90,6 @@ void rpc_destroy_pipe_data(struct rpc_pipe *pipe); extern struct dentry *rpc_mkpipe_dentry(struct dentry *, const char *, void *, struct rpc_pipe *); extern int rpc_unlink(struct dentry *); -extern struct vfsmount *rpc_get_mount(void); -extern void rpc_put_mount(void); extern int register_rpc_pipefs(void); extern void unregister_rpc_pipefs(void); diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index bae4e71d8663..6873c9b51cc9 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -18,7 +18,6 @@ #include #include -#include #include #include #include @@ -37,9 +36,6 @@ #define NET_NAME(net) ((net == &init_net) ? " (init_net)" : "") -static struct vfsmount *rpc_mnt __read_mostly; -static int rpc_mount_count; - static struct file_system_type rpc_pipe_fs_type; @@ -449,23 +445,6 @@ struct rpc_filelist { umode_t mode; }; -struct vfsmount *rpc_get_mount(void) -{ - int err; - - err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mnt, &rpc_mount_count); - if (err != 0) - return ERR_PTR(err); - return rpc_mnt; -} -EXPORT_SYMBOL_GPL(rpc_get_mount); - -void rpc_put_mount(void) -{ - simple_release_fs(&rpc_mnt, &rpc_mount_count); -} -EXPORT_SYMBOL_GPL(rpc_put_mount); - static int rpc_delete_dentry(const struct dentry *dentry) { return 1; -- cgit v1.2.3-55-g7522 From 2c9030eef9dbd0d737a7f55646da70d217fd6255 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 27 Feb 2012 22:05:45 +0400 Subject: SUNRPC: check RPC inode's pipe reference before dereferencing There are 2 tightly bound objects: pipe data (created for kernel needs, has reference to dentry, which depends on PipeFS mount/umount) and PipeFS dentry/inode pair (created on mount for user-space needs). They both independently may have or have not a valid reference to each other. 
This means, that we have to make sure, that pipe->dentry reference is valid on upcalls, and dentry->pipe reference is valid on downcalls. The latter check is absent - my fault. IOW, PipeFS dentry can be opened by some process (rpc.idmapd for example), but it's pipe data can belong to NFS mount, which was unmounted already and thus pipe data was destroyed. To fix this, pipe reference have to be set to NULL on rpc_unlink() and checked on PipeFS file operations instead of pipe->dentry check. Note: PipeFS "poll" file operation will be updated in next patch, because it's logic is more complicated. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- net/sunrpc/rpc_pipe.c | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 6873c9b51cc9..b67b2aecc4ff 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -174,6 +174,7 @@ rpc_close_pipes(struct inode *inode) pipe->ops->release_pipe(inode); cancel_delayed_work_sync(&pipe->queue_timeout); rpc_inode_setowner(inode, NULL); + RPC_I(inode)->pipe = NULL; mutex_unlock(&inode->i_mutex); } @@ -203,12 +204,13 @@ rpc_destroy_inode(struct inode *inode) static int rpc_pipe_open(struct inode *inode, struct file *filp) { - struct rpc_pipe *pipe = RPC_I(inode)->pipe; + struct rpc_pipe *pipe; int first_open; int res = -ENXIO; mutex_lock(&inode->i_mutex); - if (pipe->dentry == NULL) + pipe = RPC_I(inode)->pipe; + if (pipe == NULL) goto out; first_open = pipe->nreaders == 0 && pipe->nwriters == 0; if (first_open && pipe->ops->open_pipe) { @@ -229,12 +231,13 @@ out: static int rpc_pipe_release(struct inode *inode, struct file *filp) { - struct rpc_pipe *pipe = RPC_I(inode)->pipe; + struct rpc_pipe *pipe; struct rpc_pipe_msg *msg; int last_close; mutex_lock(&inode->i_mutex); - if (pipe->dentry == NULL) + pipe = RPC_I(inode)->pipe; + if (pipe == NULL) goto out; msg = filp->private_data; if (msg != NULL) { @@ -270,12 +273,13 @@ static ssize_t rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) { struct inode *inode = filp->f_path.dentry->d_inode; - struct rpc_pipe *pipe = RPC_I(inode)->pipe; + struct rpc_pipe *pipe; struct rpc_pipe_msg *msg; int res = 0; mutex_lock(&inode->i_mutex); - if (pipe->dentry == NULL) { + pipe = RPC_I(inode)->pipe; + if (pipe == NULL) { res = -EPIPE; goto out_unlock; } @@ -313,13 +317,12 @@ static ssize_t rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset) { struct inode *inode = filp->f_path.dentry->d_inode; - struct rpc_pipe *pipe = RPC_I(inode)->pipe; int res; mutex_lock(&inode->i_mutex); res = -EPIPE; - if (pipe->dentry != NULL) - res = pipe->ops->downcall(filp, buf, len); + if (RPC_I(inode)->pipe != NULL) + res = RPC_I(inode)->pipe->ops->downcall(filp, buf, len); mutex_unlock(&inode->i_mutex); return res; } @@ -344,16 +347,18 @@ static long rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = filp->f_path.dentry->d_inode; - struct rpc_pipe *pipe = RPC_I(inode)->pipe; + struct rpc_pipe *pipe; int len; switch (cmd) { case FIONREAD: - spin_lock(&pipe->lock); - if (pipe->dentry == NULL) { - spin_unlock(&pipe->lock); + mutex_lock(&inode->i_mutex); + pipe = RPC_I(inode)->pipe; + if (pipe == NULL) { + mutex_unlock(&inode->i_mutex); return -EPIPE; } + spin_lock(&pipe->lock); len = pipe->pipelen; if (filp->private_data) { struct rpc_pipe_msg *msg; @@ -361,6 +366,7 @@ 
rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) len += msg->len - msg->copied; } spin_unlock(&pipe->lock); + mutex_unlock(&inode->i_mutex); return put_user(len, (int __user *)arg); default: return -EINVAL; -- cgit v1.2.3-55-g7522 From 591ad7feaec5417681b4112f8df52fc43bb7c92e Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Mon, 27 Feb 2012 22:05:54 +0400 Subject: SUNRPC: move waitq from RPC pipe to RPC inode Currently the wait queue used for polling RPC pipe changes from user space is part of the RPC pipe. But the pipe data itself can be released on NFS umount before the dentry-inode pair connected to it (in case that pair is held open by some process). This is not a problem for almost all pipe users, because all PipeFS file operations check the pipe reference before using it. The exception is eventfd: it registers itself through the "poll" file operation and thus keeps a reference to the pipe wait queue. This leads to oopses when such an eventfd is destroyed after NFS umount (as rpc.idmapd does), since no pipe data is left by that point. The solution is to move the wait queue from the pipe data to the internal RPC inode data. This is also more logical, because the wait queue is used only by user-space processes, which already hold an inode reference. Note: upcalls have to take a reference on pipe->dentry before dereferencing the wait queue, to make sure the mount point won't disappear from underneath us. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- include/linux/sunrpc/rpc_pipe_fs.h | 2 +- net/sunrpc/rpc_pipe.c | 39 +++++++++++++++++++++++++------------- 2 files changed, 27 insertions(+), 14 deletions(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index 426ce6eeee66..a7b422b33eda 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -28,7 +28,6 @@ struct rpc_pipe { int pipelen; int nreaders; int nwriters; - wait_queue_head_t waitq; #define RPC_PIPE_WAIT_FOR_OPEN 1 int flags; struct delayed_work queue_timeout; @@ -41,6 +40,7 @@ struct rpc_inode { struct inode vfs_inode; void *private; struct rpc_pipe *pipe; + wait_queue_head_t waitq; }; static inline struct rpc_inode * diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index b67b2aecc4ff..ac9ee1590739 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -57,7 +57,7 @@ void rpc_pipefs_notifier_unregister(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(rpc_pipefs_notifier_unregister); -static void rpc_purge_list(struct rpc_pipe *pipe, struct list_head *head, +static void rpc_purge_list(wait_queue_head_t *waitq, struct list_head *head, void (*destroy_msg)(struct rpc_pipe_msg *), int err) { struct rpc_pipe_msg *msg; @@ -70,7 +70,7 @@ static void rpc_purge_list(struct rpc_pipe *pipe, struct list_head *head, msg->errno = err; destroy_msg(msg); } while (!list_empty(head)); - wake_up(&pipe->waitq); + wake_up(waitq); } static void @@ -80,6 +80,7 @@ rpc_timeout_upcall_queue(struct work_struct *work) struct rpc_pipe *pipe = container_of(work, struct rpc_pipe, queue_timeout.work); void (*destroy_msg)(struct rpc_pipe_msg *); + struct dentry *dentry; spin_lock(&pipe->lock); destroy_msg = pipe->ops->destroy_msg; @@ -87,8 +88,13 @@ rpc_timeout_upcall_queue(struct work_struct *work) list_splice_init(&pipe->pipe, &free_list); pipe->pipelen = 0; } + dentry = dget(pipe->dentry); spin_unlock(&pipe->lock); + if (dentry) { + rpc_purge_list(&RPC_I(dentry->d_inode)->waitq, +
&free_list, destroy_msg, -ETIMEDOUT); + dput(dentry); + } } ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg, @@ -125,6 +131,7 @@ int rpc_queue_upcall(struct rpc_pipe *pipe, struct rpc_pipe_msg *msg) { int res = -EPIPE; + struct dentry *dentry; spin_lock(&pipe->lock); if (pipe->nreaders) { @@ -140,8 +147,12 @@ rpc_queue_upcall(struct rpc_pipe *pipe, struct rpc_pipe_msg *msg) pipe->pipelen += msg->len; res = 0; } + dentry = dget(pipe->dentry); spin_unlock(&pipe->lock); - wake_up(&pipe->waitq); + if (dentry) { + wake_up(&RPC_I(dentry->d_inode)->waitq); + dput(dentry); + } return res; } EXPORT_SYMBOL_GPL(rpc_queue_upcall); @@ -168,7 +179,7 @@ rpc_close_pipes(struct inode *inode) pipe->pipelen = 0; pipe->dentry = NULL; spin_unlock(&pipe->lock); - rpc_purge_list(pipe, &free_list, pipe->ops->destroy_msg, -EPIPE); + rpc_purge_list(&RPC_I(inode)->waitq, &free_list, pipe->ops->destroy_msg, -EPIPE); pipe->nwriters = 0; if (need_release && pipe->ops->release_pipe) pipe->ops->release_pipe(inode); @@ -257,7 +268,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp) list_splice_init(&pipe->pipe, &free_list); pipe->pipelen = 0; spin_unlock(&pipe->lock); - rpc_purge_list(pipe, &free_list, + rpc_purge_list(&RPC_I(inode)->waitq, &free_list, pipe->ops->destroy_msg, -EAGAIN); } } @@ -330,16 +341,18 @@ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *of static unsigned int rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait) { - struct rpc_pipe *pipe = RPC_I(filp->f_path.dentry->d_inode)->pipe; - unsigned int mask = 0; + struct inode *inode = filp->f_path.dentry->d_inode; + struct rpc_inode *rpci = RPC_I(inode); + unsigned int mask = POLLOUT | POLLWRNORM; - poll_wait(filp, &pipe->waitq, wait); + poll_wait(filp, &rpci->waitq, wait); - mask = POLLOUT | POLLWRNORM; - if (pipe->dentry == NULL) + mutex_lock(&inode->i_mutex); + if (rpci->pipe == NULL) mask |= POLLERR | POLLHUP; - if (filp->private_data || !list_empty(&pipe->pipe)) + else if (filp->private_data || !list_empty(&rpci->pipe->pipe)) mask |= POLLIN | POLLRDNORM; + mutex_unlock(&inode->i_mutex); return mask; } @@ -543,7 +556,6 @@ init_pipe(struct rpc_pipe *pipe) INIT_LIST_HEAD(&pipe->in_downcall); INIT_LIST_HEAD(&pipe->pipe); pipe->pipelen = 0; - init_waitqueue_head(&pipe->waitq); INIT_DELAYED_WORK(&pipe->queue_timeout, rpc_timeout_upcall_queue); pipe->ops = NULL; @@ -1165,6 +1177,7 @@ init_once(void *foo) inode_init_once(&rpci->vfs_inode); rpci->private = NULL; rpci->pipe = NULL; + init_waitqueue_head(&rpci->waitq); } int register_rpc_pipefs(void) -- cgit v1.2.3-55-g7522 From 2446ab6070861aba2dd9229463ffbc40016a9f33 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 1 Mar 2012 17:00:56 -0500 Subject: SUNRPC: Use RCU to dereference the rpc_clnt.cl_xprt field A migration event will replace the rpc_xprt used by an rpc_clnt. To ensure this can be done safely, all references to cl_xprt must now use a form of rcu_dereference(). Special care is taken with rpc_peeraddr2str(), which returns a pointer to memory whose lifetime is the same as the rpc_xprt. 
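For illustration, a minimal sketch (not taken from the patch) of the calling convention this change imposes on rpc_peeraddr2str(): since the returned string is owned by the rpc_xprt, the caller must stay inside an RCU read-side critical section while using it. The example_log_peer() helper is hypothetical; the pattern matches the NFS hunks below:

#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/sunrpc/clnt.h>

/* Hypothetical helper, for illustration only. */
static void example_log_peer(struct rpc_clnt *clnt)
{
	rcu_read_lock();
	/* Do not cache or use the returned pointer after rcu_read_unlock(). */
	pr_info("peer address: %s\n",
		rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
	rcu_read_unlock();
}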
Signed-off-by: Trond Myklebust [ cel: fix lockdep splats and layering violations ] [ cel: forward ported to 3.4 ] [ cel: remove rpc_max_reqs(), add rpc_net_ns() ] Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/callback_proc.c | 9 ++-- fs/nfs/client.c | 16 ++++-- fs/nfs/nfs4namespace.c | 2 +- fs/nfs/nfs4proc.c | 13 +++-- fs/nfs/nfs4state.c | 25 +++++++--- fs/nfs/super.c | 5 ++ include/linux/sunrpc/clnt.h | 4 +- include/linux/sunrpc/debug.h | 13 +++++ net/sunrpc/auth_gss/auth_gss.c | 4 +- net/sunrpc/clnt.c | 110 +++++++++++++++++++++++++++++++++-------- net/sunrpc/rpc_pipe.c | 3 ++ net/sunrpc/rpcb_clnt.c | 15 ++++-- net/sunrpc/stats.c | 6 ++- 13 files changed, 175 insertions(+), 50 deletions(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 0e0865e38065..1bb297243624 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "nfs4_fs.h" #include "callback.h" #include "delegation.h" @@ -33,7 +34,7 @@ __be32 nfs4_callback_getattr(struct cb_getattrargs *args, res->bitmap[0] = res->bitmap[1] = 0; res->status = htonl(NFS4ERR_BADHANDLE); - dprintk("NFS: GETATTR callback request from %s\n", + dprintk_rcu("NFS: GETATTR callback request from %s\n", rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR)); inode = nfs_delegation_find_inode(cps->clp, &args->fh); @@ -73,7 +74,7 @@ __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy, if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */ goto out; - dprintk("NFS: RECALL callback request from %s\n", + dprintk_rcu("NFS: RECALL callback request from %s\n", rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR)); res = htonl(NFS4ERR_BADHANDLE); @@ -533,7 +534,7 @@ __be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy, if (!cps->clp) /* set in cb_sequence */ goto out; - dprintk("NFS: RECALL_ANY callback request from %s\n", + dprintk_rcu("NFS: RECALL_ANY callback request from %s\n", rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR)); status = cpu_to_be32(NFS4ERR_INVAL); @@ -568,7 +569,7 @@ __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy, if (!cps->clp) /* set in cb_sequence */ goto out; - dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n", + dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target max slots %d\n", rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR), args->crsa_target_max_slots); diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 1506adf4d4ed..d038dc5916e5 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1284,16 +1284,18 @@ static int nfs4_init_callback(struct nfs_client *clp) int error; if (clp->rpc_ops->version == 4) { + struct rpc_xprt *xprt; + + xprt = rcu_dereference_raw(clp->cl_rpcclient->cl_xprt); + if (nfs4_has_session(clp)) { - error = xprt_setup_backchannel( - clp->cl_rpcclient->cl_xprt, + error = xprt_setup_backchannel(xprt, NFS41_BC_MIN_CALLBACKS); if (error < 0) return error; } - error = nfs_callback_up(clp->cl_mvops->minor_version, - clp->cl_rpcclient->cl_xprt); + error = nfs_callback_up(clp->cl_mvops->minor_version, xprt); if (error < 0) { dprintk("%s: failed to start callback. 
Error = %d\n", __func__, error); @@ -1678,7 +1680,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data, data->addrlen, parent_client->cl_ipaddr, data->authflavor, - parent_server->client->cl_xprt->prot, + rpc_protocol(parent_server->client), parent_server->client->cl_timeout, parent_client->cl_mvops->minor_version, parent_client->net); @@ -1905,12 +1907,14 @@ static int nfs_server_list_show(struct seq_file *m, void *v) if (clp->cl_cons_state != NFS_CS_READY) return 0; + rcu_read_lock(); seq_printf(m, "v%u %s %s %3d %s\n", clp->rpc_ops->version, rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR), rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_PORT), atomic_read(&clp->cl_count), clp->cl_hostname); + rcu_read_unlock(); return 0; } @@ -1993,6 +1997,7 @@ static int nfs_volume_list_show(struct seq_file *m, void *v) (unsigned long long) server->fsid.major, (unsigned long long) server->fsid.minor); + rcu_read_lock(); seq_printf(m, "v%u %s %s %-7s %-17s %s\n", clp->rpc_ops->version, rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR), @@ -2000,6 +2005,7 @@ static int nfs_volume_list_show(struct seq_file *m, void *v) dev, fsid, nfs_server_fscache_state(server)); + rcu_read_unlock(); return 0; } diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c index 667ea7406fd3..9c8eca315f43 100644 --- a/fs/nfs/nfs4namespace.c +++ b/fs/nfs/nfs4namespace.c @@ -96,8 +96,8 @@ static int nfs4_validate_fspath(struct dentry *dentry, static size_t nfs_parse_server_name(char *string, size_t len, struct sockaddr *sa, size_t salen, struct nfs_server *server) { + struct net *net = rpc_net_ns(server->client); ssize_t ret; - struct net *net = server->client->cl_xprt->xprt_net; ret = rpc_pton(net, string, len, sa, salen); if (ret == 0) { diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 6c8e170e2e6b..671510cc14c0 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3833,6 +3833,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, *p = htonl((u32)clp->cl_boot_time.tv_nsec); for(;;) { + rcu_read_lock(); setclientid.sc_name_len = scnprintf(setclientid.sc_name, sizeof(setclientid.sc_name), "%s/%s %s %s %u", clp->cl_ipaddr, @@ -3849,6 +3850,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, sizeof(setclientid.sc_uaddr), "%s.%u.%u", clp->cl_ipaddr, port >> 8, port & 255); + rcu_read_unlock(); status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); if (status != -NFS4ERR_CLID_INUSE) @@ -5244,11 +5246,16 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp) void nfs4_destroy_session(struct nfs4_session *session) { + struct rpc_xprt *xprt; + nfs4_proc_destroy_session(session); + + rcu_read_lock(); + xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt); + rcu_read_unlock(); dprintk("%s Destroy backchannel for xprt %p\n", - __func__, session->clp->cl_rpcclient->cl_xprt); - xprt_destroy_backchannel(session->clp->cl_rpcclient->cl_xprt, - NFS41_BC_MIN_CALLBACKS); + __func__, xprt); + xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS); nfs4_destroy_slot_tables(session); kfree(session); } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index c1111a37dc14..bae959e294cd 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1037,19 +1037,28 @@ static void nfs4_clear_state_manager_bit(struct nfs_client *clp) void nfs4_schedule_state_manager(struct nfs_client *clp) { struct task_struct *task; + char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 
1]; if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) return; __module_get(THIS_MODULE); atomic_inc(&clp->cl_count); - task = kthread_run(nfs4_run_state_manager, clp, "%s-manager", - rpc_peeraddr2str(clp->cl_rpcclient, - RPC_DISPLAY_ADDR)); - if (!IS_ERR(task)) - return; - nfs4_clear_state_manager_bit(clp); - nfs_put_client(clp); - module_put(THIS_MODULE); + + /* The rcu_read_lock() is not strictly necessary, as the state + * manager is the only thread that ever changes the rpc_xprt + * after it's initialized. At this point, we're single threaded. */ + rcu_read_lock(); + snprintf(buf, sizeof(buf), "%s-manager", + rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); + rcu_read_unlock(); + task = kthread_run(nfs4_run_state_manager, clp, buf); + if (IS_ERR(task)) { + printk(KERN_ERR "%s: kthread_run: %ld\n", + __func__, PTR_ERR(task)); + nfs4_clear_state_manager_bit(clp); + nfs_put_client(clp); + module_put(THIS_MODULE); + } } /* diff --git a/fs/nfs/super.c b/fs/nfs/super.c index f4ccdae6a0cf..7002be11d99f 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include @@ -701,8 +702,10 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss, else seq_puts(m, nfs_infop->nostr); } + rcu_read_lock(); seq_printf(m, ",proto=%s", rpc_peeraddr2str(nfss->client, RPC_DISPLAY_NETID)); + rcu_read_unlock(); if (version == 4) { if (nfss->port != NFS_PORT) seq_printf(m, ",port=%u", nfss->port); @@ -751,9 +754,11 @@ static int nfs_show_options(struct seq_file *m, struct dentry *root) nfs_show_mount_options(m, nfss, 0); + rcu_read_lock(); seq_printf(m, ",addr=%s", rpc_peeraddr2str(nfss->nfs_client->cl_rpcclient, RPC_DISPLAY_ADDR)); + rcu_read_unlock(); return 0; } diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index a4c62e95c720..e3d12b4a0314 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -35,7 +35,7 @@ struct rpc_clnt { struct list_head cl_clients; /* Global list of clients */ struct list_head cl_tasks; /* List of tasks */ spinlock_t cl_lock; /* spinlock */ - struct rpc_xprt * cl_xprt; /* transport */ + struct rpc_xprt __rcu * cl_xprt; /* transport */ struct rpc_procinfo * cl_procinfo; /* procedure info */ u32 cl_prog, /* RPC program number */ cl_vers, /* RPC version number */ @@ -156,6 +156,8 @@ struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int rpc_restart_call_prepare(struct rpc_task *); int rpc_restart_call(struct rpc_task *); void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); +int rpc_protocol(struct rpc_clnt *); +struct net * rpc_net_ns(struct rpc_clnt *); size_t rpc_max_payload(struct rpc_clnt *); void rpc_force_rebind(struct rpc_clnt *); size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index b506936f4ce6..6cb2517bcf75 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h @@ -50,19 +50,32 @@ extern unsigned int nlm_debug; #endif #define dprintk(args...) dfprintk(FACILITY, ## args) +#define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) #undef ifdebug #ifdef RPC_DEBUG # define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) + # define dfprintk(fac, args...) \ do { \ ifdebug(fac) \ printk(KERN_DEFAULT args); \ } while (0) + +# define dfprintk_rcu(fac, args...) 
\ + do { \ + ifdebug(fac) { \ + rcu_read_lock(); \ + printk(KERN_DEFAULT args); \ + rcu_read_unlock(); \ + } \ + } while (0) + # define RPC_IFDEBUG(x) x #else # define ifdebug(fac) if (0) # define dfprintk(fac, args...) do ; while (0) +# define dfprintk_rcu(fac, args...) do ; while (0) # define RPC_IFDEBUG(x) #endif diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index cb2e56452748..d3ad81f8da5b 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -799,7 +799,7 @@ err_unlink_pipe_1: static void gss_pipes_dentries_destroy_net(struct rpc_clnt *clnt, struct rpc_auth *auth) { - struct net *net = clnt->cl_xprt->xprt_net; + struct net *net = rpc_net_ns(clnt); struct super_block *sb; sb = rpc_get_sb_net(net); @@ -813,7 +813,7 @@ static void gss_pipes_dentries_destroy_net(struct rpc_clnt *clnt, static int gss_pipes_dentries_create_net(struct rpc_clnt *clnt, struct rpc_auth *auth) { - struct net *net = clnt->cl_xprt->xprt_net; + struct net *net = rpc_net_ns(clnt); struct super_block *sb; int err = 0; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 25c3da53fb69..7783fc0e7263 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -81,7 +82,8 @@ static int rpc_ping(struct rpc_clnt *clnt); static void rpc_register_client(struct rpc_clnt *clnt) { - struct sunrpc_net *sn = net_generic(clnt->cl_xprt->xprt_net, sunrpc_net_id); + struct net *net = rpc_net_ns(clnt); + struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); spin_lock(&sn->rpc_client_lock); list_add(&clnt->cl_clients, &sn->all_clients); @@ -90,7 +92,8 @@ static void rpc_register_client(struct rpc_clnt *clnt) static void rpc_unregister_client(struct rpc_clnt *clnt) { - struct sunrpc_net *sn = net_generic(clnt->cl_xprt->xprt_net, sunrpc_net_id); + struct net *net = rpc_net_ns(clnt); + struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); spin_lock(&sn->rpc_client_lock); list_del(&clnt->cl_clients); @@ -109,12 +112,13 @@ static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt) static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt) { + struct net *net = rpc_net_ns(clnt); struct super_block *pipefs_sb; - pipefs_sb = rpc_get_sb_net(clnt->cl_xprt->xprt_net); + pipefs_sb = rpc_get_sb_net(net); if (pipefs_sb) { __rpc_clnt_remove_pipedir(clnt); - rpc_put_sb_net(clnt->cl_xprt->xprt_net); + rpc_put_sb_net(net); } } @@ -155,17 +159,18 @@ static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb, static int rpc_setup_pipedir(struct rpc_clnt *clnt, const char *dir_name) { + struct net *net = rpc_net_ns(clnt); struct super_block *pipefs_sb; struct dentry *dentry; clnt->cl_dentry = NULL; if (dir_name == NULL) return 0; - pipefs_sb = rpc_get_sb_net(clnt->cl_xprt->xprt_net); + pipefs_sb = rpc_get_sb_net(net); if (!pipefs_sb) return 0; dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt, dir_name); - rpc_put_sb_net(clnt->cl_xprt->xprt_net); + rpc_put_sb_net(net); if (IS_ERR(dentry)) return PTR_ERR(dentry); clnt->cl_dentry = dentry; @@ -295,7 +300,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru if (clnt->cl_server == NULL) goto out_no_server; - clnt->cl_xprt = xprt; + rcu_assign_pointer(clnt->cl_xprt, xprt); clnt->cl_procinfo = version->procs; clnt->cl_maxproc = version->nrprocs; clnt->cl_protname = program->name; @@ -310,7 +315,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru INIT_LIST_HEAD(&clnt->cl_tasks); 
spin_lock_init(&clnt->cl_lock); - if (!xprt_bound(clnt->cl_xprt)) + if (!xprt_bound(xprt)) clnt->cl_autobind = 1; clnt->cl_timeout = xprt->timeout; @@ -477,6 +482,7 @@ struct rpc_clnt * rpc_clone_client(struct rpc_clnt *clnt) { struct rpc_clnt *new; + struct rpc_xprt *xprt; int err = -ENOMEM; new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); @@ -499,18 +505,25 @@ rpc_clone_client(struct rpc_clnt *clnt) if (new->cl_principal == NULL) goto out_no_principal; } + rcu_read_lock(); + xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); + rcu_read_unlock(); + if (xprt == NULL) + goto out_no_transport; + rcu_assign_pointer(new->cl_xprt, xprt); atomic_set(&new->cl_count, 1); err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); if (err != 0) goto out_no_path; if (new->cl_auth) atomic_inc(&new->cl_auth->au_count); - xprt_get(clnt->cl_xprt); atomic_inc(&clnt->cl_count); rpc_register_client(new); rpciod_up(); return new; out_no_path: + xprt_put(xprt); +out_no_transport: kfree(new->cl_principal); out_no_principal: rpc_free_iostats(new->cl_metrics); @@ -590,7 +603,7 @@ rpc_free_client(struct rpc_clnt *clnt) rpc_free_iostats(clnt->cl_metrics); kfree(clnt->cl_principal); clnt->cl_metrics = NULL; - xprt_put(clnt->cl_xprt); + xprt_put(rcu_dereference_raw(clnt->cl_xprt)); rpciod_down(); kfree(clnt); } @@ -879,13 +892,18 @@ EXPORT_SYMBOL_GPL(rpc_call_start); size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize) { size_t bytes; - struct rpc_xprt *xprt = clnt->cl_xprt; + struct rpc_xprt *xprt; + + rcu_read_lock(); + xprt = rcu_dereference(clnt->cl_xprt); - bytes = sizeof(xprt->addr); + bytes = xprt->addrlen; if (bytes > bufsize) bytes = bufsize; - memcpy(buf, &clnt->cl_xprt->addr, bytes); - return xprt->addrlen; + memcpy(buf, &xprt->addr, bytes); + rcu_read_unlock(); + + return bytes; } EXPORT_SYMBOL_GPL(rpc_peeraddr); @@ -894,11 +912,16 @@ EXPORT_SYMBOL_GPL(rpc_peeraddr); * @clnt: RPC client structure * @format: address format * + * NB: the lifetime of the memory referenced by the returned pointer is + * the same as the rpc_xprt itself. As long as the caller uses this + * pointer, it must hold the RCU read lock. 
*/ const char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format) { - struct rpc_xprt *xprt = clnt->cl_xprt; + struct rpc_xprt *xprt; + + xprt = rcu_dereference(clnt->cl_xprt); if (xprt->address_strings[format] != NULL) return xprt->address_strings[format]; @@ -910,14 +933,51 @@ EXPORT_SYMBOL_GPL(rpc_peeraddr2str); void rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) { - struct rpc_xprt *xprt = clnt->cl_xprt; + struct rpc_xprt *xprt; + + rcu_read_lock(); + xprt = rcu_dereference(clnt->cl_xprt); if (xprt->ops->set_buffer_size) xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(rpc_setbufsize); -/* - * Return size of largest payload RPC client can support, in bytes +/** + * rpc_protocol - Get transport protocol number for an RPC client + * @clnt: RPC client to query + * + */ +int rpc_protocol(struct rpc_clnt *clnt) +{ + int protocol; + + rcu_read_lock(); + protocol = rcu_dereference(clnt->cl_xprt)->prot; + rcu_read_unlock(); + return protocol; +} +EXPORT_SYMBOL_GPL(rpc_protocol); + +/** + * rpc_net_ns - Get the network namespace for this RPC client + * @clnt: RPC client to query + * + */ +struct net *rpc_net_ns(struct rpc_clnt *clnt) +{ + struct net *ret; + + rcu_read_lock(); + ret = rcu_dereference(clnt->cl_xprt)->xprt_net; + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL_GPL(rpc_net_ns); + +/** + * rpc_max_payload - Get maximum payload size for a transport, in bytes + * @clnt: RPC client to query * * For stream transports, this is one RPC record fragment (see RFC * 1831), as we don't support multi-record requests yet. For datagram @@ -926,7 +986,12 @@ EXPORT_SYMBOL_GPL(rpc_setbufsize); */ size_t rpc_max_payload(struct rpc_clnt *clnt) { - return clnt->cl_xprt->max_payload; + size_t ret; + + rcu_read_lock(); + ret = rcu_dereference(clnt->cl_xprt)->max_payload; + rcu_read_unlock(); + return ret; } EXPORT_SYMBOL_GPL(rpc_max_payload); @@ -937,8 +1002,11 @@ EXPORT_SYMBOL_GPL(rpc_max_payload); */ void rpc_force_rebind(struct rpc_clnt *clnt) { - if (clnt->cl_autobind) - xprt_clear_bound(clnt->cl_xprt); + if (clnt->cl_autobind) { + rcu_read_lock(); + xprt_clear_bound(rcu_dereference(clnt->cl_xprt)); + rcu_read_unlock(); + } } EXPORT_SYMBOL_GPL(rpc_force_rebind); diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index ac9ee1590739..3d30943ed6db 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -402,12 +403,14 @@ rpc_show_info(struct seq_file *m, void *v) { struct rpc_clnt *clnt = m->private; + rcu_read_lock(); seq_printf(m, "RPC server: %s\n", clnt->cl_server); seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname, clnt->cl_prog, clnt->cl_vers); seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR)); seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO)); seq_printf(m, "port: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PORT)); + rcu_read_unlock(); return 0; } diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index b1f08bd67883..4f8af63798a2 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -620,9 +620,10 @@ static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbi static struct rpc_clnt *rpcb_find_transport_owner(struct rpc_clnt *clnt) { struct rpc_clnt *parent = clnt->cl_parent; + struct rpc_xprt *xprt = rcu_dereference(clnt->cl_xprt); while (parent != clnt) { - if (parent->cl_xprt != clnt->cl_xprt) + if 
(rcu_dereference(parent->cl_xprt) != xprt) break; if (clnt->cl_autobind) break; @@ -653,8 +654,12 @@ void rpcb_getport_async(struct rpc_task *task) size_t salen; int status; - clnt = rpcb_find_transport_owner(task->tk_client); - xprt = clnt->cl_xprt; + rcu_read_lock(); + do { + clnt = rpcb_find_transport_owner(task->tk_client); + xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); + } while (xprt == NULL); + rcu_read_unlock(); dprintk("RPC: %5u %s(%s, %u, %u, %d)\n", task->tk_pid, __func__, @@ -667,6 +672,7 @@ void rpcb_getport_async(struct rpc_task *task) if (xprt_test_and_set_binding(xprt)) { dprintk("RPC: %5u %s: waiting for another binder\n", task->tk_pid, __func__); + xprt_put(xprt); return; } @@ -734,7 +740,7 @@ void rpcb_getport_async(struct rpc_task *task) switch (bind_version) { case RPCBVERS_4: case RPCBVERS_3: - map->r_netid = rpc_peeraddr2str(clnt, RPC_DISPLAY_NETID); + map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID]; map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC); map->r_owner = ""; break; @@ -763,6 +769,7 @@ bailout_release_client: bailout_nofree: rpcb_wake_rpcbind_waiters(xprt, status); task->tk_status = status; + xprt_put(xprt); } EXPORT_SYMBOL_GPL(rpcb_getport_async); diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index 1eb3304bc105..bc2068ee795b 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "netns.h" @@ -179,7 +180,7 @@ static void _print_name(struct seq_file *seq, unsigned int op, void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) { struct rpc_iostats *stats = clnt->cl_metrics; - struct rpc_xprt *xprt = clnt->cl_xprt; + struct rpc_xprt *xprt; unsigned int op, maxproc = clnt->cl_maxproc; if (!stats) @@ -189,8 +190,11 @@ void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) seq_printf(seq, "p/v: %u/%u (%s)\n", clnt->cl_prog, clnt->cl_vers, clnt->cl_protname); + rcu_read_lock(); + xprt = rcu_dereference(clnt->cl_xprt); if (xprt) xprt->ops->print_stats(xprt, seq); + rcu_read_unlock(); seq_printf(seq, "\tper-op statistics\n"); for (op = 0; op < maxproc; op++) { -- cgit v1.2.3-55-g7522 From 4e0038b6b246e4145fc4a53dca61a556d17bc52c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 1 Mar 2012 17:01:05 -0500 Subject: SUNRPC: Move clnt->cl_server into struct rpc_xprt When the cl_xprt field is updated, the cl_server field will also have to change. Since the contents of cl_server follow the remote endpoint of cl_xprt, just move that field to the rpc_xprt. 
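As an illustration of the resulting access pattern (a minimal sketch; the
helper below is hypothetical and not part of this patch), code that used to
read clnt->cl_server directly now dereferences the transport under the RCU
read lock and uses xprt->servername, as the hunks below do:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>

/* Hypothetical example, mirroring the pattern used throughout this patch. */
static void example_print_servername(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	printk(KERN_INFO "RPC server: %s\n", xprt->servername);
	rcu_read_unlock();
}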
Signed-off-by: Trond Myklebust [ cel: simplify check_gss_callback_principal(), whitespace changes ] [ cel: forward ported to 3.4 ] Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/callback.c | 3 +- fs/nfs/nfs4proc.c | 3 +- include/linux/sunrpc/clnt.h | 1 - include/linux/sunrpc/xprt.h | 2 ++ net/sunrpc/clnt.c | 88 ++++++++++++++++++++++----------------------- net/sunrpc/rpc_pipe.c | 3 +- net/sunrpc/rpcb_clnt.c | 4 +-- net/sunrpc/xprt.c | 15 +++++++- 8 files changed, 66 insertions(+), 53 deletions(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 4a122ae71762..2afe23349c7b 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -332,7 +332,6 @@ void nfs_callback_down(int minorversion) int check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp) { - struct rpc_clnt *r = clp->cl_rpcclient; char *p = svc_gss_principal(rqstp); if (rqstp->rq_authop->flavour != RPC_AUTH_GSS) @@ -353,7 +352,7 @@ check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp) if (memcmp(p, "nfs@", 4) != 0) return 0; p += 4; - if (strcmp(p, r->cl_server) != 0) + if (strcmp(p, clp->cl_hostname) != 0) return 0; return 1; } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 671510cc14c0..54767dd66cf9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1100,6 +1100,7 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data if (state == NULL) goto err_put_inode; if (data->o_res.delegation_type != 0) { + struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; int delegation_flags = 0; rcu_read_lock(); @@ -1111,7 +1112,7 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data pr_err_ratelimited("NFS: Broken NFSv4 server %s is " "returning a delegation for " "OPEN(CLAIM_DELEGATE_CUR)\n", - NFS_CLIENT(inode)->cl_server); + clp->cl_hostname); } else if ((delegation_flags & 1UL<inode, data->owner->so_cred, diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index e3d12b4a0314..acd5502a8190 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -41,7 +41,6 @@ struct rpc_clnt { cl_vers, /* RPC version number */ cl_maxproc; /* max procedure number */ - const char * cl_server; /* server machine name */ const char * cl_protname; /* protocol name */ struct rpc_auth * cl_auth; /* authenticator */ struct rpc_stat * cl_stats; /* per-program statistics */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index ea712f97f4a1..77d278defa70 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -229,6 +229,7 @@ struct rpc_xprt { } stat; struct net *xprt_net; + const char *servername; const char *address_strings[RPC_DISPLAY_MAX]; }; @@ -258,6 +259,7 @@ struct xprt_create { struct sockaddr * srcaddr; /* optional local address */ struct sockaddr * dstaddr; /* remote peer address */ size_t addrlen; + const char *servername; struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ }; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 7783fc0e7263..e39ace9a4e1d 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -265,15 +265,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru struct rpc_clnt *clnt = NULL; struct rpc_auth *auth; int err; - size_t len; /* sanity check the name before trying to print it */ - err = -EINVAL; - len = strlen(args->servername); - if (len > RPC_MAXNETNAMELEN) - goto out_no_rpciod; - len++; - dprintk("RPC: creating %s client for %s 
(xprt %p)\n", program->name, args->servername, xprt); @@ -296,10 +289,6 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru goto out_err; clnt->cl_parent = clnt; - clnt->cl_server = kstrdup(args->servername, GFP_KERNEL); - if (clnt->cl_server == NULL) - goto out_no_server; - rcu_assign_pointer(clnt->cl_xprt, xprt); clnt->cl_procinfo = version->procs; clnt->cl_maxproc = version->nrprocs; @@ -363,8 +352,6 @@ out_no_path: out_no_principal: rpc_free_iostats(clnt->cl_metrics); out_no_stats: - kfree(clnt->cl_server); -out_no_server: kfree(clnt); out_err: xprt_put(xprt); @@ -394,6 +381,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) .srcaddr = args->saddress, .dstaddr = args->address, .addrlen = args->addrsize, + .servername = args->servername, .bc_xprt = args->bc_xprt, }; char servername[48]; @@ -402,7 +390,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) * If the caller chooses not to specify a hostname, whip * up a string representation of the passed-in address. */ - if (args->servername == NULL) { + if (xprtargs.servername == NULL) { struct sockaddr_un *sun = (struct sockaddr_un *)args->address; struct sockaddr_in *sin = @@ -429,7 +417,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) * address family isn't recognized. */ return ERR_PTR(-EINVAL); } - args->servername = servername; + xprtargs.servername = servername; } xprt = xprt_create_transport(&xprtargs); @@ -488,9 +476,6 @@ rpc_clone_client(struct rpc_clnt *clnt) new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); if (!new) goto out_no_clnt; - new->cl_server = kstrdup(clnt->cl_server, GFP_KERNEL); - if (new->cl_server == NULL) - goto out_no_server; new->cl_parent = clnt; /* Turn off autobind on clones */ new->cl_autobind = 0; @@ -528,8 +513,6 @@ out_no_transport: out_no_principal: rpc_free_iostats(new->cl_metrics); out_no_stats: - kfree(new->cl_server); -out_no_server: kfree(new); out_no_clnt: dprintk("RPC: %s: returned error %d\n", __func__, err); @@ -574,8 +557,9 @@ EXPORT_SYMBOL_GPL(rpc_killall_tasks); */ void rpc_shutdown_client(struct rpc_clnt *clnt) { - dprintk("RPC: shutting down %s client for %s\n", - clnt->cl_protname, clnt->cl_server); + dprintk_rcu("RPC: shutting down %s client for %s\n", + clnt->cl_protname, + rcu_dereference(clnt->cl_xprt)->servername); while (!list_empty(&clnt->cl_tasks)) { rpc_killall_tasks(clnt); @@ -593,11 +577,11 @@ EXPORT_SYMBOL_GPL(rpc_shutdown_client); static void rpc_free_client(struct rpc_clnt *clnt) { - dprintk("RPC: destroying %s client for %s\n", - clnt->cl_protname, clnt->cl_server); + dprintk_rcu("RPC: destroying %s client for %s\n", + clnt->cl_protname, + rcu_dereference(clnt->cl_xprt)->servername); if (clnt->cl_parent != clnt) rpc_release_client(clnt->cl_parent); - kfree(clnt->cl_server); rpc_unregister_client(clnt); rpc_clnt_remove_pipedir(clnt); rpc_free_iostats(clnt->cl_metrics); @@ -1685,8 +1669,11 @@ call_timeout(struct rpc_task *task) } if (RPC_IS_SOFT(task)) { if (clnt->cl_chatty) + rcu_read_lock(); printk(KERN_NOTICE "%s: server %s not responding, timed out\n", - clnt->cl_protname, clnt->cl_server); + clnt->cl_protname, + rcu_dereference(clnt->cl_xprt)->servername); + rcu_read_unlock(); if (task->tk_flags & RPC_TASK_TIMEOUT) rpc_exit(task, -ETIMEDOUT); else @@ -1696,9 +1683,13 @@ call_timeout(struct rpc_task *task) if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { task->tk_flags |= RPC_CALL_MAJORSEEN; - if (clnt->cl_chatty) + if (clnt->cl_chatty) { + rcu_read_lock(); printk(KERN_NOTICE "%s: server %s not responding, 
still trying\n", - clnt->cl_protname, clnt->cl_server); + clnt->cl_protname, + rcu_dereference(clnt->cl_xprt)->servername); + rcu_read_unlock(); + } } rpc_force_rebind(clnt); /* @@ -1727,9 +1718,13 @@ call_decode(struct rpc_task *task) dprint_status(task); if (task->tk_flags & RPC_CALL_MAJORSEEN) { - if (clnt->cl_chatty) + if (clnt->cl_chatty) { + rcu_read_lock(); printk(KERN_NOTICE "%s: server %s OK\n", - clnt->cl_protname, clnt->cl_server); + clnt->cl_protname, + rcu_dereference(clnt->cl_xprt)->servername); + rcu_read_unlock(); + } task->tk_flags &= ~RPC_CALL_MAJORSEEN; } @@ -1807,6 +1802,7 @@ rpc_encode_header(struct rpc_task *task) static __be32 * rpc_verify_header(struct rpc_task *task) { + struct rpc_clnt *clnt = task->tk_client; struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0]; int len = task->tk_rqstp->rq_rcv_buf.len >> 2; __be32 *p = iov->iov_base; @@ -1879,8 +1875,11 @@ rpc_verify_header(struct rpc_task *task) task->tk_action = call_bind; goto out_retry; case RPC_AUTH_TOOWEAK: + rcu_read_lock(); printk(KERN_NOTICE "RPC: server %s requires stronger " - "authentication.\n", task->tk_client->cl_server); + "authentication.\n", + rcu_dereference(clnt->cl_xprt)->servername); + rcu_read_unlock(); break; default: dprintk("RPC: %5u %s: unknown auth error: %x\n", @@ -1903,28 +1902,27 @@ rpc_verify_header(struct rpc_task *task) case RPC_SUCCESS: return p; case RPC_PROG_UNAVAIL: - dprintk("RPC: %5u %s: program %u is unsupported by server %s\n", - task->tk_pid, __func__, - (unsigned int)task->tk_client->cl_prog, - task->tk_client->cl_server); + dprintk_rcu("RPC: %5u %s: program %u is unsupported " + "by server %s\n", task->tk_pid, __func__, + (unsigned int)clnt->cl_prog, + rcu_dereference(clnt->cl_xprt)->servername); error = -EPFNOSUPPORT; goto out_err; case RPC_PROG_MISMATCH: - dprintk("RPC: %5u %s: program %u, version %u unsupported by " - "server %s\n", task->tk_pid, __func__, - (unsigned int)task->tk_client->cl_prog, - (unsigned int)task->tk_client->cl_vers, - task->tk_client->cl_server); + dprintk_rcu("RPC: %5u %s: program %u, version %u unsupported " + "by server %s\n", task->tk_pid, __func__, + (unsigned int)clnt->cl_prog, + (unsigned int)clnt->cl_vers, + rcu_dereference(clnt->cl_xprt)->servername); error = -EPROTONOSUPPORT; goto out_err; case RPC_PROC_UNAVAIL: - dprintk("RPC: %5u %s: proc %s unsupported by program %u, " + dprintk_rcu("RPC: %5u %s: proc %s unsupported by program %u, " "version %u on server %s\n", task->tk_pid, __func__, rpc_proc_name(task), - task->tk_client->cl_prog, - task->tk_client->cl_vers, - task->tk_client->cl_server); + clnt->cl_prog, clnt->cl_vers, + rcu_dereference(clnt->cl_xprt)->servername); error = -EOPNOTSUPP; goto out_err; case RPC_GARBAGE_ARGS: @@ -1938,7 +1936,7 @@ rpc_verify_header(struct rpc_task *task) } out_garbage: - task->tk_client->cl_stats->rpcgarbage++; + clnt->cl_stats->rpcgarbage++; if (task->tk_garb_retry) { task->tk_garb_retry--; dprintk("RPC: %5u %s: retrying\n", diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 3d30943ed6db..7d96e3cd57cc 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -404,7 +404,8 @@ rpc_show_info(struct seq_file *m, void *v) struct rpc_clnt *clnt = m->private; rcu_read_lock(); - seq_printf(m, "RPC server: %s\n", clnt->cl_server); + seq_printf(m, "RPC server: %s\n", + rcu_dereference(clnt->cl_xprt)->servername); seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname, clnt->cl_prog, clnt->cl_vers); seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR)); 
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 4f8af63798a2..e699ff0ce909 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -663,7 +663,7 @@ void rpcb_getport_async(struct rpc_task *task) dprintk("RPC: %5u %s(%s, %u, %u, %d)\n", task->tk_pid, __func__, - clnt->cl_server, clnt->cl_prog, clnt->cl_vers, xprt->prot); + xprt->servername, clnt->cl_prog, clnt->cl_vers, xprt->prot); /* Put self on the wait queue to ensure we get notified if * some other task is already attempting to bind the port */ @@ -714,7 +714,7 @@ void rpcb_getport_async(struct rpc_task *task) dprintk("RPC: %5u %s: trying rpcbind version %u\n", task->tk_pid, __func__, bind_version); - rpcb_clnt = rpcb_create(xprt->xprt_net, clnt->cl_server, sap, salen, + rpcb_clnt = rpcb_create(xprt->xprt_net, xprt->servername, sap, salen, xprt->prot, bind_version); if (IS_ERR(rpcb_clnt)) { status = PTR_ERR(rpcb_clnt); diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 32e37945a840..0cbcd1ab49ab 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -66,6 +66,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net); static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); static void xprt_connect_status(struct rpc_task *task); static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); +static void xprt_destroy(struct rpc_xprt *xprt); static DEFINE_SPINLOCK(xprt_list_lock); static LIST_HEAD(xprt_list); @@ -751,7 +752,7 @@ static void xprt_connect_status(struct rpc_task *task) default: dprintk("RPC: %5u xprt_connect_status: error %d connecting to " "server %s\n", task->tk_pid, -task->tk_status, - task->tk_client->cl_server); + xprt->servername); xprt_release_write(xprt, task); task->tk_status = -EIO; } @@ -1229,6 +1230,17 @@ found: (unsigned long)xprt); else init_timer(&xprt->timer); + + if (strlen(args->servername) > RPC_MAXNETNAMELEN) { + xprt_destroy(xprt); + return ERR_PTR(-EINVAL); + } + xprt->servername = kstrdup(args->servername, GFP_KERNEL); + if (xprt->servername == NULL) { + xprt_destroy(xprt); + return ERR_PTR(-ENOMEM); + } + dprintk("RPC: created transport %p with %u slots\n", xprt, xprt->max_reqs); out: @@ -1251,6 +1263,7 @@ static void xprt_destroy(struct rpc_xprt *xprt) rpc_destroy_wait_queue(&xprt->sending); rpc_destroy_wait_queue(&xprt->backlog); cancel_work_sync(&xprt->task_cleanup); + kfree(xprt->servername); /* * Tear down transport state and free the rpc_xprt */ -- cgit v1.2.3-55-g7522 From 09acfea5d8de419ebe84be43b08f7b79c965215f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 11 Mar 2012 15:22:54 -0400 Subject: SUNRPC: Fix a few sparse warnings net/sunrpc/svcsock.c:412:22: warning: incorrect type in assignment (different address spaces) - svc_partial_recvfrom now takes a struct kvec, so the variable save_iovbase needs to be an ordinary (void *) Make a bunch of variables in net/sunrpc/xprtsock.c static Fix a couple of "warning: symbol 'foo' was not declared. Should it be static?" reports. Fix a couple of conflicting function declarations. 
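For background, a short sketch of the address-space mismatch that sparse
reports (illustrative only, not code taken from this patch): struct kvec
carries kernel-space pointers, so storing one in a __user-annotated
variable is what produces the warning quoted above.

#include <linux/uio.h>		/* struct kvec */
#include <linux/compiler.h>	/* __user annotation */

/* Hypothetical example: only the "bad" assignment triggers the sparse
 * "incorrect type in assignment (different address spaces)" warning. */
static void example_kvec_address_space(struct kvec *vec)
{
	void __user *bad;
	void *good;

	bad = vec->iov_base;	/* warning: different address spaces */
	good = vec->iov_base;	/* ok: iov_base is a plain (void *) */
	(void)bad;
	(void)good;
}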
Signed-off-by: Trond Myklebust --- include/linux/sunrpc/bc_xprt.h | 2 +- include/linux/sunrpc/svcauth.h | 3 +++ include/linux/sunrpc/xprtsock.h | 12 ------------ net/sunrpc/auth_gss/gss_krb5_mech.c | 2 +- net/sunrpc/auth_gss/gss_krb5_seal.c | 2 +- net/sunrpc/backchannel_rqst.c | 1 + net/sunrpc/rpc_pipe.c | 2 +- net/sunrpc/sunrpc_syms.c | 3 --- net/sunrpc/svcsock.c | 2 +- net/sunrpc/xprtsock.c | 10 +++++----- 10 files changed, 14 insertions(+), 25 deletions(-) (limited to 'net/sunrpc/rpc_pipe.c') diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index f7f3ce340c08..969c0a671dbf 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h @@ -35,7 +35,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt); void xprt_free_bc_request(struct rpc_rqst *req); int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); -void xprt_destroy_backchannel(struct rpc_xprt *, int max_reqs); +void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); int bc_send(struct rpc_rqst *req); /* diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index 25d333c1b571..548790e9113b 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -135,6 +135,9 @@ extern void svcauth_unix_purge(void); extern void svcauth_unix_info_release(struct svc_xprt *xpt); extern int svcauth_unix_set_client(struct svc_rqst *rqstp); +extern int unix_gid_cache_create(struct net *net); +extern void unix_gid_cache_destroy(struct net *net); + static inline unsigned long hash_str(char *name, int bits) { unsigned long hash = 0; diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 3f14a02e9cc0..1ad36cc25b2e 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -12,18 +12,6 @@ int init_socket_xprt(void); void cleanup_socket_xprt(void); -/* - * RPC slot table sizes for UDP, TCP transports - */ -extern unsigned int xprt_udp_slot_table_entries; -extern unsigned int xprt_tcp_slot_table_entries; - -/* - * Parameters for choosing a free port - */ -extern unsigned int xprt_min_resvport; -extern unsigned int xprt_max_resvport; - #define RPC_MIN_RESVPORT (1U) #define RPC_MAX_RESVPORT (65535U) #define RPC_DEF_MIN_RESVPORT (665U) diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 8c67890de427..8eff8c32d1b9 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -344,7 +344,7 @@ out_err: return PTR_ERR(p); } -struct crypto_blkcipher * +static struct crypto_blkcipher * context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key) { struct crypto_blkcipher *cp; diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index d7941eab7796..62ae3273186c 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -159,7 +159,7 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; } -u32 +static u32 gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text, struct xdr_netobj *token) { diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index 3ad435a14ada..31def68a0f6e 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c @@ -25,6 +25,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include #include #include +#include #ifdef RPC_DEBUG #define RPCDBG_FACILITY RPCDBG_TRANS diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 7d96e3cd57cc..8584ec068e97 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1149,7 +1149,7 @@ rpc_mount(struct file_system_type *fs_type, return mount_ns(fs_type, flags, current->nsproxy->net_ns, rpc_fill_super); } -void rpc_kill_sb(struct super_block *sb) +static void rpc_kill_sb(struct super_block *sb) { struct net *net = sb->s_fs_info; struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index 21d106e2ca06..8adfc88e793a 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c @@ -27,9 +27,6 @@ int sunrpc_net_id; EXPORT_SYMBOL_GPL(sunrpc_net_id); -extern int unix_gid_cache_create(struct net *net); -extern int unix_gid_cache_destroy(struct net *net); - static __net_init int sunrpc_init_net(struct net *net) { int err; diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index e088b1633d36..40ae884db865 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp, int buflen, unsigned int base) { size_t save_iovlen; - void __user *save_iovbase; + void *save_iovbase; unsigned int i; int ret; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 4c8281d29e2b..92bc5181dbeb 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -53,12 +53,12 @@ static void xs_close(struct rpc_xprt *xprt); /* * xprtsock tunables */ -unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; -unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE; -unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE; +static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; +static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE; +static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE; -unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; -unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; +static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; +static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; #define XS_TCP_LINGER_TO (15U * HZ) static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO; -- cgit v1.2.3-55-g7522
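Several of the patches above convert the rpc_clnt transport pointer
(cl_xprt) to an RCU-managed reference and add helpers for external callers.
A brief usage sketch (hypothetical caller, not taken from any patch above):
rpc_net_ns() and rpc_protocol() take and drop the RCU read lock internally,
while a string returned by rpc_peeraddr2str() stays valid only while the
caller itself holds rcu_read_lock().

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/sunrpc/clnt.h>

static void example_show_transport(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);	/* handles RCU internally */
	int prot = rpc_protocol(clnt);		/* likewise */

	rcu_read_lock();
	printk(KERN_INFO "proto %d (%s), net %p\n", prot,
	       rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO), net);
	rcu_read_unlock();
}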