From fafc4e1ea1a4c1eb13a30c9426fb799f5efacbc3 Mon Sep 17 00:00:00 2001
From: Hannes Frederic Sowa
Date: Fri, 8 Apr 2016 15:11:27 +0200
Subject: sock: tighten lockdep checks for sock_owned_by_user

sock_owned_by_user should not be used without the socket lock held. It
seems to be a common practice to check .owned before lock
reclassification, so provide a little help to abstract this check away.

Cc: linux-cifs@vger.kernel.org
Cc: linux-bluetooth@vger.kernel.org
Cc: linux-nfs@vger.kernel.org
Signed-off-by: Hannes Frederic Sowa
Signed-off-by: David S. Miller
---
 include/net/sock.h | 44 +++++++++++++++++++++++++++++---------------
 1 file changed, 29 insertions(+), 15 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index 81d6fecec0a2..baba58770ac5 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1316,21 +1316,6 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
 	__kfree_skb(skb);
 }
 
-/* Used by processes to "lock" a socket state, so that
- * interrupts and bottom half handlers won't change it
- * from under us. It essentially blocks any incoming
- * packets, so that we won't get any new data or any
- * packets that change the state of the socket.
- *
- * While locked, BH processing will add new packets to
- * the backlog queue. This queue is processed by the
- * owner of the socket lock right before it is released.
- *
- * Since ~2.3.5 it is also exclusive sleep lock serializing
- * accesses from user process context.
- */
-#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)
-
 static inline void sock_release_ownership(struct sock *sk)
 {
 	if (sk->sk_lock.owned) {
@@ -1403,6 +1388,35 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
 		spin_unlock_bh(&sk->sk_lock.slock);
 }
 
+/* Used by processes to "lock" a socket state, so that
+ * interrupts and bottom half handlers won't change it
+ * from under us. It essentially blocks any incoming
+ * packets, so that we won't get any new data or any
+ * packets that change the state of the socket.
+ *
+ * While locked, BH processing will add new packets to
+ * the backlog queue. This queue is processed by the
+ * owner of the socket lock right before it is released.
+ *
+ * Since ~2.3.5 it is also exclusive sleep lock serializing
+ * accesses from user process context.
+ */
+
+static inline bool sock_owned_by_user(const struct sock *sk)
+{
+#ifdef CONFIG_LOCKDEP
+	WARN_ON(!lockdep_sock_is_held(sk));
+#endif
+	return sk->sk_lock.owned;
+}
+
+/* no reclassification while locks are held */
+static inline bool sock_allow_reclassification(const struct sock *csk)
+{
+	struct sock *sk = (struct sock *)csk;
+
+	return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
+}
 
 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
 		      struct proto *prot, int kern);
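
Caller-side context, as a hedged sketch rather than part of the patch: the new
sock_allow_reclassification() helper is meant to replace open-coded checks of
sk->sk_lock.owned that callers perform before reclassifying a socket's lockdep
keys. The function name and lock class keys below are hypothetical; only
sock_allow_reclassification() (added above), sock_lock_init_class_and_name()
and WARN_ON_ONCE() are existing kernel interfaces.

	static struct lock_class_key example_slock_key;
	static struct lock_class_key example_sk_key;

	/* Reclassify the lockdep keys of a freshly created kernel socket.
	 * Reclassification is only safe while nobody holds or contends the
	 * socket lock, which is what sock_allow_reclassification() checks.
	 */
	static void example_reclassify_socket(struct sock *sk)
	{
		if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
			return;

		sock_lock_init_class_and_name(sk,
					      "slock-EXAMPLE", &example_slock_key,
					      "sk_lock-EXAMPLE", &example_sk_key);
	}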