author		David S. Miller <davem@davemloft.net>	2007-02-21 08:51:47 +0100
committer	David S. Miller <davem@davemloft.net>	2007-02-26 20:42:48 +0100
commit		2c4f6219aca5939b57596278ea8b014275d4917b (patch)
tree		4635aab17f05da9945e112c61c54e93788417f4e /net
parent		[TG3]: TSO workaround fixes. (diff)
[TCP]: Fix MD5 signature pool locking.
The locking calls assumed that these code paths were only invoked in software interrupt context, but that isn't true. Therefore we need to use spin_{lock,unlock}_bh() throughout.

Signed-off-by: David S. Miller <davem@davemloft.net>
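To make the failure mode concrete, here is a minimal sketch in kernel-style C of why a lock shared between process context and softirq (bottom-half) context needs the _bh variants. The names here (example_lock and both functions) are illustrative, not from the patch; tcp_md5sig_pool_lock is the real analogue.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* illustrative stand-in for tcp_md5sig_pool_lock */

/* Process-context path, e.g. socket setup code. */
static void process_side(void)
{
	/*
	 * Plain spin_lock() leaves softirqs enabled.  If a softirq
	 * fires on this CPU while we hold the lock and its handler
	 * also takes example_lock, the CPU deadlocks on itself.
	 * spin_lock_bh() disables bottom halves first, closing that
	 * window.
	 */
	spin_lock_bh(&example_lock);
	/* ... touch state shared with softirq context ... */
	spin_unlock_bh(&example_lock);
}

/* Softirq-context path, e.g. TCP receive processing. */
static void softirq_side(void)
{
	spin_lock(&example_lock);	/* BHs are already off in this context */
	/* ... touch the same shared state ... */
	spin_unlock(&example_lock);
}

Because the MD5 pool functions can be reached from process context as well as softirq context, the patch converts every lock/unlock pair below.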
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/tcp.c	24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ac6516c642a1..74c4d103ebc2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2266,12 +2266,12 @@ void tcp_free_md5sig_pool(void)
 {
 	struct tcp_md5sig_pool **pool = NULL;
 
-	spin_lock(&tcp_md5sig_pool_lock);
+	spin_lock_bh(&tcp_md5sig_pool_lock);
 	if (--tcp_md5sig_users == 0) {
 		pool = tcp_md5sig_pool;
 		tcp_md5sig_pool = NULL;
 	}
-	spin_unlock(&tcp_md5sig_pool_lock);
+	spin_unlock_bh(&tcp_md5sig_pool_lock);
 	if (pool)
 		__tcp_free_md5sig_pool(pool);
 }
@@ -2314,36 +2314,36 @@ struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
 	int alloc = 0;
 
 retry:
-	spin_lock(&tcp_md5sig_pool_lock);
+	spin_lock_bh(&tcp_md5sig_pool_lock);
 	pool = tcp_md5sig_pool;
 	if (tcp_md5sig_users++ == 0) {
 		alloc = 1;
-		spin_unlock(&tcp_md5sig_pool_lock);
+		spin_unlock_bh(&tcp_md5sig_pool_lock);
 	} else if (!pool) {
 		tcp_md5sig_users--;
-		spin_unlock(&tcp_md5sig_pool_lock);
+		spin_unlock_bh(&tcp_md5sig_pool_lock);
 		cpu_relax();
 		goto retry;
 	} else
-		spin_unlock(&tcp_md5sig_pool_lock);
+		spin_unlock_bh(&tcp_md5sig_pool_lock);
 
 	if (alloc) {
 		/* we cannot hold spinlock here because this may sleep. */
 		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
-		spin_lock(&tcp_md5sig_pool_lock);
+		spin_lock_bh(&tcp_md5sig_pool_lock);
 		if (!p) {
 			tcp_md5sig_users--;
-			spin_unlock(&tcp_md5sig_pool_lock);
+			spin_unlock_bh(&tcp_md5sig_pool_lock);
 			return NULL;
 		}
 		pool = tcp_md5sig_pool;
 		if (pool) {
 			/* oops, it has already been assigned. */
-			spin_unlock(&tcp_md5sig_pool_lock);
+			spin_unlock_bh(&tcp_md5sig_pool_lock);
 			__tcp_free_md5sig_pool(p);
 		} else {
 			tcp_md5sig_pool = pool = p;
-			spin_unlock(&tcp_md5sig_pool_lock);
+			spin_unlock_bh(&tcp_md5sig_pool_lock);
 		}
 	}
 	return pool;
@@ -2354,11 +2354,11 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 
 struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
 {
 	struct tcp_md5sig_pool **p;
-	spin_lock(&tcp_md5sig_pool_lock);
+	spin_lock_bh(&tcp_md5sig_pool_lock);
 	p = tcp_md5sig_pool;
 	if (p)
 		tcp_md5sig_users++;
-	spin_unlock(&tcp_md5sig_pool_lock);
+	spin_unlock_bh(&tcp_md5sig_pool_lock);
 	return (p ? *per_cpu_ptr(p, cpu) : NULL);
 }
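For orientation, a hedged usage sketch of the pool API touched above, based only on the signatures shown in this diff: __tcp_get_md5sig_pool() takes a user reference under the BH-safe lock and returns the given CPU's pool entry, and tcp_free_md5sig_pool() drops a reference. The caller function is hypothetical; get_cpu()/put_cpu() pin the current CPU so its per-CPU entry stays valid while in use.

#include <linux/errno.h>
#include <linux/smp.h>
#include <net/tcp.h>

/* Hypothetical caller, not from this patch. */
static int example_md5_user(void)
{
	struct tcp_md5sig_pool *p;
	int cpu = get_cpu();		/* pin this CPU while using its pool entry */

	p = __tcp_get_md5sig_pool(cpu);	/* bumps tcp_md5sig_users under the BH lock */
	if (!p) {
		put_cpu();
		return -ENOMEM;
	}

	/* ... compute a signature with the per-CPU crypto state in *p ... */

	tcp_free_md5sig_pool();		/* drops the reference taken above */
	put_cpu();
	return 0;
}

Note that this sketch can run in process context, which is exactly why the plain spin_lock() calls in these functions were unsafe before this patch.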