author     Daniel Vetter    2012-09-24 18:07:48 +0200
committer  Daniel Vetter    2012-09-24 18:17:12 +0200
commit  398b7a1b882a655ee84bd985f6c2ba89082404ae (patch)
tree    5eb9284a87ee2a45a10809cfbe3a66fea6ea53ae /net
parent  drm/i915: BUG() on unexpected HDMI register (diff)
parent  Linux 3.6-rc7 (diff)
Merge tag 'v3.6-rc7' into drm-intel-next-queued
Manual backmerge of -rc7 to resolve a silent conflict leading to a compile failure in drivers/gpu/drm/i915/intel_hdmi.c. This is due to the bugfix in -rc7:

    commit b98b60167279df3acac9422c3c9820d9ebbcf9fb
    Author: Wang Xingchao <xingchao.wang@intel.com>
    Date:   Thu Sep 13 07:43:22 2012 +0800

        drm/i915: HDMI - Clear Audio Enable bit for Hot Plug

Since this code moved around a lot in -next, git put that snippet at the wrong spot. I've tried to fix this by making the conflict explicit and merging a version for -next with:

    commit 3cce574f0190dd149472059fb69267cf83d290f9
    Author: Wang Xingchao <xingchao.wang@intel.com>
    Date:   Thu Sep 13 11:19:00 2012 +0800

        drm/i915: HDMI - Clear Audio Enable bit for Hot Plug unconditionally

But that failed to solve the entire problem. To avoid pushing out a further broken -nightly branch to our QA, do the backmerge and manually add the stuff git adds to -next from the patch in -fixes.

Note that this doesn't show up in git's merge diff (and hence is also not handled by git rerere), which adds to the reasons why I'd like to fix this with a verbose backmerge. The git merge diff only shows a bunch of trivial conflicts of the "code changed in lines next to each other" kind.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
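For context, a verbose backmerge of the kind described above boils down to a short git sequence. This is a minimal sketch, not taken from this commit: only the branch and tag names that appear above are used, and the manual conflict-resolution step (re-adding the audio-disable hunk in intel_hdmi.c by hand) is an assumed illustration.

    # sketch only -- assumes drm-intel-next-queued is the integration branch
    git checkout drm-intel-next-queued
    git merge v3.6-rc7                  # backmerge the -rc7 tag
    # silent conflict: re-add the hunk from the -fixes patch by hand
    # (assumed editing step; it does not appear in the merge diff)
    $EDITOR drivers/gpu/drm/i915/intel_hdmi.c
    git add drivers/gpu/drm/i915/intel_hdmi.c
    git commit                          # record the merge with a verbose message

Because git only records the merge result, the manually re-added hunk never shows up in the merge commit's own diff, which is also why git rerere cannot capture the resolution.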
Diffstat (limited to 'net')
-rw-r--r--  net/batman-adv/bitarray.h  6
-rw-r--r--  net/bluetooth/bnep/sock.c  4
-rw-r--r--  net/bluetooth/cmtp/sock.c  4
-rw-r--r--  net/bluetooth/hci_conn.c  4
-rw-r--r--  net/bluetooth/hci_sock.c  16
-rw-r--r--  net/bluetooth/hidp/sock.c  4
-rw-r--r--  net/bluetooth/l2cap_core.c  11
-rw-r--r--  net/bluetooth/l2cap_sock.c  2
-rw-r--r--  net/bluetooth/smp.c  10
-rw-r--r--  net/bridge/netfilter/ebt_log.c  2
-rw-r--r--  net/caif/cfsrvl.c  5
-rw-r--r--  net/core/dev.c  16
-rw-r--r--  net/core/netpoll.c  10
-rw-r--r--  net/core/pktgen.c  2
-rw-r--r--  net/core/skbuff.c  4
-rw-r--r--  net/core/sock.c  9
-rw-r--r--  net/ipv4/arp.c  2
-rw-r--r--  net/ipv4/devinet.c  10
-rw-r--r--  net/ipv4/fib_frontend.c  20
-rw-r--r--  net/ipv4/fib_rules.c  2
-rw-r--r--  net/ipv4/fib_trie.c  6
-rw-r--r--  net/ipv4/ipmr.c  14
-rw-r--r--  net/ipv4/netfilter/nf_nat_sip.c  5
-rw-r--r--  net/ipv4/route.c  49
-rw-r--r--  net/ipv4/tcp.c  23
-rw-r--r--  net/ipv4/tcp_input.c  20
-rw-r--r--  net/ipv4/udp.c  5
-rw-r--r--  net/ipv6/esp6.c  6
-rw-r--r--  net/ipv6/inet6_connection_sock.c  23
-rw-r--r--  net/ipv6/ip6_fib.c  4
-rw-r--r--  net/ipv6/route.c  19
-rw-r--r--  net/ipv6/tcp_ipv6.c  5
-rw-r--r--  net/ipv6/udp.c  11
-rw-r--r--  net/l2tp/l2tp_core.c  7
-rw-r--r--  net/l2tp/l2tp_core.h  1
-rw-r--r--  net/l2tp/l2tp_eth.c  2
-rw-r--r--  net/mac80211/cfg.c  9
-rw-r--r--  net/mac80211/mlme.c  4
-rw-r--r--  net/mac80211/tx.c  38
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c  4
-rw-r--r--  net/netfilter/nf_conntrack_core.c  16
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c  3
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c  29
-rw-r--r--  net/netfilter/nfnetlink_log.c  20
-rw-r--r--  net/netfilter/xt_LOG.c  37
-rw-r--r--  net/netlink/af_netlink.c  4
-rw-r--r--  net/netrom/af_netrom.c  9
-rw-r--r--  net/openvswitch/actions.c  2
-rw-r--r--  net/openvswitch/datapath.c  6
-rw-r--r--  net/openvswitch/flow.h  8
-rw-r--r--  net/packet/af_packet.c  2
-rw-r--r--  net/sched/sch_cbq.c  5
-rw-r--r--  net/sched/sch_fq_codel.c  2
-rw-r--r--  net/sched/sch_gred.c  38
-rw-r--r--  net/sched/sch_qfq.c  5
-rw-r--r--  net/sctp/output.c  21
-rw-r--r--  net/socket.c  4
-rw-r--r--  net/sunrpc/xprt.c  34
-rw-r--r--  net/sunrpc/xprtrdma/transport.c  1
-rw-r--r--  net/sunrpc/xprtsock.c  3
-rw-r--r--  net/wireless/nl80211.c  4
-rw-r--r--  net/xfrm/xfrm_input.c  2
-rw-r--r--  net/xfrm/xfrm_policy.c  3
-rw-r--r--  net/xfrm/xfrm_replay.c  15
-rw-r--r--  net/xfrm/xfrm_state.c  4
-rw-r--r--  net/xfrm/xfrm_user.c  57
66 files changed, 420 insertions, 312 deletions
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index a081ce1c0514..cebaae7e148b 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -20,8 +20,8 @@
#ifndef _NET_BATMAN_ADV_BITARRAY_H_
#define _NET_BATMAN_ADV_BITARRAY_H_
-/* returns true if the corresponding bit in the given seq_bits indicates true
- * and curr_seqno is within range of last_seqno
+/* Returns 1 if the corresponding bit in the given seq_bits indicates true
+ * and curr_seqno is within range of last_seqno. Otherwise returns 0.
*/
static inline int batadv_test_bit(const unsigned long *seq_bits,
uint32_t last_seqno, uint32_t curr_seqno)
@@ -32,7 +32,7 @@ static inline int batadv_test_bit(const unsigned long *seq_bits,
if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE)
return 0;
else
- return test_bit(diff, seq_bits);
+ return test_bit(diff, seq_bits) != 0;
}
/* turn corresponding bit on, so we can remember that we got the packet */
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 5e5f5b410e0b..1eaacf10d19d 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -58,7 +58,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
switch (cmd) {
case BNEPCONNADD:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca)))
return -EFAULT;
@@ -84,7 +84,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
case BNEPCONNDEL:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 311668d14571..32dc83dcb6b2 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -72,7 +72,7 @@ static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
switch (cmd) {
case CMTPCONNADD:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca)))
return -EFAULT;
@@ -97,7 +97,7 @@ static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
case CMTPCONNDEL:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 5ad7da217474..3c094e78dde9 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -29,6 +29,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/a2mp.h>
+#include <net/bluetooth/smp.h>
static void hci_le_connect(struct hci_conn *conn)
{
@@ -619,6 +620,9 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
BT_DBG("hcon %p", conn);
+ if (conn->type == LE_LINK)
+ return smp_conn_security(conn, sec_level);
+
/* For sdp we don't need the link key. */
if (sec_level == BT_SECURITY_SDP)
return 1;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 19fdac78e555..d5ace1eda3ed 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -490,7 +490,7 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
switch (cmd) {
case HCISETRAW:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
return -EPERM;
@@ -510,12 +510,12 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
case HCIBLOCKADDR:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_sock_blacklist_add(hdev, (void __user *) arg);
case HCIUNBLOCKADDR:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_sock_blacklist_del(hdev, (void __user *) arg);
default:
@@ -546,22 +546,22 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
case HCIDEVUP:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_dev_open(arg);
case HCIDEVDOWN:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_dev_close(arg);
case HCIDEVRESET:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_dev_reset(arg);
case HCIDEVRESTAT:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_dev_reset_stat(arg);
case HCISETSCAN:
@@ -573,7 +573,7 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
case HCISETACLMTU:
case HCISETSCOMTU:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_dev_cmd(cmd, argp);
case HCIINQUIRY:
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 18b3f6892a36..b24fb3bd8625 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -56,7 +56,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
switch (cmd) {
case HIDPCONNADD:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca)))
return -EFAULT;
@@ -91,7 +91,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
case HIDPCONNDEL:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index daa149b7003c..4ea1710a4783 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1199,14 +1199,15 @@ clean:
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
struct l2cap_chan *chan;
+ struct hci_conn *hcon = conn->hcon;
BT_DBG("conn %p", conn);
- if (!conn->hcon->out && conn->hcon->type == LE_LINK)
+ if (!hcon->out && hcon->type == LE_LINK)
l2cap_le_conn_ready(conn);
- if (conn->hcon->out && conn->hcon->type == LE_LINK)
- smp_conn_security(conn, conn->hcon->pending_sec_level);
+ if (hcon->out && hcon->type == LE_LINK)
+ smp_conn_security(hcon, hcon->pending_sec_level);
mutex_lock(&conn->chan_lock);
@@ -1219,8 +1220,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
continue;
}
- if (conn->hcon->type == LE_LINK) {
- if (smp_conn_security(conn, chan->sec_level))
+ if (hcon->type == LE_LINK) {
+ if (smp_conn_security(hcon, chan->sec_level))
l2cap_chan_ready(chan);
} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 1497edd191a2..34bbe1c5e389 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -616,7 +616,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
break;
}
- if (smp_conn_security(conn, sec.level))
+ if (smp_conn_security(conn->hcon, sec.level))
break;
sk->sk_state = BT_CONFIG;
chan->state = BT_CONFIG;
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 901a616c8083..8c225ef349cd 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -267,10 +267,10 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
hcon->dst_type, reason);
- if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
- cancel_delayed_work_sync(&conn->security_timer);
+ cancel_delayed_work_sync(&conn->security_timer);
+
+ if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
smp_chan_destroy(conn);
- }
}
#define JUST_WORKS 0x00
@@ -760,9 +760,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
return 0;
}
-int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
+int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
{
- struct hci_conn *hcon = conn->hcon;
+ struct l2cap_conn *conn = hcon->l2cap_data;
struct smp_chan *smp = conn->smp_chan;
__u8 authreq;
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index f88ee537fb2b..92de5e5f9db2 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -80,7 +80,7 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum,
unsigned int bitmask;
spin_lock_bh(&ebt_log_lock);
- printk("<%c>%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x",
+ printk(KERN_SOH "%c%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x",
'0' + loginfo->u.log.level, prefix,
in ? in->name : "", out ? out->name : "",
eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index dd485f6128e8..ba217e90765e 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -211,9 +211,10 @@ void caif_client_register_refcnt(struct cflayer *adapt_layer,
void (*put)(struct cflayer *lyr))
{
struct cfsrvl *service;
- service = container_of(adapt_layer->dn, struct cfsrvl, layer);
- WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL);
+ if (WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL))
+ return;
+ service = container_of(adapt_layer->dn, struct cfsrvl, layer);
service->hold = hold;
service->put = put;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 83988362805e..89e33a5d4d93 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2134,7 +2134,8 @@ static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
static netdev_features_t harmonize_features(struct sk_buff *skb,
__be16 protocol, netdev_features_t features)
{
- if (!can_checksum_protocol(features, protocol)) {
+ if (skb->ip_summed != CHECKSUM_NONE &&
+ !can_checksum_protocol(features, protocol)) {
features &= ~NETIF_F_ALL_CSUM;
features &= ~NETIF_F_SG;
} else if (illegal_highdma(skb->dev, skb)) {
@@ -2647,15 +2648,16 @@ void __skb_get_rxhash(struct sk_buff *skb)
if (!skb_flow_dissect(skb, &keys))
return;
- if (keys.ports) {
- if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
- swap(keys.port16[0], keys.port16[1]);
+ if (keys.ports)
skb->l4_rxhash = 1;
- }
/* get a consistent hash (same value on both flow directions) */
- if ((__force u32)keys.dst < (__force u32)keys.src)
+ if (((__force u32)keys.dst < (__force u32)keys.src) ||
+ (((__force u32)keys.dst == (__force u32)keys.src) &&
+ ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
swap(keys.dst, keys.src);
+ swap(keys.port16[0], keys.port16[1]);
+ }
hash = jhash_3words((__force u32)keys.dst,
(__force u32)keys.src,
@@ -3321,7 +3323,7 @@ ncls:
if (pt_prev) {
if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
- ret = -ENOMEM;
+ goto drop;
else
ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
} else {
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 346b1eb83a1f..e4ba3e70c174 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -168,24 +168,16 @@ static void poll_napi(struct net_device *dev)
struct napi_struct *napi;
int budget = 16;
- WARN_ON_ONCE(!irqs_disabled());
-
list_for_each_entry(napi, &dev->napi_list, dev_list) {
- local_irq_enable();
if (napi->poll_owner != smp_processor_id() &&
spin_trylock(&napi->poll_lock)) {
- rcu_read_lock_bh();
budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
napi, budget);
- rcu_read_unlock_bh();
spin_unlock(&napi->poll_lock);
- if (!budget) {
- local_irq_disable();
+ if (!budget)
break;
- }
}
- local_irq_disable();
}
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index cce9e53528b1..148e73d2c451 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2721,7 +2721,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
/* Eth + IPh + UDPh + mpls */
datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 -
pkt_dev->pkt_overhead;
- if (datalen < sizeof(struct pktgen_hdr))
+ if (datalen < 0 || datalen < sizeof(struct pktgen_hdr))
datalen = sizeof(struct pktgen_hdr);
udph->source = htons(pkt_dev->cur_udp_src);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index fe00d1208167..e33ebae519c8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3502,7 +3502,9 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
if (!skb_cloned(from))
skb_shinfo(from)->nr_frags = 0;
- /* if the skb is cloned this does nothing since we set nr_frags to 0 */
+ /* if the skb is not cloned this does nothing
+ * since we set nr_frags to 0.
+ */
for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
skb_frag_ref(from, i);
diff --git a/net/core/sock.c b/net/core/sock.c
index 8f67ced8d6a8..305792076121 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1523,7 +1523,14 @@ EXPORT_SYMBOL(sock_rfree);
void sock_edemux(struct sk_buff *skb)
{
- sock_put(skb->sk);
+ struct sock *sk = skb->sk;
+
+#ifdef CONFIG_INET
+ if (sk->sk_state == TCP_TIME_WAIT)
+ inet_twsk_put(inet_twsk(sk));
+ else
+#endif
+ sock_put(sk);
}
EXPORT_SYMBOL(sock_edemux);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 77e87aff419a..47800459e4cb 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1225,7 +1225,7 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event,
switch (event) {
case NETDEV_CHANGEADDR:
neigh_changeaddr(&arp_tbl, dev);
- rt_cache_flush(dev_net(dev), 0);
+ rt_cache_flush(dev_net(dev));
break;
default:
break;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 44bf82e3aef7..e12fad773852 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -725,7 +725,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
break;
case SIOCSIFFLAGS:
- ret = -EACCES;
+ ret = -EPERM;
if (!capable(CAP_NET_ADMIN))
goto out;
break;
@@ -733,7 +733,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
case SIOCSIFBRDADDR: /* Set the broadcast address */
case SIOCSIFDSTADDR: /* Set the destination address */
case SIOCSIFNETMASK: /* Set the netmask for the interface */
- ret = -EACCES;
+ ret = -EPERM;
if (!capable(CAP_NET_ADMIN))
goto out;
ret = -EINVAL;
@@ -1503,7 +1503,7 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
if ((new_value == 0) && (old_value != 0))
- rt_cache_flush(net, 0);
+ rt_cache_flush(net);
}
return ret;
@@ -1537,7 +1537,7 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
dev_disable_lro(idev->dev);
}
rtnl_unlock();
- rt_cache_flush(net, 0);
+ rt_cache_flush(net);
}
}
@@ -1554,7 +1554,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
struct net *net = ctl->extra2;
if (write && *valp != val)
- rt_cache_flush(net, 0);
+ rt_cache_flush(net);
return ret;
}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index c43ae3fba792..8e2b475da9fa 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -148,7 +148,7 @@ static void fib_flush(struct net *net)
}
if (flushed)
- rt_cache_flush(net, -1);
+ rt_cache_flush(net);
}
/*
@@ -999,11 +999,11 @@ static void nl_fib_lookup_exit(struct net *net)
net->ipv4.fibnl = NULL;
}
-static void fib_disable_ip(struct net_device *dev, int force, int delay)
+static void fib_disable_ip(struct net_device *dev, int force)
{
if (fib_sync_down_dev(dev, force))
fib_flush(dev_net(dev));
- rt_cache_flush(dev_net(dev), delay);
+ rt_cache_flush(dev_net(dev));
arp_ifdown(dev);
}
@@ -1020,7 +1020,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
fib_sync_up(dev);
#endif
atomic_inc(&net->ipv4.dev_addr_genid);
- rt_cache_flush(dev_net(dev), -1);
+ rt_cache_flush(dev_net(dev));
break;
case NETDEV_DOWN:
fib_del_ifaddr(ifa, NULL);
@@ -1029,9 +1029,9 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
/* Last address was deleted from this interface.
* Disable IP.
*/
- fib_disable_ip(dev, 1, 0);
+ fib_disable_ip(dev, 1);
} else {
- rt_cache_flush(dev_net(dev), -1);
+ rt_cache_flush(dev_net(dev));
}
break;
}
@@ -1045,7 +1045,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
struct net *net = dev_net(dev);
if (event == NETDEV_UNREGISTER) {
- fib_disable_ip(dev, 2, -1);
+ fib_disable_ip(dev, 2);
rt_flush_dev(dev);
return NOTIFY_DONE;
}
@@ -1062,14 +1062,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
fib_sync_up(dev);
#endif
atomic_inc(&net->ipv4.dev_addr_genid);
- rt_cache_flush(dev_net(dev), -1);
+ rt_cache_flush(dev_net(dev));
break;
case NETDEV_DOWN:
- fib_disable_ip(dev, 0, 0);
+ fib_disable_ip(dev, 0);
break;
case NETDEV_CHANGEMTU:
case NETDEV_CHANGE:
- rt_cache_flush(dev_net(dev), 0);
+ rt_cache_flush(dev_net(dev));
break;
case NETDEV_UNREGISTER_BATCH:
break;
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index a83d74e498d2..274309d3aded 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -259,7 +259,7 @@ static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule)
static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
{
- rt_cache_flush(ops->fro_net, -1);
+ rt_cache_flush(ops->fro_net);
}
static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = {
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 57bd978483e1..d1b93595b4a7 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1286,7 +1286,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
fib_release_info(fi_drop);
if (state & FA_S_ACCESSED)
- rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
+ rt_cache_flush(cfg->fc_nlinfo.nl_net);
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
@@ -1333,7 +1333,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
list_add_tail_rcu(&new_fa->fa_list,
(fa ? &fa->fa_list : fa_head));
- rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
+ rt_cache_flush(cfg->fc_nlinfo.nl_net);
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
&cfg->fc_nlinfo, 0);
succeeded:
@@ -1708,7 +1708,7 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
trie_leaf_remove(t, l);
if (fa->fa_state & FA_S_ACCESSED)
- rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
+ rt_cache_flush(cfg->fc_nlinfo.nl_net);
fib_release_info(fa->fa_info);
alias_free_mem_rcu(fa);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 8eec8f4a0536..ebdf06f938bf 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+static void ipmr_free_table(struct mr_table *mrt);
+
static int ip_mr_forward(struct net *net, struct mr_table *mrt,
struct sk_buff *skb, struct mfc_cache *cache,
int local);
@@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mfc_cache *c, struct rtmsg *rtm);
+static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
list_del(&mrt->list);
- kfree(mrt);
+ ipmr_free_table(mrt);
}
fib_rules_unregister(net->ipv4.mr_rules_ops);
}
@@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net)
static void __net_exit ipmr_rules_exit(struct net *net)
{
- kfree(net->ipv4.mrt);
+ ipmr_free_table(net->ipv4.mrt);
}
#endif
@@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
return mrt;
}
+static void ipmr_free_table(struct mr_table *mrt)
+{
+ del_timer_sync(&mrt->ipmr_expire_timer);
+ mroute_clean_tables(mrt);
+ kfree(mrt);
+}
+
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 4ad9cf173992..9c87cde28ff8 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -502,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
ret = nf_ct_expect_related(rtcp_exp);
if (ret == 0)
break;
- else if (ret != -EBUSY) {
+ else if (ret == -EBUSY) {
+ nf_ct_unexpect_related(rtp_exp);
+ continue;
+ } else if (ret < 0) {
nf_ct_unexpect_related(rtp_exp);
port = 0;
break;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index fd9ecb52c66b..fd9af60397b5 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -202,11 +202,6 @@ EXPORT_SYMBOL(ip_tos2prio);
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
-static inline int rt_genid(struct net *net)
-{
- return atomic_read(&net->ipv4.rt_genid);
-}
-
#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
@@ -447,27 +442,9 @@ static inline bool rt_is_expired(const struct rtable *rth)
return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}
-/*
- * Perturbation of rt_genid by a small quantity [1..256]
- * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
- * many times (2^24) without giving recent rt_genid.
- * Jenkins hash is strong enough that litle changes of rt_genid are OK.
- */
-static void rt_cache_invalidate(struct net *net)
+void rt_cache_flush(struct net *net)
{
- unsigned char shuffle;
-
- get_random_bytes(&shuffle, sizeof(shuffle));
- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
-}
-
-/*
- * delay < 0 : invalidate cache (fast : entries will be deleted later)
- * delay >= 0 : invalidate & flush cache (can be long)
- */
-void rt_cache_flush(struct net *net, int delay)
-{
- rt_cache_invalidate(net);
+ rt_genid_bump(net);
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
@@ -934,12 +911,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
if (mtu < ip_rt_min_pmtu)
mtu = ip_rt_min_pmtu;
+ rcu_read_lock();
if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
struct fib_nh *nh = &FIB_RES_NH(res);
update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
jiffies + ip_rt_mtu_expires);
}
+ rcu_read_unlock();
return mtu;
}
@@ -956,7 +935,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
dst->obsolete = DST_OBSOLETE_KILL;
} else {
rt->rt_pmtu = mtu;
- dst_set_expires(&rt->dst, ip_rt_mtu_expires);
+ rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires);
}
}
@@ -1263,7 +1242,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
{
struct rtable *rt = (struct rtable *) dst;
- if (dst->flags & DST_NOCACHE) {
+ if (!list_empty(&rt->rt_uncached)) {
spin_lock_bh(&rt_uncached_lock);
list_del(&rt->rt_uncached);
spin_unlock_bh(&rt_uncached_lock);
@@ -2343,7 +2322,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
void ip_rt_multicast_event(struct in_device *in_dev)
{
- rt_cache_flush(dev_net(in_dev->dev), 0);
+ rt_cache_flush(dev_net(in_dev->dev));
}
#ifdef CONFIG_SYSCTL
@@ -2352,16 +2331,7 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
size_t *lenp, loff_t *ppos)
{
if (write) {
- int flush_delay;
- ctl_table ctl;
- struct net *net;
-
- memcpy(&ctl, __ctl, sizeof(ctl));
- ctl.data = &flush_delay;
- proc_dointvec(&ctl, write, buffer, lenp, ppos);
-
- net = (struct net *)__ctl->extra1;
- rt_cache_flush(net, flush_delay);
+ rt_cache_flush((struct net *)__ctl->extra1);
return 0;
}
@@ -2531,8 +2501,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
static __net_init int rt_genid_init(struct net *net)
{
- get_random_bytes(&net->ipv4.rt_genid,
- sizeof(net->ipv4.rt_genid));
+ atomic_set(&net->rt_genid, 0);
get_random_bytes(&net->ipv4.dev_addr_genid,
sizeof(net->ipv4.dev_addr_genid));
return 0;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2109ff4a1daf..5f6419341821 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1762,8 +1762,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
}
#ifdef CONFIG_NET_DMA
- if (tp->ucopy.dma_chan)
- dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+ if (tp->ucopy.dma_chan) {
+ if (tp->rcv_wnd == 0 &&
+ !skb_queue_empty(&sk->sk_async_wait_queue)) {
+ tcp_service_net_dma(sk, true);
+ tcp_cleanup_rbuf(sk, copied);
+ } else
+ dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+ }
#endif
if (copied >= target) {
/* Do not sleep, just process backlog. */
@@ -2325,10 +2331,17 @@ static int tcp_repair_options_est(struct tcp_sock *tp,
tp->rx_opt.mss_clamp = opt.opt_val;
break;
case TCPOPT_WINDOW:
- if (opt.opt_val > 14)
- return -EFBIG;
+ {
+ u16 snd_wscale = opt.opt_val & 0xFFFF;
+ u16 rcv_wscale = opt.opt_val >> 16;
+
+ if (snd_wscale > 14 || rcv_wscale > 14)
+ return -EFBIG;
- tp->rx_opt.snd_wscale = opt.opt_val;
+ tp->rx_opt.snd_wscale = snd_wscale;
+ tp->rx_opt.rcv_wscale = rcv_wscale;
+ tp->rx_opt.wscale_ok = 1;
+ }
break;
case TCPOPT_SACK_PERM:
if (opt.opt_val != 0)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 85308b90df80..d377f4854cb8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2926,13 +2926,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
* tcp_xmit_retransmit_queue().
*/
static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
- int newly_acked_sacked, bool is_dupack,
+ int prior_sacked, bool is_dupack,
int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
(tcp_fackets_out(tp) > tp->reordering));
+ int newly_acked_sacked = 0;
int fast_rexmit = 0;
if (WARN_ON(!tp->packets_out && tp->sacked_out))
@@ -2992,6 +2993,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
tcp_add_reno_sack(sk);
} else
do_lost = tcp_try_undo_partial(sk, pkts_acked);
+ newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
break;
case TCP_CA_Loss:
if (flag & FLAG_DATA_ACKED)
@@ -3013,6 +3015,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
if (is_dupack)
tcp_add_reno_sack(sk);
}
+ newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
if (icsk->icsk_ca_state <= TCP_CA_Disorder)
tcp_try_undo_dsack(sk);
@@ -3590,7 +3593,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
int prior_packets;
int prior_sacked = tp->sacked_out;
int pkts_acked = 0;
- int newly_acked_sacked = 0;
bool frto_cwnd = false;
/* If the ack is older than previous acks
@@ -3666,8 +3668,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
pkts_acked = prior_packets - tp->packets_out;
- newly_acked_sacked = (prior_packets - prior_sacked) -
- (tp->packets_out - tp->sacked_out);
if (tp->frto_counter)
frto_cwnd = tcp_process_frto(sk, flag);
@@ -3681,7 +3681,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_may_raise_cwnd(sk, flag))
tcp_cong_avoid(sk, ack, prior_in_flight);
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
- tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+ tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
is_dupack, flag);
} else {
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
@@ -3698,7 +3698,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
no_queue:
/* If data was DSACKed, see if we can undo a cwnd reduction. */
if (flag & FLAG_DSACKING_ACK)
- tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+ tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
is_dupack, flag);
/* If this ack opens up a zero window, clear backoff. It was
* being used to time the probes, and is probably far higher than
@@ -3718,8 +3718,7 @@ old_ack:
*/
if (TCP_SKB_CB(skb)->sacked) {
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
- newly_acked_sacked = tp->sacked_out - prior_sacked;
- tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+ tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
is_dupack, flag);
}
@@ -4662,7 +4661,7 @@ queue_and_out:
if (eaten > 0)
kfree_skb_partial(skb, fragstolen);
- else if (!sock_flag(sk, SOCK_DEAD))
+ if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, 0);
return;
}
@@ -5557,8 +5556,7 @@ no_ack:
#endif
if (eaten)
kfree_skb_partial(skb, fragstolen);
- else
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk, 0);
return 0;
}
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6f6d1aca3c3d..2814f66dac64 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1226,6 +1226,11 @@ try_again:
if (unlikely(err)) {
trace_kfree_skb(skb, udp_recvmsg);
+ if (!peeked) {
+ atomic_inc(&sk->sk_drops);
+ UDP_INC_STATS_USER(sock_net(sk),
+ UDP_MIB_INERRORS, is_udplite);
+ }
goto out_free;
}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 6dc7fd353ef5..282f3723ee19 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -167,8 +167,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
struct esp_data *esp = x->data;
/* skb is pure payload to encrypt */
- err = -ENOMEM;
-
aead = esp->aead;
alen = crypto_aead_authsize(aead);
@@ -203,8 +201,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
}
tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
- if (!tmp)
+ if (!tmp) {
+ err = -ENOMEM;
goto error;
+ }
seqhi = esp_tmp_seqhi(tmp);
iv = esp_tmp_iv(aead, tmp, seqhilen);
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 0251a6005be8..c4f934176cab 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -175,33 +175,12 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
const struct in6_addr *saddr)
{
__ip6_dst_store(sk, dst, daddr, saddr);
-
-#ifdef CONFIG_XFRM
- {
- struct rt6_info *rt = (struct rt6_info *)dst;
- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
- }
-#endif
}
static inline
struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
{
- struct dst_entry *dst;
-
- dst = __sk_dst_check(sk, cookie);
-
-#ifdef CONFIG_XFRM
- if (dst) {
- struct rt6_info *rt = (struct rt6_info *)dst;
- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
- __sk_dst_reset(sk);
- dst = NULL;
- }
- }
-#endif
-
- return dst;
+ return __sk_dst_check(sk, cookie);
}
static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 13690d650c3e..286acfc21250 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -819,6 +819,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
offsetof(struct rt6_info, rt6i_src),
allow_create, replace_required);
+ if (IS_ERR(sn)) {
+ err = PTR_ERR(sn);
+ sn = NULL;
+ }
if (!sn) {
/* If it is failed, discard just allocated
root, and then (in st_failure) stale node
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8e80fd279100..854e4018d205 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -226,7 +226,7 @@ static struct rt6_info ip6_null_entry_template = {
.dst = {
.__refcnt = ATOMIC_INIT(1),
.__use = 1,
- .obsolete = -1,
+ .obsolete = DST_OBSOLETE_FORCE_CHK,
.error = -ENETUNREACH,
.input = ip6_pkt_discard,
.output = ip6_pkt_discard_out,
@@ -246,7 +246,7 @@ static struct rt6_info ip6_prohibit_entry_template = {
.dst = {
.__refcnt = ATOMIC_INIT(1),
.__use = 1,
- .obsolete = -1,
+ .obsolete = DST_OBSOLETE_FORCE_CHK,
.error = -EACCES,
.input = ip6_pkt_prohibit,
.output = ip6_pkt_prohibit_out,
@@ -261,7 +261,7 @@ static struct rt6_info ip6_blk_hole_entry_template = {
.dst = {
.__refcnt = ATOMIC_INIT(1),
.__use = 1,
- .obsolete = -1,
+ .obsolete = DST_OBSOLETE_FORCE_CHK,
.error = -EINVAL,
.input = dst_discard,
.output = dst_discard,
@@ -281,13 +281,14 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net,
struct fib6_table *table)
{
struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
- 0, DST_OBSOLETE_NONE, flags);
+ 0, DST_OBSOLETE_FORCE_CHK, flags);
if (rt) {
struct dst_entry *dst = &rt->dst;
memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
+ rt->rt6i_genid = rt_genid(net);
}
return rt;
}
@@ -1031,6 +1032,13 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
rt = (struct rt6_info *) dst;
+ /* All IPV6 dsts are created with ->obsolete set to the value
+ * DST_OBSOLETE_FORCE_CHK which forces validation calls down
+ * into this function always.
+ */
+ if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev)))
+ return NULL;
+
if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
if (rt->rt6i_peer_genid != rt6_peer_genid()) {
if (!rt6_has_peer(rt))
@@ -1397,8 +1405,6 @@ int ip6_route_add(struct fib6_config *cfg)
goto out;
}
- rt->dst.obsolete = -1;
-
if (cfg->fc_flags & RTF_EXPIRES)
rt6_set_expires(rt, jiffies +
clock_t_to_jiffies(cfg->fc_expires));
@@ -2080,7 +2086,6 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
rt->dst.input = ip6_input;
rt->dst.output = ip6_output;
rt->rt6i_idev = idev;
- rt->dst.obsolete = -1;
rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
if (anycast)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index a3e60cc04a8a..acd32e3f1b68 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -403,8 +403,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
tp->mtu_info = ntohl(info);
if (!sock_owned_by_user(sk))
tcp_v6_mtu_reduced(sk);
- else
- set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
+ else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
+ &tp->tsq_flags))
+ sock_hold(sk);
goto out;
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 99d0077b56b8..07e2bfef6845 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -394,6 +394,17 @@ try_again:
}
if (unlikely(err)) {
trace_kfree_skb(skb, udpv6_recvmsg);
+ if (!peeked) {
+ atomic_inc(&sk->sk_drops);
+ if (is_udp4)
+ UDP_INC_STATS_USER(sock_net(sk),
+ UDP_MIB_INERRORS,
+ is_udplite);
+ else
+ UDP6_INC_STATS_USER(sock_net(sk),
+ UDP_MIB_INERRORS,
+ is_udplite);
+ }
goto out_free;
}
if (!peeked) {
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 393355d37b47..1a9f3723c13c 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
/* Remove from tunnel list */
spin_lock_bh(&pn->l2tp_tunnel_list_lock);
list_del_rcu(&tunnel->list);
+ kfree_rcu(tunnel, rcu);
spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
- synchronize_rcu();
atomic_dec(&l2tp_tunnel_count);
- kfree(tunnel);
}
/* Create a socket for the tunnel, if one isn't set up by
@@ -1502,6 +1501,8 @@ out:
return err;
}
+static struct lock_class_key l2tp_socket_class;
+
int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
{
struct l2tp_tunnel *tunnel = NULL;
@@ -1606,6 +1607,8 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
tunnel->old_sk_destruct = sk->sk_destruct;
sk->sk_destruct = &l2tp_tunnel_destruct;
tunnel->sock = sk;
+ lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
+
sk->sk_allocation = GFP_ATOMIC;
/* Add tunnel to our list */
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a38ec6cdeee1..56d583e083a7 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg {
struct l2tp_tunnel {
int magic; /* Should be L2TP_TUNNEL_MAGIC */
+ struct rcu_head rcu;
rwlock_t hlist_lock; /* protect session_hlist */
struct hlist_head session_hlist[L2TP_HASH_SIZE];
/* hashed list of sessions,
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index f9ee74deeac2..3bfb34aaee29 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -153,7 +153,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
}
- if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
+ if (!pskb_may_pull(skb, ETH_HLEN))
goto error;
secpath_reset(skb);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d41974aacf51..a58c0b649ba1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1378,6 +1378,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
else
memset(next_hop, 0, ETH_ALEN);
+ memset(pinfo, 0, sizeof(*pinfo));
+
pinfo->generation = mesh_paths_generation;
pinfo->filled = MPATH_INFO_FRAME_QLEN |
@@ -1396,7 +1398,6 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
pinfo->discovery_timeout =
jiffies_to_msecs(mpath->discovery_timeout);
pinfo->discovery_retries = mpath->discovery_retries;
- pinfo->flags = 0;
if (mpath->flags & MESH_PATH_ACTIVE)
pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE;
if (mpath->flags & MESH_PATH_RESOLVING)
@@ -1405,10 +1406,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
pinfo->flags |= NL80211_MPATH_FLAG_SN_VALID;
if (mpath->flags & MESH_PATH_FIXED)
pinfo->flags |= NL80211_MPATH_FLAG_FIXED;
- if (mpath->flags & MESH_PATH_RESOLVING)
- pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING;
-
- pinfo->flags = mpath->flags;
+ if (mpath->flags & MESH_PATH_RESOLVED)
+ pinfo->flags |= NL80211_MPATH_FLAG_RESOLVED;
}
static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a4a5acdbaa4d..f76b83341cf9 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3248,6 +3248,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
goto out_unlock;
err_clear:
+ memset(ifmgd->bssid, 0, ETH_ALEN);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
ifmgd->auth_data = NULL;
err_free:
kfree(auth_data);
@@ -3439,6 +3441,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
err = 0;
goto out;
err_clear:
+ memset(ifmgd->bssid, 0, ETH_ALEN);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
ifmgd->assoc_data = NULL;
err_free:
kfree(assoc_data);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index acf712ffb5e6..c5e8c9c31f76 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1811,37 +1811,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
sdata, NULL, NULL);
} else {
- int is_mesh_mcast = 1;
- const u8 *mesh_da;
+ /* DS -> MBSS (802.11-2012 13.11.3.3).
+ * For unicast with unknown forwarding information,
+ * destination might be in the MBSS or if that fails
+ * forwarded to another mesh gate. In either case
+ * resolution will be handled in ieee80211_xmit(), so
+ * leave the original DA. This also works for mcast */
+ const u8 *mesh_da = skb->data;
+
+ if (mppath)
+ mesh_da = mppath->mpp;
+ else if (mpath)
+ mesh_da = mpath->dst;
+ rcu_read_unlock();
- if (is_multicast_ether_addr(skb->data))
- /* DA TA mSA AE:SA */
- mesh_da = skb->data;
- else {
- static const u8 bcast[ETH_ALEN] =
- { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
- if (mppath) {
- /* RA TA mDA mSA AE:DA SA */
- mesh_da = mppath->mpp;
- is_mesh_mcast = 0;
- } else if (mpath) {
- mesh_da = mpath->dst;
- is_mesh_mcast = 0;
- } else {
- /* DA TA mSA AE:SA */
- mesh_da = bcast;
- }
- }
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
mesh_da, sdata->vif.addr);
- rcu_read_unlock();
- if (is_mesh_mcast)
+ if (is_multicast_ether_addr(mesh_da))
+ /* DA TA mSA AE:SA */
meshhdrlen =
ieee80211_new_mesh_header(&mesh_hdr,
sdata,
skb->data + ETH_ALEN,
NULL);
else
+ /* RA TA mDA mSA AE:DA SA */
meshhdrlen =
ieee80211_new_mesh_header(&mesh_hdr,
sdata,
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 72bf32a84874..f51013c07b9f 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
goto out_err;
}
svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
- if (!svc->stats.cpustats)
+ if (!svc->stats.cpustats) {
+ ret = -ENOMEM;
goto out_err;
+ }
/* I'm the first user of the service */
atomic_set(&svc->usecnt, 0);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index cf4875565d67..2ceec64b19f9 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack)
{
struct nf_conn *ct = (void *)ul_conntrack;
struct net *net = nf_ct_net(ct);
+ struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+ BUG_ON(ecache == NULL);
if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
/* bad luck, let's retry again */
- ct->timeout.expires = jiffies +
+ ecache->timeout.expires = jiffies +
(random32() % net->ct.sysctl_events_retry_timeout);
- add_timer(&ct->timeout);
+ add_timer(&ecache->timeout);
return;
}
/* we've got the event delivered, now it's dying */
@@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack)
void nf_ct_insert_dying_list(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
+ struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+ BUG_ON(ecache == NULL);
/* add this conntrack to the dying list */
spin_lock_bh(&nf_conntrack_lock);
@@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
&net->ct.dying);
spin_unlock_bh(&nf_conntrack_lock);
/* set a new timer to retry event delivery */
- setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
- ct->timeout.expires = jiffies +
+ setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
+ ecache->timeout.expires = jiffies +
(random32() % net->ct.sysctl_events_retry_timeout);
- add_timer(&ct->timeout);
+ add_timer(&ecache->timeout);
}
EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index da4fc37a8578..9807f3278fcb 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2790,7 +2790,8 @@ static int __init ctnetlink_init(void)
goto err_unreg_subsys;
}
- if (register_pernet_subsys(&ctnetlink_net_ops)) {
+ ret = register_pernet_subsys(&ctnetlink_net_ops);
+ if (ret < 0) {
pr_err("ctnetlink_init: cannot register pernet operations\n");
goto err_unreg_exp_subsys;
}
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index a5ac11ebef33..e046b3756aab 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -158,21 +158,18 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
* sCL -> sSS
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
-/*synack*/ { sIV, sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
+/*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
/*
* sNO -> sIV Too late and no reason to do anything
* sSS -> sIV Client can't send SYN and then SYN/ACK
* sS2 -> sSR SYN/ACK sent to SYN2 in simultaneous open
- * sSR -> sIG
- * sES -> sIG Error: SYNs in window outside the SYN_SENT state
- * are errors. Receiver will reply with RST
- * and close the connection.
- * Or we are not in sync and hold a dead connection.
- * sFW -> sIG
- * sCW -> sIG
- * sLA -> sIG
- * sTW -> sIG
- * sCL -> sIG
+ * sSR -> sSR Late retransmitted SYN/ACK in simultaneous open
+ * sES -> sIV Invalid SYN/ACK packets sent by the client
+ * sFW -> sIV
+ * sCW -> sIV
+ * sLA -> sIV
+ * sTW -> sIV
+ * sCL -> sIV
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
@@ -633,15 +630,9 @@ static bool tcp_in_window(const struct nf_conn *ct,
ack = sack = receiver->td_end;
}
- if (seq == end
- && (!tcph->rst
- || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)))
+ if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
/*
- * Packets contains no data: we assume it is valid
- * and check the ack value only.
- * However RST segments are always validated by their
- * SEQ number, except when seq == 0 (reset sent answering
- * SYN.
+ * RST sent answering SYN.
*/
seq = end = sender->td_end;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 169ab59ed9d4..5cfb5bedb2b8 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -381,6 +381,7 @@ __build_packet_message(struct nfulnl_instance *inst,
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
sk_buff_data_t old_tail = inst->skb->tail;
+ struct sock *sk;
nlh = nlmsg_put(inst->skb, 0, 0,
NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
@@ -480,7 +481,7 @@ __build_packet_message(struct nfulnl_instance *inst,
}
if (indev && skb_mac_header_was_set(skb)) {
- if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
+ if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
nla_put_be16(inst->skb, NFULA_HWLEN,
htons(skb->dev->hard_header_len)) ||
nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
@@ -499,18 +500,19 @@ __build_packet_message(struct nfulnl_instance *inst,
}
/* UID */
- if (skb->sk) {
- read_lock_bh(&skb->sk->sk_callback_lock);
- if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
- struct file *file = skb->sk->sk_socket->file;
+ sk = skb->sk;
+ if (sk && sk->sk_state != TCP_TIME_WAIT) {
+ read_lock_bh(&sk->sk_callback_lock);
+ if (sk->sk_socket && sk->sk_socket->file) {
+ struct file *file = sk->sk_socket->file;
__be32 uid = htonl(file->f_cred->fsuid);
__be32 gid = htonl(file->f_cred->fsgid);
- read_unlock_bh(&skb->sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
nla_put_be32(inst->skb, NFULA_GID, gid))
goto nla_put_failure;
} else
- read_unlock_bh(&skb->sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
/* local sequence number */
@@ -996,8 +998,10 @@ static int __init nfnetlink_log_init(void)
#ifdef CONFIG_PROC_FS
if (!proc_create("nfnetlink_log", 0440,
- proc_net_netfilter, &nful_file_ops))
+ proc_net_netfilter, &nful_file_ops)) {
+ status = -ENOMEM;
goto cleanup_logger;
+ }
#endif
return status;
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
index ff5f75fddb15..91e9af4d1f42 100644
--- a/net/netfilter/xt_LOG.c
+++ b/net/netfilter/xt_LOG.c
@@ -145,6 +145,19 @@ static int dump_tcp_header(struct sbuff *m, const struct sk_buff *skb,
return 0;
}
+static void dump_sk_uid_gid(struct sbuff *m, struct sock *sk)
+{
+ if (!sk || sk->sk_state == TCP_TIME_WAIT)
+ return;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ if (sk->sk_socket && sk->sk_socket->file)
+ sb_add(m, "UID=%u GID=%u ",
+ sk->sk_socket->file->f_cred->fsuid,
+ sk->sk_socket->file->f_cred->fsgid);
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
/* One level of recursion won't kill us */
static void dump_ipv4_packet(struct sbuff *m,
const struct nf_loginfo *info,
@@ -361,14 +374,8 @@ static void dump_ipv4_packet(struct sbuff *m,
}
/* Max length: 15 "UID=4294967295 " */
- if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) {
- read_lock_bh(&skb->sk->sk_callback_lock);
- if (skb->sk->sk_socket && skb->sk->sk_socket->file)
- sb_add(m, "UID=%u GID=%u ",
- skb->sk->sk_socket->file->f_cred->fsuid,
- skb->sk->sk_socket->file->f_cred->fsgid);
- read_unlock_bh(&skb->sk->sk_callback_lock);
- }
+ if ((logflags & XT_LOG_UID) && !iphoff)
+ dump_sk_uid_gid(m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */
if (!iphoff && skb->mark)
@@ -436,8 +443,8 @@ log_packet_common(struct sbuff *m,
const struct nf_loginfo *loginfo,
const char *prefix)
{
- sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
- prefix,
+ sb_add(m, KERN_SOH "%c%sIN=%s OUT=%s ",
+ '0' + loginfo->u.log.level, prefix,
in ? in->name : "",
out ? out->name : "");
#ifdef CONFIG_BRIDGE_NETFILTER
@@ -717,14 +724,8 @@ static void dump_ipv6_packet(struct sbuff *m,
}
/* Max length: 15 "UID=4294967295 " */
- if ((logflags & XT_LOG_UID) && recurse && skb->sk) {
- read_lock_bh(&skb->sk->sk_callback_lock);
- if (skb->sk->sk_socket && skb->sk->sk_socket->file)
- sb_add(m, "UID=%u GID=%u ",
- skb->sk->sk_socket->file->f_cred->fsuid,
- skb->sk->sk_socket->file->f_cred->fsgid);
- read_unlock_bh(&skb->sk->sk_callback_lock);
- }
+ if ((logflags & XT_LOG_UID) && recurse)
+ dump_sk_uid_gid(m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */
if (!recurse && skb->mark)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1445d73533ed..527023823b5c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1373,7 +1373,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
dst_pid = addr->nl_pid;
dst_group = ffs(addr->nl_groups);
err = -EPERM;
- if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
+ if ((dst_group || dst_pid) &&
+ !netlink_capable(sock, NL_NONROOT_SEND))
goto out;
} else {
dst_pid = nlk->dst_pid;
@@ -2147,6 +2148,7 @@ static void __init netlink_add_usersock_entry(void)
rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
nl_table[NETLINK_USERSOCK].registered = 1;
+ nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
netlink_table_ungrab();
}
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 06592d8b4a2b..7261eb81974f 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -601,7 +601,7 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (!capable(CAP_NET_BIND_SERVICE)) {
dev_put(dev);
release_sock(sk);
- return -EACCES;
+ return -EPERM;
}
nr->user_addr = addr->fsa_digipeater[0];
nr->source_addr = addr->fsa_ax25.sax25_call;
@@ -1169,7 +1169,12 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
msg->msg_flags |= MSG_TRUNC;
}
- skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ if (er < 0) {
+ skb_free_datagram(sk, skb);
+ release_sock(sk);
+ return er;
+ }
if (sax != NULL) {
sax->sax25_family = AF_NETROM;
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index f3f96badf5aa..954405ceae9e 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -45,7 +45,7 @@ static int make_writable(struct sk_buff *skb, int write_len)
return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
-/* remove VLAN header from packet and update csum accrodingly. */
+/* remove VLAN header from packet and update csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
struct vlan_hdr *vhdr;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index d8277d29e710..cf58cedad083 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -425,10 +425,10 @@ static int validate_sample(const struct nlattr *attr,
static int validate_tp_port(const struct sw_flow_key *flow_key)
{
if (flow_key->eth.type == htons(ETH_P_IP)) {
- if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst)
+ if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
return 0;
} else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
- if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst)
+ if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
return 0;
}
@@ -460,7 +460,7 @@ static int validate_set(const struct nlattr *a,
if (flow_key->eth.type != htons(ETH_P_IP))
return -EINVAL;
- if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst)
+ if (!flow_key->ip.proto)
return -EINVAL;
ipv4_key = nla_data(ovs_key);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 9b75617ca4e0..c30df1a10c67 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -145,15 +145,17 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies);
* OVS_KEY_ATTR_PRIORITY 4 -- 4 8
* OVS_KEY_ATTR_IN_PORT 4 -- 4 8
* OVS_KEY_ATTR_ETHERNET 12 -- 4 16
+ * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 (outer VLAN ethertype)
* OVS_KEY_ATTR_8021Q 4 -- 4 8
- * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8
+ * OVS_KEY_ATTR_ENCAP 0 -- 4 4 (VLAN encapsulation)
+ * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 (inner VLAN ethertype)
* OVS_KEY_ATTR_IPV6 40 -- 4 44
* OVS_KEY_ATTR_ICMPV6 2 2 4 8
* OVS_KEY_ATTR_ND 28 -- 4 32
* -------------------------------------------------
- * total 132
+ * total 144
*/
-#define FLOW_BUFSIZE 132
+#define FLOW_BUFSIZE 144
int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index aee7196aac36..c5c9e2a54218 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1273,7 +1273,7 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
spin_unlock(&f->lock);
}
-bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
+static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
{
if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
return true;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 6aabd77d1cfd..564b9fc8efd3 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -250,10 +250,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
cl = defmap[TC_PRIO_BESTEFFORT];
- if (cl == NULL || cl->level >= head->level)
+ if (cl == NULL)
goto fallback;
}
-
+ if (cl->level >= head->level)
+ goto fallback;
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 9fc1c62ec80e..4e606fcb2534 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -191,7 +191,6 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (list_empty(&flow->flowchain)) {
list_add_tail(&flow->flowchain, &q->new_flows);
- codel_vars_init(&flow->cvars);
q->new_flow_count++;
flow->deficit = q->quantum;
flow->dropped = 0;
@@ -418,6 +417,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
struct fq_codel_flow *flow = q->flows + i;
INIT_LIST_HEAD(&flow->flowchain);
+ codel_vars_init(&flow->cvars);
}
}
if (sch->limit >= 1)
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index e901583e4ea5..d42234c0f13b 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -102,9 +102,8 @@ static inline int gred_wred_mode_check(struct Qdisc *sch)
if (q == NULL)
continue;
- for (n = 0; n < table->DPs; n++)
- if (table->tab[n] && table->tab[n] != q &&
- table->tab[n]->prio == q->prio)
+ for (n = i + 1; n < table->DPs; n++)
+ if (table->tab[n] && table->tab[n]->prio == q->prio)
return 1;
}
@@ -137,6 +136,7 @@ static inline void gred_store_wred_set(struct gred_sched *table,
struct gred_sched_data *q)
{
table->wred_set.qavg = q->vars.qavg;
+ table->wred_set.qidlestart = q->vars.qidlestart;
}
static inline int gred_use_ecn(struct gred_sched *t)
@@ -176,7 +176,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
}
- /* sum up all the qaves of prios <= to ours to get the new qave */
+ /* sum up all the qaves of prios < ours to get the new qave */
if (!gred_wred_mode(t) && gred_rio_mode(t)) {
int i;
@@ -260,16 +260,18 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch)
} else {
q->backlog -= qdisc_pkt_len(skb);
- if (!q->backlog && !gred_wred_mode(t))
- red_start_of_idle_period(&q->vars);
+ if (gred_wred_mode(t)) {
+ if (!sch->qstats.backlog)
+ red_start_of_idle_period(&t->wred_set);
+ } else {
+ if (!q->backlog)
+ red_start_of_idle_period(&q->vars);
+ }
}
return skb;
}
- if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
- red_start_of_idle_period(&t->wred_set);
-
return NULL;
}
@@ -291,19 +293,20 @@ static unsigned int gred_drop(struct Qdisc *sch)
q->backlog -= len;
q->stats.other++;
- if (!q->backlog && !gred_wred_mode(t))
- red_start_of_idle_period(&q->vars);
+ if (gred_wred_mode(t)) {
+ if (!sch->qstats.backlog)
+ red_start_of_idle_period(&t->wred_set);
+ } else {
+ if (!q->backlog)
+ red_start_of_idle_period(&q->vars);
+ }
}
qdisc_drop(skb, sch);
return len;
}
- if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
- red_start_of_idle_period(&t->wred_set);
-
return 0;
-
}
static void gred_reset(struct Qdisc *sch)
@@ -535,6 +538,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
for (i = 0; i < MAX_DPs; i++) {
struct gred_sched_data *q = table->tab[i];
struct tc_gred_qopt opt;
+ unsigned long qavg;
memset(&opt, 0, sizeof(opt));
@@ -566,7 +570,9 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
if (gred_wred_mode(table))
gred_load_wred_set(table, q);
- opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);
+ qavg = red_calc_qavg(&q->parms, &q->vars,
+ q->vars.qavg >> q->parms.Wlog);
+ opt.qave = qavg >> q->parms.Wlog;
append_opt:
if (nla_append(skb, sizeof(opt), &opt) < 0)
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index e4723d31fdd5..211a21217045 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -865,7 +865,10 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
if (mask) {
struct qfq_group *next = qfq_ffs(q, mask);
if (qfq_gt(roundedF, next->F)) {
- cl->S = next->F;
+ if (qfq_gt(limit, next->F))
+ cl->S = next->F;
+ else /* preserve timestamp correctness */
+ cl->S = limit;
return;
}
}
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 838e18b4d7ea..be50aa234dcd 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -364,6 +364,25 @@ finish:
return retval;
}
+static void sctp_packet_release_owner(struct sk_buff *skb)
+{
+ sk_free(skb->sk);
+}
+
+static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
+{
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sctp_packet_release_owner;
+
+ /*
+ * The data chunks have already been accounted for in sctp_sendmsg(),
+ * therefore only reserve a single byte to keep socket around until
+ * the packet has been transmitted.
+ */
+ atomic_inc(&sk->sk_wmem_alloc);
+}
+
/* All packets are sent to the network through this function from
* sctp_outq_tail().
*
@@ -405,7 +424,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
/* Set the owning socket so that we know where to get the
* destination IP address.
*/
- skb_set_owner_w(nskb, sk);
+ sctp_packet_set_owner_w(nskb, sk);
if (!sctp_transport_dst_check(tp)) {
sctp_transport_route(tp, NULL, sctp_sk(sk));
diff --git a/net/socket.c b/net/socket.c
index a5471f804d99..edc3c4af9085 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2604,7 +2604,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
set_fs(old_fs);
if (!err)
- err = compat_put_timeval(up, &ktv);
+ err = compat_put_timeval(&ktv, up);
return err;
}
@@ -2620,7 +2620,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
set_fs(old_fs);
if (!err)
- err = compat_put_timespec(up, &kts);
+ err = compat_put_timespec(&kts, up);
return err;
}
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index a5a402a7d21f..5d7f61d7559c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -969,11 +969,11 @@ static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
return false;
}
-static void xprt_alloc_slot(struct rpc_task *task)
+void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
- struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_rqst *req;
+ spin_lock(&xprt->reserve_lock);
if (!list_empty(&xprt->free)) {
req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
list_del(&req->rq_list);
@@ -994,12 +994,29 @@ static void xprt_alloc_slot(struct rpc_task *task)
default:
task->tk_status = -EAGAIN;
}
+ spin_unlock(&xprt->reserve_lock);
return;
out_init_req:
task->tk_status = 0;
task->tk_rqstp = req;
xprt_request_init(task, xprt);
+ spin_unlock(&xprt->reserve_lock);
+}
+EXPORT_SYMBOL_GPL(xprt_alloc_slot);
+
+void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+ /* Note: grabbing the xprt_lock_write() ensures that we throttle
+ * new slot allocation if the transport is congested (i.e. when
+ * reconnecting a stream transport or when out of socket write
+ * buffer space).
+ */
+ if (xprt_lock_write(xprt, task)) {
+ xprt_alloc_slot(xprt, task);
+ xprt_release_write(xprt, task);
+ }
}
+EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
@@ -1083,20 +1100,9 @@ void xprt_reserve(struct rpc_task *task)
if (task->tk_rqstp != NULL)
return;
- /* Note: grabbing the xprt_lock_write() here is not strictly needed,
- * but ensures that we throttle new slot allocation if the transport
- * is congested (e.g. if reconnecting or if we're out of socket
- * write buffer space).
- */
task->tk_timeout = 0;
task->tk_status = -EAGAIN;
- if (!xprt_lock_write(xprt, task))
- return;
-
- spin_lock(&xprt->reserve_lock);
- xprt_alloc_slot(task);
- spin_unlock(&xprt->reserve_lock);
- xprt_release_write(xprt, task);
+ xprt->ops->alloc_slot(xprt, task);
}
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 06cdbff79e4a..5d9202dc7cb1 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -713,6 +713,7 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
static struct rpc_xprt_ops xprt_rdma_procs = {
.reserve_xprt = xprt_rdma_reserve_xprt,
.release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */
+ .alloc_slot = xprt_alloc_slot,
.release_request = xprt_release_rqst_cong, /* ditto */
.set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */
.rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 400567243f84..a35b8e52e551 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2473,6 +2473,7 @@ static void bc_destroy(struct rpc_xprt *xprt)
static struct rpc_xprt_ops xs_local_ops = {
.reserve_xprt = xprt_reserve_xprt,
.release_xprt = xs_tcp_release_xprt,
+ .alloc_slot = xprt_alloc_slot,
.rpcbind = xs_local_rpcbind,
.set_port = xs_local_set_port,
.connect = xs_connect,
@@ -2489,6 +2490,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
.set_buffer_size = xs_udp_set_buffer_size,
.reserve_xprt = xprt_reserve_xprt_cong,
.release_xprt = xprt_release_xprt_cong,
+ .alloc_slot = xprt_alloc_slot,
.rpcbind = rpcb_getport_async,
.set_port = xs_set_port,
.connect = xs_connect,
@@ -2506,6 +2508,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
static struct rpc_xprt_ops xs_tcp_ops = {
.reserve_xprt = xprt_reserve_xprt,
.release_xprt = xs_tcp_release_xprt,
+ .alloc_slot = xprt_lock_and_alloc_slot,
.rpcbind = rpcb_getport_async,
.set_port = xs_set_port,
.connect = xs_connect,
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 97026f3b215a..1e37dbf00cb3 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5633,8 +5633,10 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
sizeof(connect.ht_capa_mask));
if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
- if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+ if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) {
+ kfree(connkeys);
return -EINVAL;
+ }
memcpy(&connect.ht_capa,
nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]),
sizeof(connect.ht_capa));
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 54a0dc2e2f8d..ab2bb42fe094 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -212,7 +212,7 @@ resume:
/* only the first xfrm gets the encap type */
encap_type = 0;
- if (async && x->repl->check(x, skb, seq)) {
+ if (async && x->repl->recheck(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 5a2aa17e4d3c..387848e90078 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -585,6 +585,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
xfrm_pol_hold(policy);
net->xfrm.policy_count[dir]++;
atomic_inc(&flow_cache_genid);
+ rt_genid_bump(net);
if (delpol)
__xfrm_policy_unlink(delpol, dir);
policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
@@ -1763,7 +1764,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family,
if (!afinfo) {
dst_release(dst_orig);
- ret = ERR_PTR(-EINVAL);
+ return ERR_PTR(-EINVAL);
} else {
ret = afinfo->blackhole_route(net, dst_orig);
}
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 2f6d11d04a2b..3efb07d3eb27 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -420,6 +420,18 @@ err:
return -EINVAL;
}
+static int xfrm_replay_recheck_esn(struct xfrm_state *x,
+ struct sk_buff *skb, __be32 net_seq)
+{
+ if (unlikely(XFRM_SKB_CB(skb)->seq.input.hi !=
+ htonl(xfrm_replay_seqhi(x, net_seq)))) {
+ x->stats.replay_window++;
+ return -EINVAL;
+ }
+
+ return xfrm_replay_check_esn(x, skb, net_seq);
+}
+
static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
{
unsigned int bitnr, nr, i;
@@ -479,6 +491,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
static struct xfrm_replay xfrm_replay_legacy = {
.advance = xfrm_replay_advance,
.check = xfrm_replay_check,
+ .recheck = xfrm_replay_check,
.notify = xfrm_replay_notify,
.overflow = xfrm_replay_overflow,
};
@@ -486,6 +499,7 @@ static struct xfrm_replay xfrm_replay_legacy = {
static struct xfrm_replay xfrm_replay_bmp = {
.advance = xfrm_replay_advance_bmp,
.check = xfrm_replay_check_bmp,
+ .recheck = xfrm_replay_check_bmp,
.notify = xfrm_replay_notify_bmp,
.overflow = xfrm_replay_overflow_bmp,
};
@@ -493,6 +507,7 @@ static struct xfrm_replay xfrm_replay_bmp = {
static struct xfrm_replay xfrm_replay_esn = {
.advance = xfrm_replay_advance_esn,
.check = xfrm_replay_check_esn,
+ .recheck = xfrm_replay_recheck_esn,
.notify = xfrm_replay_notify_bmp,
.overflow = xfrm_replay_overflow_esn,
};
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 87cd0e4d4282..210be48d8ae3 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1994,8 +1994,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
goto error;
x->outer_mode = xfrm_get_mode(x->props.mode, family);
- if (x->outer_mode == NULL)
+ if (x->outer_mode == NULL) {
+ err = -EPROTONOSUPPORT;
goto error;
+ }
if (init_replay) {
err = xfrm_init_replay(x);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index e75d8e47f35c..289f4bf18ff0 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -123,9 +123,21 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
+ struct xfrm_replay_state_esn *rs;
- if ((p->flags & XFRM_STATE_ESN) && !rt)
- return -EINVAL;
+ if (p->flags & XFRM_STATE_ESN) {
+ if (!rt)
+ return -EINVAL;
+
+ rs = nla_data(rt);
+
+ if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
+ return -EINVAL;
+
+ if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
+ nla_len(rt) != sizeof(*rs))
+ return -EINVAL;
+ }
if (!rt)
return 0;
@@ -370,14 +382,15 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
struct nlattr *rp)
{
struct xfrm_replay_state_esn *up;
+ int ulen;
if (!replay_esn || !rp)
return 0;
up = nla_data(rp);
+ ulen = xfrm_replay_state_esn_len(up);
- if (xfrm_replay_state_esn_len(replay_esn) !=
- xfrm_replay_state_esn_len(up))
+ if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
return -EINVAL;
return 0;
@@ -388,22 +401,28 @@ static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn
struct nlattr *rta)
{
struct xfrm_replay_state_esn *p, *pp, *up;
+ int klen, ulen;
if (!rta)
return 0;
up = nla_data(rta);
+ klen = xfrm_replay_state_esn_len(up);
+ ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
- p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
+ p = kzalloc(klen, GFP_KERNEL);
if (!p)
return -ENOMEM;
- pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
+ pp = kzalloc(klen, GFP_KERNEL);
if (!pp) {
kfree(p);
return -ENOMEM;
}
+ memcpy(p, up, ulen);
+ memcpy(pp, up, ulen);
+
*replay_esn = p;
*preplay_esn = pp;
@@ -442,10 +461,11 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
* somehow made shareable and move it to xfrm_state.c - JHS
*
*/
-static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
+static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
+ int update_esn)
{
struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
- struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
+ struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
@@ -555,7 +575,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
goto error;
/* override default values from above */
- xfrm_update_ae_params(x, attrs);
+ xfrm_update_ae_params(x, attrs, 0);
return x;
@@ -689,6 +709,7 @@ out:
static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
+ memset(p, 0, sizeof(*p));
memcpy(&p->id, &x->id, sizeof(p->id));
memcpy(&p->sel, &x->sel, sizeof(p->sel));
memcpy(&p->lft, &x->lft, sizeof(p->lft));
@@ -742,7 +763,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
return -EMSGSIZE;
algo = nla_data(nla);
- strcpy(algo->alg_name, auth->alg_name);
+ strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
algo->alg_key_len = auth->alg_key_len;
@@ -878,6 +899,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
{
struct xfrm_dump_info info;
struct sk_buff *skb;
+ int err;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb)
@@ -888,9 +910,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
info.nlmsg_seq = seq;
info.nlmsg_flags = 0;
- if (dump_one_state(x, 0, &info)) {
+ err = dump_one_state(x, 0, &info);
+ if (err) {
kfree_skb(skb);
- return NULL;
+ return ERR_PTR(err);
}
return skb;
@@ -1317,6 +1340,7 @@ static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy
static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
{
+ memset(p, 0, sizeof(*p));
memcpy(&p->sel, &xp->selector, sizeof(p->sel));
memcpy(&p->lft, &xp->lft, sizeof(p->lft));
memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
@@ -1421,6 +1445,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
struct xfrm_user_tmpl *up = &vec[i];
struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
+ memset(up, 0, sizeof(*up));
memcpy(&up->id, &kp->id, sizeof(up->id));
up->family = kp->encap_family;
memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
@@ -1546,6 +1571,7 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
{
struct xfrm_dump_info info;
struct sk_buff *skb;
+ int err;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
@@ -1556,9 +1582,10 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
info.nlmsg_seq = seq;
info.nlmsg_flags = 0;
- if (dump_one_policy(xp, dir, 0, &info) < 0) {
+ err = dump_one_policy(xp, dir, 0, &info);
+ if (err) {
kfree_skb(skb);
- return NULL;
+ return ERR_PTR(err);
}
return skb;
@@ -1822,7 +1849,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
goto out;
spin_lock_bh(&x->lock);
- xfrm_update_ae_params(x, attrs);
+ xfrm_update_ae_params(x, attrs, 1);
spin_unlock_bh(&x->lock);
c.event = nlh->nlmsg_type;