Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c                         |  7
-rw-r--r--  net/bluetooth/hci_conn.c                 |  1
-rw-r--r--  net/bluetooth/l2cap.c                    | 13
-rw-r--r--  net/bridge/br_if.c                       |  6
-rw-r--r--  net/can/bcm.c                            | 19
-rw-r--r--  net/core/dev.c                           | 11
-rw-r--r--  net/core/pktgen.c                        | 32
-rw-r--r--  net/core/skbuff.c                        |  3
-rw-r--r--  net/core/sysctl_net_core.c               |  2
-rw-r--r--  net/core/utils.c                         |  2
-rw-r--r--  net/decnet/sysctl_net_decnet.c           |  7
-rw-r--r--  net/ipv4/ip_fragment.c                   |  2
-rw-r--r--  net/ipv4/ipip.c                          | 32
-rw-r--r--  net/ipv4/ipmr.c                          |  4
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c         |  3
-rw-r--r--  net/ipv4/netfilter/nf_nat_helper.c       | 34
-rw-r--r--  net/ipv4/tcp.c                           | 19
-rw-r--r--  net/mac80211/agg-rx.c                    |  4
-rw-r--r--  net/mac80211/agg-tx.c                    | 35
-rw-r--r--  net/mac80211/ht.c                        |  8
-rw-r--r--  net/mac80211/ieee80211_i.h               | 10
-rw-r--r--  net/mac80211/util.c                      | 19
-rw-r--r--  net/netfilter/nf_conntrack_core.c        |  8
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c   | 64
-rw-r--r--  net/netfilter/nf_log.c                   | 18
-rw-r--r--  net/netfilter/xt_connlimit.c             | 10
-rw-r--r--  net/netfilter/xt_limit.c                 |  2
-rw-r--r--  net/netfilter/xt_osf.c                   |  2
-rw-r--r--  net/rfkill/core.c                        |  1
-rw-r--r--  net/rose/rose_route.c                    | 16
-rw-r--r--  net/sctp/associola.c                     |  4
-rw-r--r--  net/sctp/outqueue.c                      | 10
-rw-r--r--  net/sctp/sm_sideeffect.c                 |  1
-rw-r--r--  net/sctp/sm_statefuns.c                  | 15
-rw-r--r--  net/sctp/socket.c                        | 40
-rw-r--r--  net/sctp/transport.c                     |  8
-rw-r--r--  net/sunrpc/addr.c                        | 18
37 files changed, 263 insertions(+), 227 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8836575f9d79..a29c5ab5815c 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -281,8 +281,11 @@ out_uninit_applicant:
if (ngrp)
vlan_gvrp_uninit_applicant(real_dev);
out_free_group:
- if (ngrp)
- vlan_group_free(ngrp);
+ if (ngrp) {
+ hlist_del_rcu(&ngrp->hlist);
+ /* Free the group, after all cpu's are done. */
+ call_rcu(&ngrp->rcu, vlan_rcu_free);
+ }
return err;
}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index a9750984f772..b7c4224f4e7d 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -211,6 +211,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
conn->type = type;
conn->mode = HCI_CM_ACTIVE;
conn->state = BT_OPEN;
+ conn->auth_type = HCI_AT_GENERAL_BONDING;
conn->power_save = 1;
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 77e9fb130adb..947f8bbb4bb3 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -2205,7 +2205,7 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
- struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
void *ptr = req->data;
BT_DBG("sk %p", sk);
@@ -2394,6 +2394,10 @@ done:
rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
pi->conf_state |= L2CAP_CONF_MODE_DONE;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+ sizeof(rfc), (unsigned long) &rfc);
+
break;
case L2CAP_MODE_STREAMING:
@@ -2401,6 +2405,10 @@ done:
pi->max_pdu_size = rfc.max_pdu_size;
pi->conf_state |= L2CAP_CONF_MODE_DONE;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+ sizeof(rfc), (unsigned long) &rfc);
+
break;
default:
@@ -2410,9 +2418,6 @@ done:
rfc.mode = pi->mode;
}
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
-
if (result == L2CAP_CONF_SUCCESS)
pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index b1b3b0fbf41c..4a9f52732655 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -377,12 +377,16 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
struct net_bridge_port *p;
int err = 0;
- if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER)
+ /* Don't allow bridging non-ethernet like devices */
+ if ((dev->flags & IFF_LOOPBACK) ||
+ dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN)
return -EINVAL;
+ /* No bridging of bridges */
if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
return -ELOOP;
+ /* Device is already being bridged */
if (dev->br_port != NULL)
return -EBUSY;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 597da4f8f888..e8d58f33fe09 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -132,23 +132,27 @@ static inline struct bcm_sock *bcm_sk(const struct sock *sk)
/*
* procfs functions
*/
-static char *bcm_proc_getifname(int ifindex)
+static char *bcm_proc_getifname(char *result, int ifindex)
{
struct net_device *dev;
if (!ifindex)
return "any";
- /* no usage counting */
+ read_lock(&dev_base_lock);
dev = __dev_get_by_index(&init_net, ifindex);
if (dev)
- return dev->name;
+ strcpy(result, dev->name);
+ else
+ strcpy(result, "???");
+ read_unlock(&dev_base_lock);
- return "???";
+ return result;
}
static int bcm_proc_show(struct seq_file *m, void *v)
{
+ char ifname[IFNAMSIZ];
struct sock *sk = (struct sock *)m->private;
struct bcm_sock *bo = bcm_sk(sk);
struct bcm_op *op;
@@ -157,7 +161,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
seq_printf(m, " / sk %p", sk);
seq_printf(m, " / bo %p", bo);
seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
- seq_printf(m, " / bound %s", bcm_proc_getifname(bo->ifindex));
+ seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
seq_printf(m, " <<<\n");
list_for_each_entry(op, &bo->rx_ops, list) {
@@ -169,7 +173,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
continue;
seq_printf(m, "rx_op: %03X %-5s ",
- op->can_id, bcm_proc_getifname(op->ifindex));
+ op->can_id, bcm_proc_getifname(ifname, op->ifindex));
seq_printf(m, "[%d]%c ", op->nframes,
(op->flags & RX_CHECK_DLC)?'d':' ');
if (op->kt_ival1.tv64)
@@ -194,7 +198,8 @@ static int bcm_proc_show(struct seq_file *m, void *v)
list_for_each_entry(op, &bo->tx_ops, list) {
seq_printf(m, "tx_op: %03X %s [%d] ",
- op->can_id, bcm_proc_getifname(op->ifindex),
+ op->can_id,
+ bcm_proc_getifname(ifname, op->ifindex),
op->nframes);
if (op->kt_ival1.tv64)
diff --git a/net/core/dev.c b/net/core/dev.c
index b8f74cfb1bfd..fe10551d3671 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -942,14 +942,15 @@ rollback:
ret = notifier_to_errno(ret);
if (ret) {
- if (err) {
- printk(KERN_ERR
- "%s: name change rollback failed: %d.\n",
- dev->name, ret);
- } else {
+ /* err >= 0 after dev_alloc_name() or stores the first errno */
+ if (err >= 0) {
err = ret;
memcpy(dev->name, oldname, IFNAMSIZ);
goto rollback;
+ } else {
+ printk(KERN_ERR
+ "%s: name change rollback failed: %d.\n",
+ dev->name, ret);
}
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 6eb8d47cbf3a..6e79e96cb4f2 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -363,6 +363,7 @@ struct pktgen_dev {
* device name (not when the inject is
* started as it used to do.)
*/
+ char odevname[32];
struct flow_state *flows;
unsigned cflows; /* Concurrent flows (config) */
unsigned lflow; /* Flow length (config) */
@@ -426,7 +427,7 @@ static const char version[] =
static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
- const char *ifname);
+ const char *ifname, bool exact);
static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
static void pktgen_run_all_threads(void);
static void pktgen_reset_all_threads(void);
@@ -528,7 +529,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
seq_printf(seq,
" frags: %d delay: %llu clone_skb: %d ifname: %s\n",
pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,
- pkt_dev->clone_skb, pkt_dev->odev->name);
+ pkt_dev->clone_skb, pkt_dev->odevname);
seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows,
pkt_dev->lflow);
@@ -1688,13 +1689,13 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
if_lock(t);
list_for_each_entry(pkt_dev, &t->if_list, list)
if (pkt_dev->running)
- seq_printf(seq, "%s ", pkt_dev->odev->name);
+ seq_printf(seq, "%s ", pkt_dev->odevname);
seq_printf(seq, "\nStopped: ");
list_for_each_entry(pkt_dev, &t->if_list, list)
if (!pkt_dev->running)
- seq_printf(seq, "%s ", pkt_dev->odev->name);
+ seq_printf(seq, "%s ", pkt_dev->odevname);
if (t->result[0])
seq_printf(seq, "\nResult: %s\n", t->result);
@@ -1817,9 +1818,10 @@ static struct pktgen_dev *__pktgen_NN_threads(const char *ifname, int remove)
{
struct pktgen_thread *t;
struct pktgen_dev *pkt_dev = NULL;
+ bool exact = (remove == FIND);
list_for_each_entry(t, &pktgen_threads, th_list) {
- pkt_dev = pktgen_find_dev(t, ifname);
+ pkt_dev = pktgen_find_dev(t, ifname, exact);
if (pkt_dev) {
if (remove) {
if_lock(t);
@@ -1994,7 +1996,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
"queue_map_min (zero-based) (%d) exceeds valid range "
"[0 - %d] for (%d) queues on %s, resetting\n",
pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
- pkt_dev->odev->name);
+ pkt_dev->odevname);
pkt_dev->queue_map_min = ntxq - 1;
}
if (pkt_dev->queue_map_max >= ntxq) {
@@ -2002,7 +2004,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
"queue_map_max (zero-based) (%d) exceeds valid range "
"[0 - %d] for (%d) queues on %s, resetting\n",
pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
- pkt_dev->odev->name);
+ pkt_dev->odevname);
pkt_dev->queue_map_max = ntxq - 1;
}
@@ -3262,7 +3264,7 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
if (!pkt_dev->running) {
printk(KERN_WARNING "pktgen: interface: %s is already "
- "stopped\n", pkt_dev->odev->name);
+ "stopped\n", pkt_dev->odevname);
return -EINVAL;
}
@@ -3464,7 +3466,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
default: /* Drivers are not supposed to return other values! */
if (net_ratelimit())
pr_info("pktgen: %s xmit error: %d\n",
- odev->name, ret);
+ pkt_dev->odevname, ret);
pkt_dev->errors++;
/* fallthru */
case NETDEV_TX_LOCKED:
@@ -3566,13 +3568,18 @@ static int pktgen_thread_worker(void *arg)
}
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
- const char *ifname)
+ const char *ifname, bool exact)
{
struct pktgen_dev *p, *pkt_dev = NULL;
- if_lock(t);
+ size_t len = strlen(ifname);
+ if_lock(t);
list_for_each_entry(p, &t->if_list, list)
- if (strncmp(p->odev->name, ifname, IFNAMSIZ) == 0) {
+ if (strncmp(p->odevname, ifname, len) == 0) {
+ if (p->odevname[len]) {
+ if (exact || p->odevname[len] != '@')
+ continue;
+ }
pkt_dev = p;
break;
}
@@ -3628,6 +3635,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
if (!pkt_dev)
return -ENOMEM;
+ strcpy(pkt_dev->odevname, ifname);
pkt_dev->flows = vmalloc(MAX_CFLOWS * sizeof(struct flow_state));
if (pkt_dev->flows == NULL) {
kfree(pkt_dev);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 80a96166df39..ec85681a7dd8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2701,7 +2701,8 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
NAPI_GRO_CB(skb)->free = 1;
goto done;
- }
+ } else if (skb_gro_len(p) != pinfo->gso_size)
+ return -E2BIG;
headroom = skb_headroom(p);
nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 7db1de0497c6..887c03c4e3c6 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -10,7 +10,9 @@
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
+#include <linux/ratelimit.h>
#include <linux/init.h>
+
#include <net/ip.h>
#include <net/sock.h>
diff --git a/net/core/utils.c b/net/core/utils.c
index 83221aee7084..838250241d26 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -24,6 +24,8 @@
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/init.h>
+#include <linux/ratelimit.h>
+
#include <net/sock.h>
#include <asm/byteorder.h>
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index 26b0ab1e9f56..2036568beea9 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -263,11 +263,10 @@ static int dn_def_dev_strategy(ctl_table *table,
return -ENODEV;
rv = -ENODEV;
- if (dev->dn_ptr != NULL) {
+ if (dev->dn_ptr != NULL)
rv = dn_dev_set_default(dev, 1);
- if (rv)
- dev_put(dev);
- }
+ if (rv)
+ dev_put(dev);
}
return rv;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 575f9bd51ccd..d3fe10be7219 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -563,7 +563,7 @@ out_oversize:
printk(KERN_INFO "Oversized IP packet from %pI4.\n",
&qp->saddr);
out_fail:
- IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS);
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
return err;
}
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 08ccd344de7a..ae40ed1ba560 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -438,25 +438,27 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_error;
}
- if (tiph->frag_off)
+ df |= old_iph->frag_off & htons(IP_DF);
+
+ if (df) {
mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
- else
- mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
- if (mtu < 68) {
- stats->collisions++;
- ip_rt_put(rt);
- goto tx_error;
- }
- if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ if (mtu < 68) {
+ stats->collisions++;
+ ip_rt_put(rt);
+ goto tx_error;
+ }
- df |= (old_iph->frag_off&htons(IP_DF));
+ if (skb_dst(skb))
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
- if ((old_iph->frag_off&htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) {
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
- ip_rt_put(rt);
- goto tx_error;
+ if ((old_iph->frag_off & htons(IP_DF)) &&
+ mtu < ntohs(old_iph->tot_len)) {
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(mtu));
+ ip_rt_put(rt);
+ goto tx_error;
+ }
}
if (tunnel->err_count > 0) {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 630a56df7b47..99508d66a642 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -483,8 +483,10 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
return -EINVAL;
}
- if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
+ if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
+ dev_put(dev);
return -EADDRNOTAVAIL;
+ }
IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
ip_rt_multicast_event(in_dev);
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 68afc6ecd343..fe1a64479dd0 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -750,6 +750,8 @@ static int __init nf_nat_init(void)
BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
rcu_assign_pointer(nfnetlink_parse_nat_setup_hook,
nfnetlink_parse_nat_setup);
+ BUG_ON(nf_ct_nat_offset != NULL);
+ rcu_assign_pointer(nf_ct_nat_offset, nf_nat_get_offset);
return 0;
cleanup_extend:
@@ -764,6 +766,7 @@ static void __exit nf_nat_cleanup(void)
nf_ct_extend_unregister(&nat_extend);
rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL);
+ rcu_assign_pointer(nf_ct_nat_offset, NULL);
synchronize_net();
}
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 09172a65d9b6..f9520fa3aba9 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -73,6 +73,28 @@ adjust_tcp_sequence(u32 seq,
DUMP_OFFSET(this_way);
}
+/* Get the offset value, for conntrack */
+s16 nf_nat_get_offset(const struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ u32 seq)
+{
+ struct nf_conn_nat *nat = nfct_nat(ct);
+ struct nf_nat_seq *this_way;
+ s16 offset;
+
+ if (!nat)
+ return 0;
+
+ this_way = &nat->seq[dir];
+ spin_lock_bh(&nf_nat_seqofs_lock);
+ offset = after(seq, this_way->correction_pos)
+ ? this_way->offset_after : this_way->offset_before;
+ spin_unlock_bh(&nf_nat_seqofs_lock);
+
+ return offset;
+}
+EXPORT_SYMBOL_GPL(nf_nat_get_offset);
+
/* Frobs data inside this packet, which is linear. */
static void mangle_contents(struct sk_buff *skb,
unsigned int dataoff,
@@ -189,11 +211,6 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
adjust_tcp_sequence(ntohl(tcph->seq),
(int)rep_len - (int)match_len,
ct, ctinfo);
- /* Tell TCP window tracking about seq change */
- nf_conntrack_tcp_update(skb, ip_hdrlen(skb),
- ct, CTINFO2DIR(ctinfo),
- (int)rep_len - (int)match_len);
-
nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
}
return 1;
@@ -415,12 +432,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
tcph->seq = newseq;
tcph->ack_seq = newack;
- if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo))
- return 0;
-
- nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir, seqoff);
-
- return 1;
+ return nf_nat_sack_adjust(skb, tcph, ct, ctinfo);
}
/* Setup NAT on this expected conntrack so it follows master. */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 98440ad82558..f1813bc71088 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1183,7 +1183,9 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
#if TCP_DEBUG
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
- WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
+ WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
+ KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+ tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
#endif
if (inet_csk_ack_scheduled(sk)) {
@@ -1430,11 +1432,13 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
/* Now that we have two receive queues this
* shouldn't happen.
*/
- if (before(*seq, TCP_SKB_CB(skb)->seq)) {
- printk(KERN_INFO "recvmsg bug: copied %X "
- "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
+ if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
+ KERN_INFO "recvmsg bug: copied %X "
+ "seq %X rcvnxt %X fl %X\n", *seq,
+ TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
+ flags))
break;
- }
+
offset = *seq - TCP_SKB_CB(skb)->seq;
if (tcp_hdr(skb)->syn)
offset--;
@@ -1443,8 +1447,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (tcp_hdr(skb)->fin)
goto found_fin_ok;
WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
- "copied %X seq %X\n", *seq,
- TCP_SKB_CB(skb)->seq);
+ "copied %X seq %X rcvnxt %X fl %X\n",
+ *seq, TCP_SKB_CB(skb)->seq,
+ tp->rcv_nxt, flags);
}
/* Well, if we have backlog, try to process it now yet. */
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index bc064d7933ff..ce8e0e772bab 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -85,10 +85,6 @@ void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *r
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
- /* stop HW Rx aggregation. ampdu_action existence
- * already verified in session init so we add the BUG_ON */
- BUG_ON(!local->ops->ampdu_action);
-
rcu_read_lock();
sta = sta_info_get(local, ra);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index b09948ceec4a..89e238b001de 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -123,13 +123,18 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
ieee80211_tx_skb(sdata, skb, 0);
}
-static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
- enum ieee80211_back_parties initiator)
+int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
+ enum ieee80211_back_parties initiator)
{
struct ieee80211_local *local = sta->local;
int ret;
u8 *state;
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
+ sta->sta.addr, tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+
state = &sta->ampdu_mlme.tid_state_tx[tid];
if (*state == HT_AGG_STATE_OPERATIONAL)
@@ -143,7 +148,6 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
/* HW shall not deny going back to legacy */
if (WARN_ON(ret)) {
- *state = HT_AGG_STATE_OPERATIONAL;
/*
* We may have pending packets get stuck in this case...
* Not bothering with a workaround for now.
@@ -173,12 +177,14 @@ static void sta_addba_resp_timer_expired(unsigned long data)
/* check if the TID waits for addBA response */
spin_lock_bh(&sta->lock);
- if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
+ if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK)) !=
+ HT_ADDBA_REQUESTED_MSK) {
spin_unlock_bh(&sta->lock);
*state = HT_AGG_STATE_IDLE;
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "timer expired on tid %d but we are not "
- "expecting addBA response there", tid);
+ "(or no longer) expecting addBA response there",
+ tid);
#endif
return;
}
@@ -523,11 +529,6 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
goto unlock;
}
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
- sta->sta.addr, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-
ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator);
unlock:
@@ -543,7 +544,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
struct sta_info *sta;
int ret = 0;
- if (WARN_ON(!local->ops->ampdu_action))
+ if (!local->ops->ampdu_action)
return -EINVAL;
if (tid >= STA_TID_NUM)
@@ -666,21 +667,21 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
state = &sta->ampdu_mlme.tid_state_tx[tid];
- del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
-
spin_lock_bh(&sta->lock);
if (!(*state & HT_ADDBA_REQUESTED_MSK))
- goto timer_still_needed;
+ goto out;
if (mgmt->u.action.u.addba_resp.dialog_token !=
sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- goto timer_still_needed;
+ goto out;
}
+ del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
+
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
@@ -699,10 +700,6 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
}
- goto out;
-
- timer_still_needed:
- add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
out:
spin_unlock_bh(&sta->lock);
}
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 48ef1a282b91..cdc58e61d921 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -141,7 +141,6 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee80211_mgmt *mgmt, size_t len)
{
- struct ieee80211_local *local = sdata->local;
u16 tid, params;
u16 initiator;
@@ -161,10 +160,9 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
WLAN_BACK_INITIATOR, 0);
else { /* WLAN_BACK_RECIPIENT */
spin_lock_bh(&sta->lock);
- sta->ampdu_mlme.tid_state_tx[tid] =
- HT_AGG_STATE_OPERATIONAL;
+ if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)
+ ___ieee80211_stop_tx_ba_session(sta, tid,
+ WLAN_BACK_RECIPIENT);
spin_unlock_bh(&sta->lock);
- ieee80211_stop_tx_ba_session(&local->hw, sta->sta.addr, tid,
- WLAN_BACK_RECIPIENT);
}
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 588005c84a6d..10d316e455de 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -662,6 +662,14 @@ struct ieee80211_local {
bool suspended;
/*
+ * Resuming is true while suspended, but when we're reprogramming the
+ * hardware -- at that time it's allowed to use ieee80211_queue_work()
+ * again even though some other parts of the stack are still suspended
+ * and we still drop received frames to avoid waking the stack.
+ */
+ bool resuming;
+
+ /*
* quiescing is true during the suspend process _only_ to
* ease timer cancelling etc.
*/
@@ -1083,6 +1091,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
enum ieee80211_back_parties initiator);
+int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
+ enum ieee80211_back_parties initiator);
/* Spectrum management */
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index aeb65b3d2295..e6c08da8da26 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -520,9 +520,9 @@ EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
*/
static bool ieee80211_can_queue_work(struct ieee80211_local *local)
{
- if (WARN(local->suspended, "queueing ieee80211 work while "
- "going to suspend\n"))
- return false;
+ if (WARN(local->suspended && !local->resuming,
+ "queueing ieee80211 work while going to suspend\n"))
+ return false;
return true;
}
@@ -1025,13 +1025,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
struct sta_info *sta;
unsigned long flags;
int res;
- bool from_suspend = local->suspended;
- /*
- * We're going to start the hardware, at that point
- * we are no longer suspended and can RX frames.
- */
- local->suspended = false;
+ if (local->suspended)
+ local->resuming = true;
/* restart hardware */
if (local->open_count) {
@@ -1129,11 +1125,14 @@ int ieee80211_reconfig(struct ieee80211_local *local)
* If this is for hw restart things are still running.
* We may want to change that later, however.
*/
- if (!from_suspend)
+ if (!local->suspended)
return 0;
#ifdef CONFIG_PM
+ /* first set suspended false, then resuming */
local->suspended = false;
+ mb();
+ local->resuming = false;
list_for_each_entry(sdata, &local->interfaces, list) {
switch(sdata->vif.type) {
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ca6e68dcd8a8..b9168c1864ca 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1351,6 +1351,11 @@ err_stat:
return ret;
}
+s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ u32 seq);
+EXPORT_SYMBOL_GPL(nf_ct_nat_offset);
+
int nf_conntrack_init(struct net *net)
{
int ret;
@@ -1368,6 +1373,9 @@ int nf_conntrack_init(struct net *net)
/* For use by REJECT target */
rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);
+
+ /* Howto get NAT offsets */
+ rcu_assign_pointer(nf_ct_nat_offset, NULL);
}
return 0;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 97a82ba75376..ba2b76937283 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -492,6 +492,21 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
}
}
+#ifdef CONFIG_NF_NAT_NEEDED
+static inline s16 nat_offset(const struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ u32 seq)
+{
+ typeof(nf_ct_nat_offset) get_offset = rcu_dereference(nf_ct_nat_offset);
+
+ return get_offset != NULL ? get_offset(ct, dir, seq) : 0;
+}
+#define NAT_OFFSET(pf, ct, dir, seq) \
+ (pf == NFPROTO_IPV4 ? nat_offset(ct, dir, seq) : 0)
+#else
+#define NAT_OFFSET(pf, ct, dir, seq) 0
+#endif
+
static bool tcp_in_window(const struct nf_conn *ct,
struct ip_ct_tcp *state,
enum ip_conntrack_dir dir,
@@ -506,6 +521,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
__u32 seq, ack, sack, end, win, swin;
+ s16 receiver_offset;
bool res;
/*
@@ -519,11 +535,16 @@ static bool tcp_in_window(const struct nf_conn *ct,
if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
tcp_sack(skb, dataoff, tcph, &sack);
+ /* Take into account NAT sequence number mangling */
+ receiver_offset = NAT_OFFSET(pf, ct, !dir, ack - 1);
+ ack -= receiver_offset;
+ sack -= receiver_offset;
+
pr_debug("tcp_in_window: START\n");
pr_debug("tcp_in_window: ");
nf_ct_dump_tuple(tuple);
- pr_debug("seq=%u ack=%u sack=%u win=%u end=%u\n",
- seq, ack, sack, win, end);
+ pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
+ seq, ack, receiver_offset, sack, receiver_offset, win, end);
pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
"receiver end=%u maxend=%u maxwin=%u scale=%i\n",
sender->td_end, sender->td_maxend, sender->td_maxwin,
@@ -613,8 +634,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
pr_debug("tcp_in_window: ");
nf_ct_dump_tuple(tuple);
- pr_debug("seq=%u ack=%u sack =%u win=%u end=%u\n",
- seq, ack, sack, win, end);
+ pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
+ seq, ack, receiver_offset, sack, receiver_offset, win, end);
pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
"receiver end=%u maxend=%u maxwin=%u scale=%i\n",
sender->td_end, sender->td_maxend, sender->td_maxwin,
@@ -700,7 +721,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
before(seq, sender->td_maxend + 1) ?
after(end, sender->td_end - receiver->td_maxwin - 1) ?
before(sack, receiver->td_end + 1) ?
- after(ack, receiver->td_end - MAXACKWINDOW(sender)) ? "BUG"
+ after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
: "ACK is under the lower bound (possible overly delayed ACK)"
: "ACK is over the upper bound (ACKed data not seen yet)"
: "SEQ is under the lower bound (already ACKed data retransmitted)"
@@ -715,39 +736,6 @@ static bool tcp_in_window(const struct nf_conn *ct,
return res;
}
-#ifdef CONFIG_NF_NAT_NEEDED
-/* Update sender->td_end after NAT successfully mangled the packet */
-/* Caller must linearize skb at tcp header. */
-void nf_conntrack_tcp_update(const struct sk_buff *skb,
- unsigned int dataoff,
- struct nf_conn *ct, int dir,
- s16 offset)
-{
- const struct tcphdr *tcph = (const void *)skb->data + dataoff;
- const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[dir];
- const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[!dir];
- __u32 end;
-
- end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, dataoff, tcph);
-
- spin_lock_bh(&ct->lock);
- /*
- * We have to worry for the ack in the reply packet only...
- */
- if (ct->proto.tcp.seen[dir].td_end + offset == end)
- ct->proto.tcp.seen[dir].td_end = end;
- ct->proto.tcp.last_end = end;
- spin_unlock_bh(&ct->lock);
- pr_debug("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i "
- "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
- sender->td_end, sender->td_maxend, sender->td_maxwin,
- sender->td_scale,
- receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
- receiver->td_scale);
-}
-EXPORT_SYMBOL_GPL(nf_conntrack_tcp_update);
-#endif
-
#define TH_FIN 0x01
#define TH_SYN 0x02
#define TH_RST 0x04
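
Editorial aside (not part of the patch): the nf_ct_nat_offset hook introduced above is a small publish/consume function-pointer pattern -- nf_nat publishes a provider with rcu_assign_pointer() in nf_nat_init(), and the TCP tracker reads it through rcu_dereference() in NAT_OFFSET(), falling back to an offset of 0 when no provider is loaded. The following is a minimal, self-contained userspace sketch of the same idea; it uses C11 atomics in place of the kernel's RCU primitives, and every name in it (offset_fn, nat_offset_hook, demo_offset) is illustrative rather than kernel API.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical hook type modelling nf_ct_nat_offset: report a sequence
 * number offset for one direction of a flow, 0 meaning "no adjustment". */
typedef int16_t (*offset_fn)(uint32_t seq);

/* Published provider pointer; NULL (the default) means no provider. */
static _Atomic(offset_fn) nat_offset_hook;

/* Stand-in for nf_nat_get_offset(): returns an arbitrary demo offset. */
static int16_t demo_offset(uint32_t seq)
{
	return (seq & 1) ? 4 : 0;
}

/* Consumer side, mirroring nat_offset()/NAT_OFFSET(): read the hook once
 * and fall back to 0 when it has not been published. */
static int16_t nat_offset(uint32_t seq)
{
	offset_fn fn = atomic_load_explicit(&nat_offset_hook,
					    memory_order_acquire);

	return fn ? fn(seq) : 0;
}

int main(void)
{
	printf("before registration: %d\n", nat_offset(3));

	/* Publisher side, mirroring rcu_assign_pointer() in nf_nat_init(). */
	atomic_store_explicit(&nat_offset_hook, demo_offset,
			      memory_order_release);

	printf("after registration:  %d\n", nat_offset(3));
	return 0;
}
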
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index c93494fef8ef..d65d3481919c 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -128,9 +128,8 @@ EXPORT_SYMBOL(nf_log_packet);
#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(RCU)
{
- rcu_read_lock();
+ mutex_lock(&nf_log_mutex);
if (*pos >= ARRAY_SIZE(nf_loggers))
return NULL;
@@ -149,9 +148,8 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
}
static void seq_stop(struct seq_file *s, void *v)
- __releases(RCU)
{
- rcu_read_unlock();
+ mutex_unlock(&nf_log_mutex);
}
static int seq_show(struct seq_file *s, void *v)
@@ -161,7 +159,7 @@ static int seq_show(struct seq_file *s, void *v)
struct nf_logger *t;
int ret;
- logger = rcu_dereference(nf_loggers[*pos]);
+ logger = nf_loggers[*pos];
if (!logger)
ret = seq_printf(s, "%2lld NONE (", *pos);
@@ -171,22 +169,16 @@ static int seq_show(struct seq_file *s, void *v)
if (ret < 0)
return ret;
- mutex_lock(&nf_log_mutex);
list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) {
ret = seq_printf(s, "%s", t->name);
- if (ret < 0) {
- mutex_unlock(&nf_log_mutex);
+ if (ret < 0)
return ret;
- }
if (&t->list[*pos] != nf_loggers_l[*pos].prev) {
ret = seq_printf(s, ",");
- if (ret < 0) {
- mutex_unlock(&nf_log_mutex);
+ if (ret < 0)
return ret;
- }
}
}
- mutex_unlock(&nf_log_mutex);
return seq_printf(s, ")\n");
}
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 680980954395..38f03f75a636 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -103,7 +103,7 @@ static int count_them(struct xt_connlimit_data *data,
const struct nf_conntrack_tuple *tuple,
const union nf_inet_addr *addr,
const union nf_inet_addr *mask,
- const struct xt_match *match)
+ u_int8_t family)
{
const struct nf_conntrack_tuple_hash *found;
struct xt_connlimit_conn *conn;
@@ -113,8 +113,7 @@ static int count_them(struct xt_connlimit_data *data,
bool addit = true;
int matches = 0;
-
- if (match->family == NFPROTO_IPV6)
+ if (family == NFPROTO_IPV6)
hash = &data->iphash[connlimit_iphash6(addr, mask)];
else
hash = &data->iphash[connlimit_iphash(addr->ip & mask->ip)];
@@ -157,8 +156,7 @@ static int count_them(struct xt_connlimit_data *data,
continue;
}
- if (same_source_net(addr, mask, &conn->tuple.src.u3,
- match->family))
+ if (same_source_net(addr, mask, &conn->tuple.src.u3, family))
/* same source network -> be counted! */
++matches;
nf_ct_put(found_ct);
@@ -207,7 +205,7 @@ connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
spin_lock_bh(&info->data->lock);
connections = count_them(info->data, tuple_ptr, &addr,
- &info->mask, par->match);
+ &info->mask, par->family);
spin_unlock_bh(&info->data->lock);
if (connections < 0) {
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 2e8089ecd0af..2773be6a71dd 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -112,7 +112,7 @@ static bool limit_mt_check(const struct xt_mtchk_param *par)
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
- return -ENOMEM;
+ return false;
/* For SMP, we only want to use one set of state. */
r->master = priv;
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 63e190504656..4d1a41bbd5d7 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -118,7 +118,7 @@ static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb,
{
struct xt_osf_user_finger *f;
struct xt_osf_finger *sf;
- int err = ENOENT;
+ int err = -ENOENT;
if (!osf_attrs[OSF_ATTR_FINGER])
return -EINVAL;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index ba2efb960c60..a001f7c1f711 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1189,6 +1189,7 @@ static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
#endif
static const struct file_operations rfkill_fops = {
+ .owner = THIS_MODULE,
.open = rfkill_fop_open,
.read = rfkill_fop_read,
.write = rfkill_fop_write,
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 9478d9b3d977..f3e21989b88c 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -578,18 +578,18 @@ static int rose_clear_routes(void)
/*
* Check that the device given is a valid AX.25 interface that is "up".
+ * called with RTNL
*/
-static struct net_device *rose_ax25_dev_get(char *devname)
+static struct net_device *rose_ax25_dev_find(char *devname)
{
struct net_device *dev;
- if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
+ if ((dev = __dev_get_by_name(&init_net, devname)) == NULL)
return NULL;
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
return dev;
- dev_put(dev);
return NULL;
}
@@ -720,27 +720,23 @@ int rose_rt_ioctl(unsigned int cmd, void __user *arg)
case SIOCADDRT:
if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
return -EFAULT;
- if ((dev = rose_ax25_dev_get(rose_route.device)) == NULL)
+ if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
return -EINVAL;
- if (rose_dev_exists(&rose_route.address)) { /* Can't add routes to ourself */
- dev_put(dev);
+ if (rose_dev_exists(&rose_route.address)) /* Can't add routes to ourself */
return -EINVAL;
- }
if (rose_route.mask > 10) /* Mask can't be more than 10 digits */
return -EINVAL;
if (rose_route.ndigis > AX25_MAX_DIGIS)
return -EINVAL;
err = rose_add_node(&rose_route, dev);
- dev_put(dev);
return err;
case SIOCDELRT:
if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
return -EFAULT;
- if ((dev = rose_ax25_dev_get(rose_route.device)) == NULL)
+ if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
return -EINVAL;
err = rose_del_node(&rose_route, dev);
- dev_put(dev);
return err;
case SIOCRSCLRRT:
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 8450960df24f..7eed77a39d0d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1485,15 +1485,13 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
* local endpoint and the remote peer.
*/
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
- gfp_t gfp)
+ sctp_scope_t scope, gfp_t gfp)
{
- sctp_scope_t scope;
int flags;
/* Use scoping rules to determine the subset of addresses from
* the endpoint.
*/
- scope = sctp_scope(&asoc->peer.active_path->ipaddr);
flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
if (asoc->peer.ipv4_address)
flags |= SCTP_ADDR4_PEERSUPP;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index c9f20e28521b..23e5e97aa617 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -423,16 +423,6 @@ void sctp_retransmit_mark(struct sctp_outq *q,
if ((reason == SCTP_RTXR_FAST_RTX &&
(chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
(reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
- /* If this chunk was sent less then 1 rto ago, do not
- * retransmit this chunk, but give the peer time
- * to acknowlege it. Do this only when
- * retransmitting due to T3 timeout.
- */
- if (reason == SCTP_RTXR_T3_RTX &&
- time_before(jiffies, chunk->sent_at +
- transport->last_rto))
- continue;
-
/* RFC 2960 6.2.1 Processing a Received SACK
*
* C) Any time a DATA chunk is marked for
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 29d8501bf156..9bc1bfc64b69 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -480,7 +480,6 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
* that indicates that we have an outstanding HB.
*/
if (!is_hb || transport->hb_sent) {
- transport->last_rto = transport->rto;
transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
}
}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index ba2f66d5f0cd..dcc2a84c647f 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -384,6 +384,11 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
if (!new_asoc)
goto nomem;
+ if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
+ sctp_scope(sctp_source(chunk)),
+ GFP_ATOMIC) < 0)
+ goto nomem_init;
+
/* The call, sctp_process_init(), can fail on memory allocation. */
if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
sctp_source(chunk),
@@ -401,9 +406,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
len = ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t);
- if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
- goto nomem_init;
-
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
if (!repl)
goto nomem_init;
@@ -1452,6 +1454,10 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
if (!new_asoc)
goto nomem;
+ if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
+ sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
+ goto nomem;
+
/* In the outbound INIT ACK the endpoint MUST copy its current
* Verification Tag and Peers Verification tag into a reserved
* place (local tie-tag and per tie-tag) within the state cookie.
@@ -1488,9 +1494,6 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
sizeof(sctp_chunkhdr_t);
}
- if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
- goto nomem;
-
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
if (!repl)
goto nomem;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c8d05758661d..3a95fcb17a9e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1080,6 +1080,13 @@ static int __sctp_connect(struct sock* sk,
err = -ENOMEM;
goto out_free;
}
+
+ err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
+ GFP_KERNEL);
+ if (err < 0) {
+ goto out_free;
+ }
+
}
/* Prime the peer's transport structures. */
@@ -1095,11 +1102,6 @@ static int __sctp_connect(struct sock* sk,
walk_size += af->sockaddr_len;
}
- err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL);
- if (err < 0) {
- goto out_free;
- }
-
/* In case the user of sctp_connectx() wants an association
* id back, assign one now.
*/
@@ -1274,22 +1276,30 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
}
/*
- * New (hopefully final) interface for the API. The option buffer is used
- * both for the returned association id and the addresses.
+ * New (hopefully final) interface for the API.
+ * We use the sctp_getaddrs_old structure so that the user-space library
+ * can avoid any unnecessary allocations. The only different part
+ * is that we store the actual length of the address buffer into the
+ * addrs_num structure member. That way we can re-use the existing
+ * code.
*/
SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
char __user *optval,
int __user *optlen)
{
+ struct sctp_getaddrs_old param;
sctp_assoc_t assoc_id = 0;
int err = 0;
- if (len < sizeof(assoc_id))
+ if (len < sizeof(param))
return -EINVAL;
+ if (copy_from_user(&param, optval, sizeof(param)))
+ return -EFAULT;
+
err = __sctp_setsockopt_connectx(sk,
- (struct sockaddr __user *)(optval + sizeof(assoc_id)),
- len - sizeof(assoc_id), &assoc_id);
+ (struct sockaddr __user *)param.addrs,
+ param.addr_num, &assoc_id);
if (err == 0 || err == -EINPROGRESS) {
if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
@@ -1689,6 +1699,11 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_unlock;
}
asoc = new_asoc;
+ err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
+ if (err < 0) {
+ err = -ENOMEM;
+ goto out_free;
+ }
/* If the SCTP_INIT ancillary data is specified, set all
* the association init values accordingly.
@@ -1718,11 +1733,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
err = -ENOMEM;
goto out_free;
}
- err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL);
- if (err < 0) {
- err = -ENOMEM;
- goto out_free;
- }
}
/* ASSERT: we have a valid association at this point. */
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index c256e4839316..37a1184d789f 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -74,7 +74,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
* given destination transport address, set RTO to the protocol
* parameter 'RTO.Initial'.
*/
- peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial);
+ peer->rto = msecs_to_jiffies(sctp_rto_initial);
peer->rtt = 0;
peer->rttvar = 0;
peer->srtt = 0;
@@ -308,7 +308,8 @@ void sctp_transport_route(struct sctp_transport *transport,
/* Initialize sk->sk_rcv_saddr, if the transport is the
* association's active path for getsockname().
*/
- if (asoc && (transport == asoc->peer.active_path))
+ if (asoc && (!asoc->peer.primary_path ||
+ (transport == asoc->peer.active_path)))
opt->pf->af->to_sk_saddr(&transport->saddr,
asoc->base.sk);
} else
@@ -385,7 +386,6 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
tp->rto = tp->asoc->rto_max;
tp->rtt = rtt;
- tp->last_rto = tp->rto;
/* Reset rto_pending so that a new RTT measurement is started when a
* new data chunk is sent.
@@ -601,7 +601,7 @@ void sctp_transport_reset(struct sctp_transport *t)
*/
t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
t->ssthresh = asoc->peer.i.a_rwnd;
- t->last_rto = t->rto = asoc->rto_initial;
+ t->rto = asoc->rto_initial;
t->rtt = 0;
t->srtt = 0;
t->rttvar = 0;
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 22e8fd89477f..c7450c8f0a7c 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -306,24 +306,25 @@ EXPORT_SYMBOL_GPL(rpc_sockaddr2uaddr);
* @sap: buffer into which to plant socket address
* @salen: size of buffer
*
+ * @uaddr does not have to be '\0'-terminated, but strict_strtoul() and
+ * rpc_pton() require proper string termination to be successful.
+ *
* Returns the size of the socket address if successful; otherwise
* zero is returned.
*/
size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
struct sockaddr *sap, const size_t salen)
{
- char *c, buf[RPCBIND_MAXUADDRLEN];
+ char *c, buf[RPCBIND_MAXUADDRLEN + sizeof('\0')];
unsigned long portlo, porthi;
unsigned short port;
- if (uaddr_len > sizeof(buf))
+ if (uaddr_len > RPCBIND_MAXUADDRLEN)
return 0;
memcpy(buf, uaddr, uaddr_len);
- buf[uaddr_len] = '\n';
- buf[uaddr_len + 1] = '\0';
-
+ buf[uaddr_len] = '\0';
c = strrchr(buf, '.');
if (unlikely(c == NULL))
return 0;
@@ -332,9 +333,7 @@ size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
if (unlikely(portlo > 255))
return 0;
- c[0] = '\n';
- c[1] = '\0';
-
+ *c = '\0';
c = strrchr(buf, '.');
if (unlikely(c == NULL))
return 0;
@@ -345,8 +344,7 @@ size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
port = (unsigned short)((porthi << 8) | portlo);
- c[0] = '\0';
-
+ *c = '\0';
if (rpc_pton(buf, strlen(buf), sap, salen) == 0)
return 0;
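
Editorial aside (not part of the patch): the rpc_uaddr2sockaddr() change above drops the old '\n' terminators, copies the possibly unterminated universal address into a local buffer with a spare byte, NUL-terminates it, and then peels the two trailing port octets off with strrchr() before handing the rest to rpc_pton(). Below is a minimal standalone sketch of that parsing approach, extracting only the port; the uaddr_port() helper and MAXUADDRLEN limit are made-up names for illustration, not the kernel routine itself.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAXUADDRLEN 32	/* stand-in for RPCBIND_MAXUADDRLEN */

/* Parse the port out of an RPC universal address of the form
 * "a.b.c.d.hi.lo"; uaddr need not be '\0'-terminated.
 * Returns the port number, or -1 on malformed input. */
static int uaddr_port(const char *uaddr, size_t uaddr_len)
{
	char buf[MAXUADDRLEN + 1];
	char *c;
	unsigned long porthi, portlo;

	if (uaddr_len > MAXUADDRLEN)
		return -1;
	memcpy(buf, uaddr, uaddr_len);
	buf[uaddr_len] = '\0';		/* terminate; no '\n' needed */

	c = strrchr(buf, '.');
	if (c == NULL)
		return -1;
	portlo = strtoul(c + 1, NULL, 10);
	if (portlo > 255)
		return -1;
	*c = '\0';			/* chop the low port octet */

	c = strrchr(buf, '.');
	if (c == NULL)
		return -1;
	porthi = strtoul(c + 1, NULL, 10);
	if (porthi > 255)
		return -1;

	return (int)((porthi << 8) | portlo);
}

int main(void)
{
	const char uaddr[] = "192.0.2.1.3.235";	/* port 3 * 256 + 235 = 1003 */

	printf("port = %d\n", uaddr_port(uaddr, sizeof(uaddr) - 1));
	return 0;
}
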