path: root/net/netfilter/nfnetlink_queue.c
author     Pablo Neira Ayuso  2012-06-07 13:31:25 +0200
committer  Pablo Neira Ayuso  2012-06-16 15:09:08 +0200
commit     8c88f87cb27ad09086940bdd3e6955e5325ec89a (patch)
tree       1acfa54ba78602eb4eb053110ffc0a54025318c7 /net/netfilter/nfnetlink_queue.c
parent     netfilter: add glue code to integrate nfnetlink_queue and ctnetlink (diff)
netfilter: nfnetlink_queue: add NAT TCP sequence adjustment if packet mangled
User-space programs that receive traffic via NFQUEUE may mangle packets. If NAT is enabled, this usually confuses TCP sequence tracking, leading to traffic disruptions.

With this patch, nfnetlink_queue will make the corresponding NAT TCP sequence adjustment if:

1) the packet has been mangled,
2) the NFQA_CFG_F_CONNTRACK flag has been set, and
3) NAT is detected.

There are reports on the Internet complaining about this issue:

http://stackoverflow.com/questions/260757/packet-mangling-utilities-besides-iptables

For now, we only support TCP since we have no helpers for DCCP or SCTP; better to add this if we ever have helpers for those layer-4 protocols.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
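For context, here is a minimal user-space sketch (not part of this commit) of the kind of program this change helps: an NFQUEUE listener that may mangle TCP payloads. The NFQA_CFG_F_CONNTRACK flag set below is the one this patch checks, and returning a payload of a different length is what triggers the new sequence adjustment. It assumes a libnetfilter_queue recent enough to provide nfq_set_queue_flags(); error handling is omitted for brevity.

/*
 * Sketch only: NFQUEUE program that mangles packets. With this patch,
 * setting NFQA_CFG_F_CONNTRACK lets the kernel attach conntrack info
 * and perform the NAT TCP sequence adjustment when the payload length
 * returned in the verdict differs from the original.
 */
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netfilter.h>                  /* NF_ACCEPT */
#include <linux/netfilter/nfnetlink_queue.h>  /* NFQA_CFG_F_CONNTRACK */
#include <libnetfilter_queue/libnetfilter_queue.h>

static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
              struct nfq_data *nfa, void *data)
{
        struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
        unsigned char *payload;
        int len = nfq_get_payload(nfa, &payload);

        /* ... mangle 'payload' here; a changed length is what the new
         * kernel-side seq_adjust call compensates for ... */
        return nfq_set_verdict(qh, ntohl(ph->packet_id), NF_ACCEPT,
                               len, payload);
}

int main(void)
{
        struct nfq_handle *h = nfq_open();
        struct nfq_q_handle *qh = nfq_create_queue(h, 0, &cb, NULL);
        char buf[0xffff];
        int fd = nfq_fd(h), n;

        nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);
        /* opt in to conntrack attachment (condition 2 above) */
        nfq_set_queue_flags(qh, NFQA_CFG_F_CONNTRACK, NFQA_CFG_F_CONNTRACK);

        while ((n = recv(fd, buf, sizeof(buf), 0)) >= 0)
                nfq_handle_packet(h, buf, n);

        nfq_destroy_queue(qh);
        nfq_close(h);
        return 0;
}

Traffic reaches such a program through an NFQUEUE rule, e.g. iptables -A FORWARD -p tcp -j NFQUEUE --queue-num 0.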
Diffstat (limited to 'net/netfilter/nfnetlink_queue.c')
-rw-r--r--  net/netfilter/nfnetlink_queue.c  19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 647923ae9230..ff82c7933dfd 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -502,12 +502,10 @@ err_out:
 }
 
 static int
-nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
+nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
 {
         struct sk_buff *nskb;
-        int diff;
 
-        diff = data_len - e->skb->len;
         if (diff < 0) {
                 if (pskb_trim(e->skb, data_len))
                         return -ENOMEM;
@@ -767,6 +765,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
         unsigned int verdict;
         struct nf_queue_entry *entry;
         struct nfq_ct_hook *nfq_ct;
+        enum ip_conntrack_info uninitialized_var(ctinfo);
+        struct nf_conn *ct = NULL;
 
         queue = instance_lookup(queue_num);
         if (!queue)
@@ -789,20 +789,23 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
         nfq_ct = rcu_dereference(nfq_ct_hook);
         if (nfq_ct != NULL &&
             (queue->flags & NFQA_CFG_F_CONNTRACK) && nfqa[NFQA_CT]) {
-                enum ip_conntrack_info ctinfo;
-                struct nf_conn *ct;
-
                 ct = nf_ct_get(entry->skb, &ctinfo);
                 if (ct && !nf_ct_is_untracked(ct))
                         nfq_ct->parse(nfqa[NFQA_CT], ct);
         }
-        rcu_read_unlock();
 
         if (nfqa[NFQA_PAYLOAD]) {
+                u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
+                int diff = payload_len - entry->skb->len;
+
                 if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
-                                 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
+                                 payload_len, entry, diff) < 0)
                         verdict = NF_DROP;
+
+                if (ct && (ct->status & IPS_NAT_MASK) && diff)
+                        nfq_ct->seq_adjust(skb, ct, ctinfo, diff);
         }
+        rcu_read_unlock();
 
         if (nfqa[NFQA_MARK])
                 entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
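For intuition only (not code from this tree): once a mangled packet changes the TCP payload length by 'diff' bytes, every later sequence number on that flow is off by that amount, and that offset is what the seq_adjust call above asks the NAT seqadj machinery to compensate. A toy sketch of the arithmetic, with hypothetical names:

/*
 * Toy illustration with hypothetical names; this is NOT the kernel's
 * nf_nat seqadj implementation. A mangled packet whose payload length
 * changed by 'diff' shifts all later sequence numbers on the flow.
 */
#include <stdint.h>

struct seq_off {
        uint32_t from;  /* first sequence number after the mangled data */
        int32_t  diff;  /* payload length delta (insertions - deletions) */
};

static uint32_t adjust_seq(uint32_t seq, const struct seq_off *o)
{
        /* signed difference handles mod-2^32 sequence wrap-around */
        return (int32_t)(seq - o->from) >= 0 ? seq + o->diff : seq;
}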