author     Pablo Neira Ayuso  2018-02-06 13:22:47 +0100
committer  Pablo Neira Ayuso  2018-02-07 00:58:57 +0100
commit     b408c5b04f82fe4e20bceb8e4f219453d4f21f02 (patch)
tree       cd33d1bcfebc1bc0bdddef0379b57eb28ef20131 /net/netfilter/nf_flow_table.c
parent     netfilter: nft_flow_offload: move flowtable cleanup routines to nf_flow_table (diff)
netfilter: nf_tables: fix flowtable free
Every flow_offload entry is added into the table twice. Because of this, rhashtable_free_and_destroy() can't be used, since it would call kfree for each flow_offload object twice.

This patch cleans up the flowtable via nf_flow_table_iterate() to schedule removal of entries by setting the dying bit, followed by an explicit invocation of the garbage collector to release resources.

Based on patch from Felix Fietkau.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
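For context on why a per-entry free would be wrong: each struct flow_offload carries one tuplehash entry per direction (original and reply), and both entries are inserted into the same rhashtable, so walking the table and freeing every entry would free each flow twice. The user-space sketch below is purely illustrative, not kernel code; it models the table as a two-slot array instead of an rhashtable to show the double-free hazard and the mark-dying-then-collect pattern the patch relies on.

/* Illustrative user-space model, not kernel code: one "flow" owns two
 * table entries (one per direction). Freeing per table entry would free
 * the flow twice; marking it dying and letting a single GC pass release
 * it frees it exactly once. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum dir { DIR_ORIGINAL, DIR_REPLY, DIR_MAX };

struct flow {
	bool dying;
};

int main(void)
{
	struct flow *flow = calloc(1, sizeof(*flow));
	struct flow *table[DIR_MAX];

	/* insertion hashes the flow under both directions */
	table[DIR_ORIGINAL] = flow;
	table[DIR_REPLY] = flow;

	/* Naive teardown -- free(table[i]) for every slot -- would call
	 * free(flow) twice, which is what rhashtable_free_and_destroy()
	 * with a kfree callback would amount to. */

	/* Pass 1: only mark entries as dying (nf_flow_table_do_cleanup). */
	for (int i = 0; i < DIR_MAX; i++)
		table[i]->dying = true;

	/* Pass 2: the collector releases each dying flow once and unlinks
	 * both of its table slots (nf_flow_offload_gc_step). */
	for (int i = 0; i < DIR_MAX; i++) {
		struct flow *victim = table[i];

		if (victim && victim->dying) {
			table[DIR_ORIGINAL] = NULL;
			table[DIR_REPLY] = NULL;
			free(victim);
		}
	}
	printf("flow freed exactly once\n");
	return 0;
}

In the kernel code the GC step removes both directions' tuplehash entries before freeing the flow; the sketch collapses that into clearing the two slots.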
Diffstat (limited to 'net/netfilter/nf_flow_table.c')
-rw-r--r--   net/netfilter/nf_flow_table.c   25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/net/netfilter/nf_flow_table.c b/net/netfilter/nf_flow_table.c
index 04c08f6b9015..c17f1af42daa 100644
--- a/net/netfilter/nf_flow_table.c
+++ b/net/netfilter/nf_flow_table.c
@@ -232,19 +232,16 @@ static inline bool nf_flow_is_dying(const struct flow_offload *flow)
return flow->flags & FLOW_OFFLOAD_DYING;
}
-void nf_flow_offload_work_gc(struct work_struct *work)
+static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
{
struct flow_offload_tuple_rhash *tuplehash;
- struct nf_flowtable *flow_table;
struct rhashtable_iter hti;
struct flow_offload *flow;
int err;
- flow_table = container_of(work, struct nf_flowtable, gc_work.work);
-
err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
if (err)
- goto schedule;
+ return 0;
rhashtable_walk_start(&hti);
@@ -270,7 +267,16 @@ void nf_flow_offload_work_gc(struct work_struct *work)
out:
rhashtable_walk_stop(&hti);
rhashtable_walk_exit(&hti);
-schedule:
+
+ return 1;
+}
+
+void nf_flow_offload_work_gc(struct work_struct *work)
+{
+ struct nf_flowtable *flow_table;
+
+ flow_table = container_of(work, struct nf_flowtable, gc_work.work);
+ nf_flow_offload_gc_step(flow_table);
queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}
EXPORT_SYMBOL_GPL(nf_flow_offload_work_gc);
@@ -449,5 +455,12 @@ void nf_flow_table_cleanup(struct net *net, struct net_device *dev)
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);
+void nf_flow_table_free(struct nf_flowtable *flow_table)
+{
+ nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
+ WARN_ON(!nf_flow_offload_gc_step(flow_table));
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_free);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");