author		John Fastabend <john.fastabend@gmail.com>	2017-07-17 18:29:40 +0200
committer	David S. Miller <davem@davemloft.net>	2017-07-17 18:48:06 +0200
commit		11393cc9b9be2a1f61559e6fb9c27bc8fa20b1ff (patch)
tree		9983db89954e9d34d04d16fa023e5df752efd90d /net/core/filter.c
parent		bpf: add bpf_redirect_map helper routine (diff)
xdp: Add batching support to redirect map
For performance reasons we want to avoid updating the tail pointer in
the driver tx ring as much as possible. To accomplish this we add
batching support to the redirect path in XDP. This adds another ndo op
"xdp_flush" that is used to inform the driver that it should bump the
tail pointer on the TX ring.

Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
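To make the contract behind the new ndo op concrete, here is a minimal
driver-side sketch. The mydrv_* names, private-struct fields, and the
doorbell register are illustrative assumptions, not code from any real
driver; only the split itself comes from the patch: ndo_xdp_xmit() just
posts a descriptor, and ndo_xdp_flush() performs the single tail-pointer
write for the whole batch.

	/* Hypothetical driver sketch -- mydrv_* names and fields are
	 * assumptions. The split is the point: xmit only enqueues,
	 * flush writes the doorbell once per batch.
	 */
	static int mydrv_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
	{
		struct mydrv_priv *priv = netdev_priv(dev);

		if (unlikely(!mydrv_tx_ring_has_room(priv)))
			return -ENOSPC;

		/* Post a descriptor for the frame but do NOT bump the tail
		 * pointer; the core calls ndo_xdp_flush() when the batch ends.
		 */
		mydrv_tx_ring_enqueue(priv, xdp->data, xdp->data_end - xdp->data);
		return 0;
	}

	static void mydrv_xdp_flush(struct net_device *dev)
	{
		struct mydrv_priv *priv = netdev_priv(dev);

		/* One MMIO write covers every frame queued since the last flush. */
		writel(priv->tx_next_to_use, priv->tx_tail_reg);
	}

	static const struct net_device_ops mydrv_netdev_ops = {
		.ndo_xdp_xmit	= mydrv_xdp_xmit,
		.ndo_xdp_flush	= mydrv_xdp_flush,
	};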
Diffstat (limited to 'net/core/filter.c')
-rw-r--r--	net/core/filter.c	55
1 file changed, 45 insertions(+), 10 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index e93a558324b5..e23aa6fa1119 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1780,6 +1780,7 @@ struct redirect_info {
u32 ifindex;
u32 flags;
struct bpf_map *map;
+ struct bpf_map *map_to_flush;
};
static DEFINE_PER_CPU(struct redirect_info, redirect_info);
@@ -2438,34 +2439,68 @@ static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
.arg2_type = ARG_ANYTHING,
};
-static int __bpf_tx_xdp(struct net_device *dev, struct xdp_buff *xdp)
+static int __bpf_tx_xdp(struct net_device *dev,
+ struct bpf_map *map,
+ struct xdp_buff *xdp,
+ u32 index)
{
- if (dev->netdev_ops->ndo_xdp_xmit) {
- dev->netdev_ops->ndo_xdp_xmit(dev, xdp);
- return 0;
+ int err;
+
+ if (!dev->netdev_ops->ndo_xdp_xmit) {
+ bpf_warn_invalid_xdp_redirect(dev->ifindex);
+ return -EOPNOTSUPP;
}
- bpf_warn_invalid_xdp_redirect(dev->ifindex);
- return -EOPNOTSUPP;
+
+ err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp);
+ if (err)
+ return err;
+
+ if (map)
+ __dev_map_insert_ctx(map, index);
+ else
+ dev->netdev_ops->ndo_xdp_flush(dev);
+
+ return err;
}
+void xdp_do_flush_map(void)
+{
+ struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+ struct bpf_map *map = ri->map_to_flush;
+
+ ri->map = NULL;
+ ri->map_to_flush = NULL;
+
+ if (map)
+ __dev_map_flush(map);
+}
+EXPORT_SYMBOL_GPL(xdp_do_flush_map);
+
int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
struct bpf_map *map = ri->map;
+ u32 index = ri->ifindex;
struct net_device *fwd;
int err = -EINVAL;
ri->ifindex = 0;
ri->map = NULL;
- fwd = __dev_map_lookup_elem(map, ri->ifindex);
+ fwd = __dev_map_lookup_elem(map, index);
if (!fwd)
goto out;
- trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT);
- err = __bpf_tx_xdp(fwd, xdp);
+ if (ri->map_to_flush && (ri->map_to_flush != map))
+ xdp_do_flush_map();
+
+ err = __bpf_tx_xdp(fwd, map, xdp, index);
+ if (likely(!err))
+ ri->map_to_flush = map;
+
out:
+ trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT);
return err;
}
@@ -2488,7 +2523,7 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT);
- return __bpf_tx_xdp(fwd, xdp);
+ return __bpf_tx_xdp(fwd, NULL, xdp, 0);
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);
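
On the receive side, a driver consuming this API would batch naturally by
flushing once at the end of its NAPI poll. A sketch under the same
assumptions (the mydrv_* helpers are hypothetical; xdp_do_redirect() and
xdp_do_flush_map() are the exported functions from the patch above):

	/* Hypothetical NAPI poll sketch -- mydrv_* helpers are assumptions. */
	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
	{
		struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
		int done = 0;

		while (done < budget) {
			struct xdp_buff xdp;

			if (!mydrv_rx_next_frame(priv, &xdp))
				break;

			if (bpf_prog_run_xdp(priv->xdp_prog, &xdp) == XDP_REDIRECT &&
			    xdp_do_redirect(priv->netdev, &xdp, priv->xdp_prog))
				mydrv_rx_drop(priv, &xdp);	/* redirect failed */

			done++;
		}

		/* Single tail-pointer update for everything queued this poll. */
		xdp_do_flush_map();
		return done;
	}

Deferring the flush to the end of the poll loop is what lets
ri->map_to_flush accumulate an entire budget's worth of frames behind a
single doorbell write.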