-rw-r--r--   kernel/bpf/verifier.c   16
-rw-r--r--   net/core/filter.c       21
2 files changed, 35 insertions, 2 deletions
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d690c7dd1f1a..477b6932c3c1 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4203,6 +4203,22 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                         continue;
                 }
+                if (insn->imm == BPF_FUNC_redirect_map) {
+                        u64 addr = (unsigned long)prog;
+                        struct bpf_insn r4_ld[] = {
+                                BPF_LD_IMM64(BPF_REG_4, addr),
+                                *insn,
+                        };
+                        cnt = ARRAY_SIZE(r4_ld);
+
+                        new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt);
+                        if (!new_prog)
+                                return -ENOMEM;
+
+                        delta += cnt - 1;
+                        env->prog = prog = new_prog;
+                        insn = new_prog->insnsi + i + delta;
+                }
 patch_call_imm:
                 fn = prog->aux->ops->get_func_proto(insn->imm);
                 /* all functions that have prototype and verifier allowed
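As a reading aid (not part of the patch): BPF_LD_IMM64() expands into two struct bpf_insn entries, so the single helper call above becomes a three-instruction sequence and the verifier advances delta by cnt - 1 == 2. Below is a minimal sketch of the before/after instruction stream using only the kernel's own insn macros; the function name example_patched_len is made up purely for illustration and exists nowhere in the tree.

#include <linux/kernel.h>
#include <linux/filter.h>        /* struct bpf_insn, BPF_LD_IMM64, BPF_RAW_INSN */

static int example_patched_len(const struct bpf_prog *prog)
{
        /* What the program author emitted: a plain call to the helper,
         * with map, ifindex and flags already loaded into R1-R3.
         */
        struct bpf_insn call = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                            BPF_FUNC_redirect_map);

        /* What fixup_bpf_calls() leaves behind: a 64-bit immediate load
         * of the owning bpf_prog pointer into R4 (the hidden fourth
         * helper argument), followed by the unchanged call.
         */
        struct bpf_insn patched[] = {
                BPF_LD_IMM64(BPF_REG_4, (unsigned long)prog),   /* 2 insns */
                call,                                           /* original call */
        };

        return ARRAY_SIZE(patched);     /* 3, hence delta += cnt - 1 */
}

Because only the verifier can patch the instruction stream, a program has no way to forge this value; that is what makes the map_owner comparison in xdp_do_redirect_map() below trustworthy.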
diff --git a/net/core/filter.c b/net/core/filter.c
index 5912c738a7b2..0848df2cd9bf 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1794,6 +1794,7 @@ struct redirect_info {
         u32 flags;
         struct bpf_map *map;
         struct bpf_map *map_to_flush;
+        const struct bpf_prog *map_owner;
 };
 static DEFINE_PER_CPU(struct redirect_info, redirect_info);
@@ -1807,7 +1808,6 @@ BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
         ri->ifindex = ifindex;
         ri->flags = flags;
-        ri->map = NULL;
         return TC_ACT_REDIRECT;
 }
@@ -2504,6 +2504,7 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
                                struct bpf_prog *xdp_prog)
 {
         struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+        const struct bpf_prog *map_owner = ri->map_owner;
         struct bpf_map *map = ri->map;
         u32 index = ri->ifindex;
         struct net_device *fwd;
@@ -2511,6 +2512,15 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
         ri->ifindex = 0;
         ri->map = NULL;
+        ri->map_owner = NULL;
+
+        /* This is really only caused by a deliberately crappy
+         * BPF program, normally we would never hit that case,
+         * so no need to inform someone via tracepoints either,
+         * just bail out.
+         */
+        if (unlikely(map_owner != xdp_prog))
+                return -EINVAL;
         fwd = __dev_map_lookup_elem(map, index);
         if (!fwd) {
@@ -2607,6 +2617,8 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
         ri->ifindex = ifindex;
         ri->flags = flags;
+        ri->map = NULL;
+        ri->map_owner = NULL;
         return XDP_REDIRECT;
 }
@@ -2619,7 +2631,8 @@ static const struct bpf_func_proto bpf_xdp_redirect_proto = {
         .arg2_type = ARG_ANYTHING,
 };
-BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags)
+BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
+           const struct bpf_prog *, map_owner)
 {
         struct redirect_info *ri = this_cpu_ptr(&redirect_info);
@@ -2629,10 +2642,14 @@ BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags
         ri->ifindex = ifindex;
         ri->flags = flags;
         ri->map = map;
+        ri->map_owner = map_owner;
         return XDP_REDIRECT;
 }
+/* Note, arg4 is hidden from users and populated by the verifier
+ * with the right pointer.
+ */
 static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
         .func = bpf_xdp_redirect_map,
         .gpl_only = false,
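For context on the new check and the "hidden" arg4 (illustration only, not part of the patch): from BPF program code the helper is still called with three arguments; the fourth is supplied behind the scenes by the verifier rewrite shown above. The sketch below assumes the SEC()/bpf_redirect_map() wrappers and struct bpf_map_def from the kernel's sample bpf_helpers.h header, plus a hypothetical devmap named tx_port.

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") tx_port = {
        .type        = BPF_MAP_TYPE_DEVMAP,
        .key_size    = sizeof(int),
        .value_size  = sizeof(int),
        .max_entries = 64,
};

/* Normal case: the program calls bpf_redirect_map() itself, so the
 * verifier-injected owner pointer matches xdp_prog later on in
 * xdp_do_redirect_map() and the redirect goes through.
 */
SEC("xdp_redirect_ok")
int xdp_redirect_ok(struct xdp_md *ctx)
{
        return bpf_redirect_map(&tx_port, 0, 0);
}

/* The "deliberately crappy" case the in-code comment refers to: the
 * program returns XDP_REDIRECT without calling any redirect helper,
 * so it could only ever consume stale per-CPU redirect_info state
 * left behind by some other program.  With this patch, map_owner no
 * longer matches xdp_prog and xdp_do_redirect_map() returns -EINVAL.
 */
SEC("xdp_redirect_bogus")
int xdp_redirect_bogus(struct xdp_md *ctx)
{
        return XDP_REDIRECT;
}

char _license[] SEC("license") = "GPL";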