author	Yonghong Song	2017-10-24 08:53:08 +0200
committer	David S. Miller	2017-10-25 03:47:47 +0200
commit	e87c6bc3852b981e71c757be20771546ce9f76f3 (patch)
tree	bad3be630137d8e873f4ad5a1ea77b4aa1853184 /include/linux/bpf.h
parent	bpf: use the same condition in perf event set/free bpf handler (diff)
bpf: permit multiple bpf attachments for a single perf event
This patch enables multiple bpf attachments for a single kprobe/uprobe/
tracepoint trace event. Each trace_event keeps a list of attached perf
events. When an event happens, all attached bpf programs are executed in
the order of attachment.

A global bpf_event_mutex lock is introduced to protect prog_array
attaching and detaching. An alternative would be to introduce a mutex
lock in every trace_event_call structure, but that takes a lot of extra
memory, so the global bpf_event_mutex lock is a good compromise.

The bpf prog detachment involves allocation of memory. If the allocation
fails, a dummy do-nothing program replaces the to-be-detached program in
place.

Signed-off-by: Yonghong Song <yhs@fb.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
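[Editor's note] The commit message states the runtime contract compactly: programs run in attachment order with their return values AND-ed together, and detach falls back to an in-place dummy replacement when allocation fails. Below is a minimal userspace C model of those two behaviors; every name in it (run_array, detach, dummy_prog) is hypothetical, not a kernel API.

	#include <stdio.h>
	#include <stdlib.h>

	typedef unsigned int (*prog_fn)(void *ctx);

	/* Run every attached program in attachment order and AND the
	 * results, mirroring "_ret &= func(*_prog, ctx)" in the macro. */
	static unsigned int run_array(prog_fn *progs, void *ctx)
	{
		unsigned int ret = 1;

		for (; *progs; progs++)
			ret &= (*progs)(ctx);
		return ret;
	}

	/* Do-nothing stand-in: returns 1, so it never changes the
	 * AND-ed result. */
	static unsigned int dummy_prog(void *ctx)
	{
		(void)ctx;
		return 1;
	}

	/* Detach by copying into a smaller array; if that allocation
	 * fails, overwrite the victim in place with the dummy instead. */
	static prog_fn *detach(prog_fn *old, prog_fn victim)
	{
		size_t n = 0, i, j = 0;
		prog_fn *new_arr;

		while (old[n])
			n++;
		new_arr = calloc(n, sizeof(*new_arr)); /* n-1 progs + NULL */
		if (!new_arr) {
			for (i = 0; i < n; i++)
				if (old[i] == victim)
					old[i] = dummy_prog; /* fallback */
			return old;
		}
		for (i = 0; i < n; i++)
			if (old[i] != victim)
				new_arr[j++] = old[i];
		free(old); /* the kernel would RCU-defer this free */
		return new_arr;
	}

	static unsigned int prog_a(void *ctx) { (void)ctx; puts("prog_a"); return 1; }
	static unsigned int prog_b(void *ctx) { (void)ctx; puts("prog_b"); return 1; }

	int main(void)
	{
		prog_fn *arr = calloc(3, sizeof(*arr));

		arr[0] = prog_a;
		arr[1] = prog_b;           /* arr[2] stays NULL: sentinel */
		printf("ret=%u\n", run_array(arr, NULL));
		arr = detach(arr, prog_a); /* only prog_b remains */
		printf("ret=%u\n", run_array(arr, NULL));
		free(arr);
		return 0;
	}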
Diffstat (limited to 'include/linux/bpf.h')
 include/linux/bpf.h | 30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)
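[Editor's note] The header changes below declare the helpers this mechanism rests on: bpf_prog_array_copy() builds a new array from an old one while excluding and/or including a given program, and bpf_prog_array_delete_safe() is the in-place dummy replacement named in the commit message. As a hedged sketch of the attach-side pattern the message implies (symbol and field names are taken from the message and the declarations below, not verified against the rest of the patch; error handling elided):

	/* Sketch only, not the patch's actual attach function. */
	mutex_lock(&bpf_event_mutex);
	old_array = event->tp_event->prog_array;
	ret = bpf_prog_array_copy(old_array, NULL /* exclude none */,
				  prog /* include the new prog */,
				  &new_array);
	if (!ret) {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}
	mutex_unlock(&bpf_event_mutex);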
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 1e334b248ff6..172be7faf7ba 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -273,18 +273,38 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
__u32 __user *prog_ids, u32 cnt);
-#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
+void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
+ struct bpf_prog *old_prog);
+int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
+ struct bpf_prog *exclude_prog,
+ struct bpf_prog *include_prog,
+ struct bpf_prog_array **new_array);
+
+#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
({ \
- struct bpf_prog **_prog; \
+ struct bpf_prog **_prog, *__prog; \
+ struct bpf_prog_array *_array; \
u32 _ret = 1; \
rcu_read_lock(); \
- _prog = rcu_dereference(array)->progs; \
- for (; *_prog; _prog++) \
- _ret &= func(*_prog, ctx); \
+ _array = rcu_dereference(array); \
+ if (unlikely(check_non_null && !_array))\
+ goto _out; \
+ _prog = _array->progs; \
+ while ((__prog = READ_ONCE(*_prog))) { \
+ _ret &= func(__prog, ctx); \
+ _prog++; \
+ } \
+_out: \
rcu_read_unlock(); \
_ret; \
})
+#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
+ __BPF_PROG_RUN_ARRAY(array, ctx, func, false)
+
+#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \
+ __BPF_PROG_RUN_ARRAY(array, ctx, func, true)
+
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
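[Editor's note] Of the two wrappers, BPF_PROG_RUN_ARRAY assumes the array pointer is always valid, while BPF_PROG_RUN_ARRAY_CHECK tolerates a NULL array: the goto skips the loop and _ret stays at its initial value 1, covering the perf-event case where no program is attached yet. A hedged example of how a trace-side caller would consume the checked variant (modeled on trace_call_bpf(), which this patch converts but which is not part of this hunk):

	/* Illustrative only; the surrounding function body is elided. */
	unsigned int ret;

	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);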