Diffstat (limited to 'samples/bpf')
-rw-r--r--  samples/bpf/Makefile                                   14
-rw-r--r--  samples/bpf/bpf_helpers.h                              16
-rw-r--r--  samples/bpf/bpf_load.c                                  7
-rw-r--r--  samples/bpf/libbpf.h                                    8
-rw-r--r--  samples/bpf/sampleip_kern.c                            38
-rw-r--r--  samples/bpf/sampleip_user.c                           196
-rw-r--r--  samples/bpf/sockex2_kern.c                             10
-rw-r--r--  samples/bpf/sockex3_kern.c                              8
-rw-r--r--  samples/bpf/sockex3_user.c                              4
-rw-r--r--  samples/bpf/tcbpf2_kern.c                             381
-rw-r--r--  samples/bpf/test_current_task_under_cgroup_kern.c      43
-rw-r--r--  samples/bpf/test_current_task_under_cgroup_user.c     145
-rwxr-xr-x  samples/bpf/test_ipip.sh                              178
-rwxr-xr-x  samples/bpf/test_tunnel_bpf.sh                        167
-rw-r--r--  samples/bpf/test_verifier.c                           888
-rw-r--r--  samples/bpf/trace_event_kern.c                         65
-rw-r--r--  samples/bpf/trace_event_user.c                        213
-rw-r--r--  samples/bpf/tracex5_kern.c                             16
-rw-r--r--  samples/bpf/tracex5_user.c                              3
19 files changed, 2369 insertions, 31 deletions
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 90ebf7d35c07..12b7304d55dc 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -24,6 +24,9 @@ hostprogs-y += test_overhead
hostprogs-y += test_cgrp2_array_pin
hostprogs-y += xdp1
hostprogs-y += xdp2
+hostprogs-y += test_current_task_under_cgroup
+hostprogs-y += trace_event
+hostprogs-y += sampleip
test_verifier-objs := test_verifier.o libbpf.o
test_maps-objs := test_maps.o libbpf.o
@@ -49,6 +52,10 @@ test_cgrp2_array_pin-objs := libbpf.o test_cgrp2_array_pin.o
xdp1-objs := bpf_load.o libbpf.o xdp1_user.o
# reuse xdp1 source intentionally
xdp2-objs := bpf_load.o libbpf.o xdp1_user.o
+test_current_task_under_cgroup-objs := bpf_load.o libbpf.o \
+ test_current_task_under_cgroup_user.o
+trace_event-objs := bpf_load.o libbpf.o trace_event_user.o
+sampleip-objs := bpf_load.o libbpf.o sampleip_user.o
# Tell kbuild to always build the programs
always := $(hostprogs-y)
@@ -64,6 +71,7 @@ always += tracex6_kern.o
always += test_probe_write_user_kern.o
always += trace_output_kern.o
always += tcbpf1_kern.o
+always += tcbpf2_kern.o
always += lathist_kern.o
always += offwaketime_kern.o
always += spintest_kern.o
@@ -74,6 +82,9 @@ always += parse_varlen.o parse_simple.o parse_ldabs.o
always += test_cgrp2_tc_kern.o
always += xdp1_kern.o
always += xdp2_kern.o
+always += test_current_task_under_cgroup_kern.o
+always += trace_event_kern.o
+always += sampleip_kern.o
HOSTCFLAGS += -I$(objtree)/usr/include
@@ -97,6 +108,9 @@ HOSTLOADLIBES_map_perf_test += -lelf -lrt
HOSTLOADLIBES_test_overhead += -lelf -lrt
HOSTLOADLIBES_xdp1 += -lelf
HOSTLOADLIBES_xdp2 += -lelf
+HOSTLOADLIBES_test_current_task_under_cgroup += -lelf
+HOSTLOADLIBES_trace_event += -lelf
+HOSTLOADLIBES_sampleip += -lelf
# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 7927a090fa0d..90f44bd2045e 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -37,12 +37,26 @@ static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
(void *) BPF_FUNC_clone_redirect;
static int (*bpf_redirect)(int ifindex, int flags) =
(void *) BPF_FUNC_redirect;
-static int (*bpf_perf_event_output)(void *ctx, void *map, int index, void *data, int size) =
+static int (*bpf_perf_event_output)(void *ctx, void *map,
+ unsigned long long flags, void *data,
+ int size) =
(void *) BPF_FUNC_perf_event_output;
static int (*bpf_get_stackid)(void *ctx, void *map, int flags) =
(void *) BPF_FUNC_get_stackid;
static int (*bpf_probe_write_user)(void *dst, void *src, int size) =
(void *) BPF_FUNC_probe_write_user;
+static int (*bpf_current_task_under_cgroup)(void *map, int index) =
+ (void *) BPF_FUNC_current_task_under_cgroup;
+static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size, int flags) =
+ (void *) BPF_FUNC_skb_get_tunnel_key;
+static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size, int flags) =
+ (void *) BPF_FUNC_skb_set_tunnel_key;
+static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, int size) =
+ (void *) BPF_FUNC_skb_get_tunnel_opt;
+static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) =
+ (void *) BPF_FUNC_skb_set_tunnel_opt;
+static unsigned long long (*bpf_get_prandom_u32)(void) =
+ (void *) BPF_FUNC_get_prandom_u32;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
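These helper declarations are plain function pointers cast from the corresponding BPF_FUNC_* IDs, so a program calls them like ordinary C functions. As a purely illustrative sketch (not part of this patch; the program and section names are made up, and the usual samples/bpf includes plus a license section are assumed), the new bpf_get_prandom_u32() could let a socket filter keep roughly one packet in a hundred:

SEC("socket_rand_sample")
int bpf_rand_sample(struct __sk_buff *skb)
{
	/* bpf_get_prandom_u32() returns a pseudo-random 32-bit value */
	if (bpf_get_prandom_u32() % 100 == 0)
		return -1;	/* treated as unsigned: accept the whole packet */
	return 0;		/* accept zero bytes, i.e. drop the packet */
}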
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 0cfda2320320..97913e109b14 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -51,6 +51,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0;
bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0;
bool is_xdp = strncmp(event, "xdp", 3) == 0;
+ bool is_perf_event = strncmp(event, "perf_event", 10) == 0;
enum bpf_prog_type prog_type;
char buf[256];
int fd, efd, err, id;
@@ -69,6 +70,8 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
prog_type = BPF_PROG_TYPE_TRACEPOINT;
} else if (is_xdp) {
prog_type = BPF_PROG_TYPE_XDP;
+ } else if (is_perf_event) {
+ prog_type = BPF_PROG_TYPE_PERF_EVENT;
} else {
printf("Unknown event '%s'\n", event);
return -1;
@@ -82,7 +85,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
prog_fd[prog_cnt++] = fd;
- if (is_xdp)
+ if (is_xdp || is_perf_event)
return 0;
if (is_socket) {
@@ -326,6 +329,7 @@ int load_bpf_file(char *path)
memcmp(shname_prog, "kretprobe/", 10) == 0 ||
memcmp(shname_prog, "tracepoint/", 11) == 0 ||
memcmp(shname_prog, "xdp", 3) == 0 ||
+ memcmp(shname_prog, "perf_event", 10) == 0 ||
memcmp(shname_prog, "socket", 6) == 0)
load_and_attach(shname_prog, insns, data_prog->d_size);
}
@@ -344,6 +348,7 @@ int load_bpf_file(char *path)
memcmp(shname, "kretprobe/", 10) == 0 ||
memcmp(shname, "tracepoint/", 11) == 0 ||
memcmp(shname, "xdp", 3) == 0 ||
+ memcmp(shname, "perf_event", 10) == 0 ||
memcmp(shname, "socket", 6) == 0)
load_and_attach(shname, data->d_buf, data->d_size);
}
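As with "xdp" sections, load_and_attach() now returns right after loading a "perf_event" program instead of auto-attaching it, so the caller is expected to bind the resulting prog_fd[] entry to a perf event it has opened itself. The attach step looks roughly like this (mirroring what sampleip_user.c further below does; pmu_fd is a file descriptor from perf_event_open()):

	ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]);	/* attach the program */
	ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);		/* start sampling */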
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h
index 364582b77888..ac6edb61b64a 100644
--- a/samples/bpf/libbpf.h
+++ b/samples/bpf/libbpf.h
@@ -85,6 +85,14 @@ extern char bpf_log_buf[LOG_BUF_SIZE];
.off = 0, \
.imm = IMM })
+#define BPF_MOV32_IMM(DST, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_MOV | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
BPF_LD_IMM64_RAW(DST, 0, IMM)
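BPF_MOV32_IMM is the 32-bit counterpart of the existing BPF_MOV64_IMM: it encodes a BPF_ALU (rather than BPF_ALU64) move of an immediate into a register, which also clears the upper 32 bits of the destination. In a hand-assembled program of the kind test_verifier.c builds, it would appear as, for instance (illustrative snippet only):

	struct bpf_insn insns[] = {
		BPF_MOV32_IMM(BPF_REG_0, 0),	/* 32-bit move: r0 = 0 */
		BPF_EXIT_INSN(),		/* return r0 */
	};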
diff --git a/samples/bpf/sampleip_kern.c b/samples/bpf/sampleip_kern.c
new file mode 100644
index 000000000000..774a681f374a
--- /dev/null
+++ b/samples/bpf/sampleip_kern.c
@@ -0,0 +1,38 @@
+/* Copyright 2016 Netflix, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/version.h>
+#include <linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/bpf_perf_event.h>
+#include "bpf_helpers.h"
+
+#define MAX_IPS 8192
+
+struct bpf_map_def SEC("maps") ip_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(u64),
+ .value_size = sizeof(u32),
+ .max_entries = MAX_IPS,
+};
+
+SEC("perf_event")
+int do_sample(struct bpf_perf_event_data *ctx)
+{
+ u64 ip;
+ u32 *value, init_val = 1;
+
+ ip = ctx->regs.ip;
+ value = bpf_map_lookup_elem(&ip_map, &ip);
+ if (value)
+ *value += 1;
+ else
+ /* E2BIG is not checked here, to keep this example simple */
+ bpf_map_update_elem(&ip_map, &ip, &init_val, BPF_NOEXIST);
+
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
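The E2BIG comment above refers to bpf_map_update_elem() failing once ip_map already holds MAX_IPS entries; this sample silently drops such updates. A stricter variant would check the helper's return value, along these lines (sketch only):

	if (bpf_map_update_elem(&ip_map, &ip, &init_val, BPF_NOEXIST) < 0) {
		/* map full (-E2BIG) or key raced in (-EEXIST): sample dropped */
	}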
diff --git a/samples/bpf/sampleip_user.c b/samples/bpf/sampleip_user.c
new file mode 100644
index 000000000000..260a6bdd6413
--- /dev/null
+++ b/samples/bpf/sampleip_user.c
@@ -0,0 +1,196 @@
+/*
+ * sampleip: sample instruction pointer and frequency count in a BPF map.
+ *
+ * Copyright 2016 Netflix, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <assert.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/bpf.h>
+#include <sys/ioctl.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+#define DEFAULT_FREQ 99
+#define DEFAULT_SECS 5
+#define MAX_IPS 8192
+#define PAGE_OFFSET 0xffff880000000000
+
+static int nr_cpus;
+
+static void usage(void)
+{
+ printf("USAGE: sampleip [-F freq] [duration]\n");
+ printf(" -F freq # sample frequency (Hertz), default 99\n");
+ printf(" duration # sampling duration (seconds), default 5\n");
+}
+
+static int sampling_start(int *pmu_fd, int freq)
+{
+ int i;
+
+ struct perf_event_attr pe_sample_attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .freq = 1,
+ .sample_period = freq,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ .inherit = 1,
+ };
+
+ for (i = 0; i < nr_cpus; i++) {
+ pmu_fd[i] = perf_event_open(&pe_sample_attr, -1 /* pid */, i,
+ -1 /* group_fd */, 0 /* flags */);
+ if (pmu_fd[i] < 0) {
+ fprintf(stderr, "ERROR: Initializing perf sampling\n");
+ return 1;
+ }
+ assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF,
+ prog_fd[0]) == 0);
+ assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0) == 0);
+ }
+
+ return 0;
+}
+
+static void sampling_end(int *pmu_fd)
+{
+ int i;
+
+ for (i = 0; i < nr_cpus; i++)
+ close(pmu_fd[i]);
+}
+
+struct ipcount {
+ __u64 ip;
+ __u32 count;
+};
+
+/* used for sorting */
+struct ipcount counts[MAX_IPS];
+
+static int count_cmp(const void *p1, const void *p2)
+{
+ return ((struct ipcount *)p1)->count - ((struct ipcount *)p2)->count;
+}
+
+static void print_ip_map(int fd)
+{
+ struct ksym *sym;
+ __u64 key, next_key;
+ __u32 value;
+ int i, max;
+
+ printf("%-19s %-32s %s\n", "ADDR", "KSYM", "COUNT");
+
+ /* fetch IPs and counts */
+ key = 0, i = 0;
+ while (bpf_get_next_key(fd, &key, &next_key) == 0) {
+ bpf_lookup_elem(fd, &next_key, &value);
+ counts[i].ip = next_key;
+ counts[i++].count = value;
+ key = next_key;
+ }
+ max = i;
+
+ /* sort and print */
+ qsort(counts, max, sizeof(struct ipcount), count_cmp);
+ for (i = 0; i < max; i++) {
+ if (counts[i].ip > PAGE_OFFSET) {
+ sym = ksym_search(counts[i].ip);
+ printf("0x%-17llx %-32s %u\n", counts[i].ip, sym->name,
+ counts[i].count);
+ } else {
+ printf("0x%-17llx %-32s %u\n", counts[i].ip, "(user)",
+ counts[i].count);
+ }
+ }
+
+ if (max == MAX_IPS) {
+ printf("WARNING: IP hash was full (max %d entries); ", max);
+ printf("may have dropped samples\n");
+ }
+}
+
+static void int_exit(int sig)
+{
+ printf("\n");
+ print_ip_map(map_fd[0]);
+ exit(0);
+}
+
+int main(int argc, char **argv)
+{
+ char filename[256];
+ int *pmu_fd, opt, freq = DEFAULT_FREQ, secs = DEFAULT_SECS;
+
+ /* process arguments */
+ while ((opt = getopt(argc, argv, "F:h")) != -1) {
+ switch (opt) {
+ case 'F':
+ freq = atoi(optarg);
+ break;
+ case 'h':
+ default:
+ usage();
+ return 0;
+ }
+ }
+ if (argc - optind == 1)
+ secs = atoi(argv[optind]);
+ if (freq == 0 || secs == 0) {
+ usage();
+ return 1;
+ }
+
+ /* initialize kernel symbol translation */
+ if (load_kallsyms()) {
+ fprintf(stderr, "ERROR: loading /proc/kallsyms\n");
+ return 2;
+ }
+
+ /* create perf FDs for each CPU */
+ nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ pmu_fd = malloc(nr_cpus * sizeof(int));
+ if (pmu_fd == NULL) {
+ fprintf(stderr, "ERROR: malloc of pmu_fd\n");
+ return 1;
+ }
+
+ /* load BPF program */
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ if (load_bpf_file(filename)) {
+ fprintf(stderr, "ERROR: loading BPF program (errno %d):\n",
+ errno);
+ if (strcmp(bpf_log_buf, "") == 0)
+ fprintf(stderr, "Try: ulimit -l unlimited\n");
+ else
+ fprintf(stderr, "%s", bpf_log_buf);
+ return 1;
+ }
+ signal(SIGINT, int_exit);
+
+ /* do sampling */
+ printf("Sampling at %d Hertz for %d seconds. Ctrl-C also ends.\n",
+ freq, secs);
+ if (sampling_start(pmu_fd, freq) != 0)
+ return 1;
+ sleep(secs);
+ sampling_end(pmu_fd);
+ free(pmu_fd);
+
+ /* output sample counts */
+ print_ip_map(map_fd[0]);
+
+ return 0;
+}
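A note on sampling_start(): with .freq = 1 in perf_event_attr, the value written to .sample_period is interpreted as a frequency in Hz (the field is a union with .sample_freq), so each CPU-clock event fires roughly freq times per second per CPU. The perf_event_open() called there is presumably the usual thin wrapper around the raw syscall, since Linux provides no libc wrapper, roughly:

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	/* assumed wrapper; perf_event_open is exposed only as a syscall */
	static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
				    int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu,
			       group_fd, flags);
	}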
diff --git a/samples/bpf/sockex2_kern.c b/samples/bpf/sockex2_kern.c
index ba0e177ff561..44e5846c988f 100644
--- a/samples/bpf/sockex2_kern.c
+++ b/samples/bpf/sockex2_kern.c
@@ -14,7 +14,7 @@ struct vlan_hdr {
__be16 h_vlan_encapsulated_proto;
};
-struct flow_keys {
+struct bpf_flow_keys {
__be32 src;
__be32 dst;
union {
@@ -59,7 +59,7 @@ static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
}
static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
- struct flow_keys *flow)
+ struct bpf_flow_keys *flow)
{
__u64 verlen;
@@ -83,7 +83,7 @@ static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto
}
static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
- struct flow_keys *flow)
+ struct bpf_flow_keys *flow)
{
*ip_proto = load_byte(skb,
nhoff + offsetof(struct ipv6hdr, nexthdr));
@@ -96,7 +96,7 @@ static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_pro
return nhoff;
}
-static inline bool flow_dissector(struct __sk_buff *skb, struct flow_keys *flow)
+static inline bool flow_dissector(struct __sk_buff *skb, struct bpf_flow_keys *flow)
{
__u64 nhoff = ETH_HLEN;
__u64 ip_proto;
@@ -198,7 +198,7 @@ struct bpf_map_def SEC("maps") hash_map = {
SEC("socket2")
int bpf_prog2(struct __sk_buff *skb)
{
- struct flow_keys flow;
+ struct bpf_flow_keys flow;
struct pair *value;
u32 key;
diff --git a/samples/bpf/sockex3_kern.c b/samples/bpf/sockex3_kern.c
index 41ae2fd21b13..95907f8d2b17 100644
--- a/samples/bpf/sockex3_kern.c
+++ b/samples/bpf/sockex3_kern.c
@@ -61,7 +61,7 @@ struct vlan_hdr {
__be16 h_vlan_encapsulated_proto;
};
-struct flow_keys {
+struct bpf_flow_keys {
__be32 src;
__be32 dst;
union {
@@ -88,7 +88,7 @@ static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
}
struct globals {
- struct flow_keys flow;
+ struct bpf_flow_keys flow;
};
struct bpf_map_def SEC("maps") percpu_map = {
@@ -114,14 +114,14 @@ struct pair {
struct bpf_map_def SEC("maps") hash_map = {
.type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(struct flow_keys),
+ .key_size = sizeof(struct bpf_flow_keys),
.value_size = sizeof(struct pair),
.max_entries = 1024,
};
static void update_stats(struct __sk_buff *skb, struct globals *g)
{
- struct flow_keys key = g->flow;
+ struct bpf_flow_keys key = g->flow;
struct pair *value;
value = bpf_map_lookup_elem(&hash_map, &key);
diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
index d4184ab5f3ac..3fcfd8c4b2a3 100644
--- a/samples/bpf/sockex3_user.c
+++ b/samples/bpf/sockex3_user.c
@@ -7,7 +7,7 @@
#include <arpa/inet.h>
#include <sys/resource.h>
-struct flow_keys {
+struct bpf_flow_keys {
__be32 src;
__be32 dst;
union {
@@ -49,7 +49,7 @@ int main(int argc, char **argv)
(void) f;
for (i = 0; i < 5; i++) {
- struct flow_keys key = {}, next_key;
+ struct bpf_flow_keys key = {}, next_key;
struct pair value;
sleep(1);
diff --git a/samples/bpf/tcbpf2_kern.c b/samples/bpf/tcbpf2_kern.c
new file mode 100644
index 000000000000..3303bb85593b
--- /dev/null
+++ b/samples/bpf/tcbpf2_kern.c
@@ -0,0 +1,381 @@
+/* Copyright (c) 2016 VMware
+ * Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/in.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/filter.h>
+#include <uapi/linux/pkt_cls.h>
+#include <net/ipv6.h>
+#include "bpf_helpers.h"
+
+#define _htonl __builtin_bswap32
+#define ERROR(ret) do {\
+ char fmt[] = "ERROR line:%d ret:%d\n";\
+ bpf_trace_printk(fmt, sizeof(fmt), __LINE__, ret); \
+ } while(0)
+
+struct geneve_opt {
+ __be16 opt_class;
+ u8 type;
+ u8 length:5;
+ u8 r3:1;
+ u8 r2:1;
+ u8 r1:1;
+ u8 opt_data[8]; /* hard-coded to 8 bytes */
+};
+
+struct vxlan_metadata {
+ u32 gbp;
+};
+
+SEC("gre_set_tunnel")
+int _gre_set_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ key.tunnel_id = 2;
+ key.tunnel_tos = 0;
+ key.tunnel_ttl = 64;
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("gre_get_tunnel")
+int _gre_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ char fmt[] = "key %d remote ip 0x%x\n";
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt), key.tunnel_id, key.remote_ipv4);
+ return TC_ACT_OK;
+}
+
+SEC("vxlan_set_tunnel")
+int _vxlan_set_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ struct vxlan_metadata md;
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ key.tunnel_id = 2;
+ key.tunnel_tos = 0;
+ key.tunnel_ttl = 64;
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ md.gbp = 0x800FF; /* Set VXLAN Group Policy extension */
+ ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("vxlan_get_tunnel")
+int _vxlan_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ struct vxlan_metadata md;
+ char fmt[] = "key %d remote ip 0x%x vxlan gbp 0x%x\n";
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt),
+ key.tunnel_id, key.remote_ipv4, md.gbp);
+
+ return TC_ACT_OK;
+}
+
+SEC("geneve_set_tunnel")
+int _geneve_set_tunnel(struct __sk_buff *skb)
+{
+ int ret, ret2;
+ struct bpf_tunnel_key key;
+ struct geneve_opt gopt;
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ key.tunnel_id = 2;
+ key.tunnel_tos = 0;
+ key.tunnel_ttl = 64;
+
+ __builtin_memset(&gopt, 0x0, sizeof(gopt));
+ gopt.opt_class = 0x102; /* Open Virtual Networking (OVN) */
+ gopt.type = 0x08;
+ gopt.r1 = 1;
+ gopt.r2 = 0;
+ gopt.r3 = 1;
+ gopt.length = 2; /* 4-byte multiple */
+ *(int *) &gopt.opt_data = 0xdeadbeef;
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ ret = bpf_skb_set_tunnel_opt(skb, &gopt, sizeof(gopt));
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("geneve_get_tunnel")
+int _geneve_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ struct geneve_opt gopt;
+ char fmt[] = "key %d remote ip 0x%x geneve class 0x%x\n";
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt),
+ key.tunnel_id, key.remote_ipv4, gopt.opt_class);
+ return TC_ACT_OK;
+}
+
+SEC("ipip_set_tunnel")
+int _ipip_set_tunnel(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key = {};
+ void *data = (void *)(long)skb->data;
+ struct iphdr *iph = data;
+ struct tcphdr *tcp = data + sizeof(*iph);
+ void *data_end = (void *)(long)skb->data_end;
+ int ret;
+
+ /* single length check */
+ if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
+ ERROR(1);
+ return TC_ACT_SHOT;
+ }
+
+ key.tunnel_ttl = 64;
+ if (iph->protocol == IPPROTO_ICMP) {
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ } else {
+ if (iph->protocol != IPPROTO_TCP || iph->ihl != 5)
+ return TC_ACT_SHOT;
+
+ if (tcp->dest == htons(5200))
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ else if (tcp->dest == htons(5201))
+ key.remote_ipv4 = 0xac100165; /* 172.16.1.101 */
+ else
+ return TC_ACT_SHOT;
+ }
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("ipip_get_tunnel")
+int _ipip_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ char fmt[] = "remote ip 0x%x\n";
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt), key.remote_ipv4);
+ return TC_ACT_OK;
+}
+
+SEC("ipip6_set_tunnel")
+int _ipip6_set_tunnel(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key = {};
+ void *data = (void *)(long)skb->data;
+ struct iphdr *iph = data;
+ struct tcphdr *tcp = data + sizeof(*iph);
+ void *data_end = (void *)(long)skb->data_end;
+ int ret;
+
+ /* single length check */
+ if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
+ ERROR(1);
+ return TC_ACT_SHOT;
+ }
+
+ key.remote_ipv6[0] = _htonl(0x2401db00);
+ key.tunnel_ttl = 64;
+
+ if (iph->protocol == IPPROTO_ICMP) {
+ key.remote_ipv6[3] = _htonl(1);
+ } else {
+ if (iph->protocol != IPPROTO_TCP || iph->ihl != 5) {
+ ERROR(iph->protocol);
+ return TC_ACT_SHOT;
+ }
+
+ if (tcp->dest == htons(5200)) {
+ key.remote_ipv6[3] = _htonl(1);
+ } else if (tcp->dest == htons(5201)) {
+ key.remote_ipv6[3] = _htonl(2);
+ } else {
+ ERROR(tcp->dest);
+ return TC_ACT_SHOT;
+ }
+ }
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("ipip6_get_tunnel")
+int _ipip6_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ char fmt[] = "remote ip6 %x::%x\n";
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt), _htonl(key.remote_ipv6[0]),
+ _htonl(key.remote_ipv6[3]));
+ return TC_ACT_OK;
+}
+
+SEC("ip6ip6_set_tunnel")
+int _ip6ip6_set_tunnel(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key = {};
+ void *data = (void *)(long)skb->data;
+ struct ipv6hdr *iph = data;
+ struct tcphdr *tcp = data + sizeof(*iph);
+ void *data_end = (void *)(long)skb->data_end;
+ int ret;
+
+ /* single length check */
+ if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
+ ERROR(1);
+ return TC_ACT_SHOT;
+ }
+
+ key.remote_ipv6[0] = _htonl(0x2401db00);
+ key.tunnel_ttl = 64;
+
+ if (iph->nexthdr == NEXTHDR_ICMP) {
+ key.remote_ipv6[3] = _htonl(1);
+ } else {
+ if (iph->nexthdr != NEXTHDR_TCP) {
+ ERROR(iph->nexthdr);
+ return TC_ACT_SHOT;
+ }
+
+ if (tcp->dest == htons(5200)) {
+ key.remote_ipv6[3] = _htonl(1);
+ } else if (tcp->dest == htons(5201)) {
+ key.remote_ipv6[3] = _htonl(2);
+ } else {
+ ERROR(tcp->dest);
+ return TC_ACT_SHOT;
+ }
+ }
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("ip6ip6_get_tunnel")
+int _ip6ip6_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ char fmt[] = "remote ip6 %x::%x\n";
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt), _htonl(key.remote_ipv6[0]),
+ _htonl(key.remote_ipv6[3]));
+ return TC_ACT_OK;
+}
+
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/test_current_task_under_cgroup_kern.c b/samples/bpf/test_current_task_under_cgroup_kern.c
new file mode 100644
index 000000000000..86b28d7d6c99
--- /dev/null
+++ b/samples/bpf/test_current_task_under_cgroup_kern.c
@@ -0,0 +1,43 @@
+/* Copyright (c) 2016 Sargun Dhillon <sargun@sargun.me>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#include <linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include <linux/version.h>
+#include "bpf_helpers.h"
+#include <uapi/linux/utsname.h>
+
+struct bpf_map_def SEC("maps") cgroup_map = {
+ .type = BPF_MAP_TYPE_CGROUP_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(u32),
+ .max_entries = 1,
+};
+
+struct bpf_map_def SEC("maps") perf_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(u64),
+ .max_entries = 1,
+};
+
+/* Writes the last PID that called sync to a map at index 0 */
+SEC("kprobe/sys_sync")
+int bpf_prog1(struct pt_regs *ctx)
+{
+ u64 pid = bpf_get_current_pid_tgid();
+ int idx = 0;
+
+ if (!bpf_current_task_under_cgroup(&cgroup_map, 0))
+ return 0;
+
+ bpf_map_update_elem(&perf_map, &idx, &pid, BPF_ANY);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/test_current_task_under_cgroup_user.c b/samples/bpf/test_current_task_under_cgroup_user.c
new file mode 100644
index 000000000000..30b0bce884f9
--- /dev/null
+++ b/samples/bpf/test_current_task_under_cgroup_user.c
@@ -0,0 +1,145 @@
+/* Copyright (c) 2016 Sargun Dhillon <sargun@sargun.me>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <linux/bpf.h>
+#include <unistd.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+#include <string.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <linux/bpf.h>
+#include <sched.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <linux/limits.h>
+
+#define CGROUP_MOUNT_PATH "/mnt"
+#define CGROUP_PATH "/mnt/my-cgroup"
+
+#define clean_errno() (errno == 0 ? "None" : strerror(errno))
+#define log_err(MSG, ...) fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
+ __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
+
+static int join_cgroup(char *path)
+{
+ int fd, rc = 0;
+ pid_t pid = getpid();
+ char cgroup_path[PATH_MAX + 1];
+
+ snprintf(cgroup_path, sizeof(cgroup_path), "%s/cgroup.procs", path);
+
+ fd = open(cgroup_path, O_WRONLY);
+ if (fd < 0) {
+ log_err("Opening Cgroup");
+ return 1;
+ }
+
+ if (dprintf(fd, "%d\n", pid) < 0) {
+ log_err("Joining Cgroup");
+ rc = 1;
+ }
+ close(fd);
+ return rc;
+}
+
+int main(int argc, char **argv)
+{
+ char filename[256];
+ int cg2, idx = 0;
+ pid_t remote_pid, local_pid = getpid();
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 1;
+ }
+
+ /*
+ * This is to avoid interfering with existing cgroups. Unfortunately,
+ * most people don't have cgroupv2 enabled at this point in time.
+ * It's easier to create our own mount namespace and manage it
+ * ourselves.
+ */
+ if (unshare(CLONE_NEWNS)) {
+ log_err("unshare");
+ return 1;
+ }
+
+ if (mount("none", "/", NULL, MS_REC | MS_PRIVATE, NULL)) {
+ log_err("mount fakeroot");
+ return 1;
+ }
+
+ if (mount("none", CGROUP_MOUNT_PATH, "cgroup2", 0, NULL)) {
+ log_err("mount cgroup2");
+ return 1;
+ }
+
+ if (mkdir(CGROUP_PATH, 0777) && errno != EEXIST) {
+ log_err("mkdir cgroup");
+ return 1;
+ }
+
+ cg2 = open(CGROUP_PATH, O_RDONLY);
+ if (cg2 < 0) {
+ log_err("opening target cgroup");
+ goto cleanup_cgroup_err;
+ }
+
+ if (bpf_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {
+ log_err("Adding target cgroup to map");
+ goto cleanup_cgroup_err;
+ }
+ if (join_cgroup("/mnt/my-cgroup")) {
+ log_err("Joining target cgroup");
+ goto cleanup_cgroup_err;
+ }
+
+ /*
+ * The installed helper program catches the sync call and should
+ * write the calling PID to the map.
+ */
+
+ sync();
+ bpf_lookup_elem(map_fd[1], &idx, &remote_pid);
+
+ if (local_pid != remote_pid) {
+ fprintf(stderr,
+ "BPF Helper didn't write correct PID to map, but: %d\n",
+ remote_pid);
+ goto leave_cgroup_err;
+ }
+
+ /* Verify the negative scenario; leave the cgroup */
+ if (join_cgroup(CGROUP_MOUNT_PATH))
+ goto leave_cgroup_err;
+
+ remote_pid = 0;
+ bpf_update_elem(map_fd[1], &idx, &remote_pid, BPF_ANY);
+
+ sync();
+ bpf_lookup_elem(map_fd[1], &idx, &remote_pid);
+
+ if (local_pid == remote_pid) {
+ fprintf(stderr, "BPF cgroup negative test did not work\n");
+ goto cleanup_cgroup_err;
+ }
+
+ rmdir(CGROUP_PATH);
+ return 0;
+
+ /* Error condition, cleanup */
+leave_cgroup_err:
+ join_cgroup(CGROUP_MOUNT_PATH);
+cleanup_cgroup_err:
+ rmdir(CGROUP_PATH);
+ return 1;
+}
diff --git a/samples/bpf/test_ipip.sh b/samples/bpf/test_ipip.sh
new file mode 100755
index 000000000000..196925403ab4
--- /dev/null
+++ b/samples/bpf/test_ipip.sh
@@ -0,0 +1,178 @@
+#!/bin/bash
+
+function config_device {
+ ip netns add at_ns0
+ ip netns add at_ns1
+ ip netns add at_ns2
+ ip link add veth0 type veth peer name veth0b
+ ip link add veth1 type veth peer name veth1b
+ ip link add veth2 type veth peer name veth2b
+ ip link set veth0b up
+ ip link set veth1b up
+ ip link set veth2b up
+ ip link set dev veth0b mtu 1500
+ ip link set dev veth1b mtu 1500
+ ip link set dev veth2b mtu 1500
+ ip link set veth0 netns at_ns0
+ ip link set veth1 netns at_ns1
+ ip link set veth2 netns at_ns2
+ ip netns exec at_ns0 ip addr add 172.16.1.100/24 dev veth0
+ ip netns exec at_ns0 ip addr add 2401:db00::1/64 dev veth0 nodad
+ ip netns exec at_ns0 ip link set dev veth0 up
+ ip netns exec at_ns1 ip addr add 172.16.1.101/24 dev veth1
+ ip netns exec at_ns1 ip addr add 2401:db00::2/64 dev veth1 nodad
+ ip netns exec at_ns1 ip link set dev veth1 up
+ ip netns exec at_ns2 ip addr add 172.16.1.200/24 dev veth2
+ ip netns exec at_ns2 ip addr add 2401:db00::3/64 dev veth2 nodad
+ ip netns exec at_ns2 ip link set dev veth2 up
+ ip link add br0 type bridge
+ ip link set br0 up
+ ip link set dev br0 mtu 1500
+ ip link set veth0b master br0
+ ip link set veth1b master br0
+ ip link set veth2b master br0
+}
+
+function add_ipip_tunnel {
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type ipip local 172.16.1.100 remote 172.16.1.200
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+ ip netns exec at_ns1 \
+ ip link add dev $DEV_NS type ipip local 172.16.1.101 remote 172.16.1.200
+ ip netns exec at_ns1 ip link set dev $DEV_NS up
+ # same inner IP address in at_ns0 and at_ns1
+ ip netns exec at_ns1 ip addr add dev $DEV_NS 10.1.1.100/24
+
+ ip netns exec at_ns2 ip link add dev $DEV type ipip external
+ ip netns exec at_ns2 ip link set dev $DEV up
+ ip netns exec at_ns2 ip addr add dev $DEV 10.1.1.200/24
+}
+
+function add_ipip6_tunnel {
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type ip6tnl mode ipip6 local 2401:db00::1/64 remote 2401:db00::3/64
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+ ip netns exec at_ns1 \
+ ip link add dev $DEV_NS type ip6tnl mode ipip6 local 2401:db00::2/64 remote 2401:db00::3/64
+ ip netns exec at_ns1 ip link set dev $DEV_NS up
+ # same inner IP address in at_ns0 and at_ns1
+ ip netns exec at_ns1 ip addr add dev $DEV_NS 10.1.1.100/24
+
+ ip netns exec at_ns2 ip link add dev $DEV type ip6tnl mode ipip6 external
+ ip netns exec at_ns2 ip link set dev $DEV up
+ ip netns exec at_ns2 ip addr add dev $DEV 10.1.1.200/24
+}
+
+function add_ip6ip6_tunnel {
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type ip6tnl mode ip6ip6 local 2401:db00::1/64 remote 2401:db00::3/64
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 2601:646::1/64
+ ip netns exec at_ns1 \
+ ip link add dev $DEV_NS type ip6tnl mode ip6ip6 local 2401:db00::2/64 remote 2401:db00::3/64
+ ip netns exec at_ns1 ip link set dev $DEV_NS up
+ # same inner IP address in at_ns0 and at_ns1
+ ip netns exec at_ns1 ip addr add dev $DEV_NS 2601:646::1/64
+
+ ip netns exec at_ns2 ip link add dev $DEV type ip6tnl mode ip6ip6 external
+ ip netns exec at_ns2 ip link set dev $DEV up
+ ip netns exec at_ns2 ip addr add dev $DEV 2601:646::2/64
+}
+
+function attach_bpf {
+ DEV=$1
+ SET_TUNNEL=$2
+ GET_TUNNEL=$3
+ ip netns exec at_ns2 tc qdisc add dev $DEV clsact
+ ip netns exec at_ns2 tc filter add dev $DEV egress bpf da obj tcbpf2_kern.o sec $SET_TUNNEL
+ ip netns exec at_ns2 tc filter add dev $DEV ingress bpf da obj tcbpf2_kern.o sec $GET_TUNNEL
+}
+
+function test_ipip {
+ DEV_NS=ipip_std
+ DEV=ipip_bpf
+ config_device
+# tcpdump -nei br0 &
+ cat /sys/kernel/debug/tracing/trace_pipe &
+
+ add_ipip_tunnel
+ attach_bpf $DEV ipip_set_tunnel ipip_get_tunnel
+
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ ip netns exec at_ns2 ping -c 1 10.1.1.100
+ ip netns exec at_ns0 iperf -sD -p 5200 > /dev/null
+ ip netns exec at_ns1 iperf -sD -p 5201 > /dev/null
+ sleep 0.2
+ # tcp check _same_ IP over different tunnels
+ ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5200
+ ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5201
+ cleanup
+}
+
+# IPv4 over IPv6 tunnel
+function test_ipip6 {
+ DEV_NS=ipip_std
+ DEV=ipip_bpf
+ config_device
+# tcpdump -nei br0 &
+ cat /sys/kernel/debug/tracing/trace_pipe &
+
+ add_ipip6_tunnel
+ attach_bpf $DEV ipip6_set_tunnel ipip6_get_tunnel
+
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ ip netns exec at_ns2 ping -c 1 10.1.1.100
+ ip netns exec at_ns0 iperf -sD -p 5200 > /dev/null
+ ip netns exec at_ns1 iperf -sD -p 5201 > /dev/null
+ sleep 0.2
+ # tcp check _same_ IP over different tunnels
+ ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5200
+ ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5201
+ cleanup
+}
+
+# IPv6 over IPv6 tunnel
+function test_ip6ip6 {
+ DEV_NS=ipip_std
+ DEV=ipip_bpf
+ config_device
+# tcpdump -nei br0 &
+ cat /sys/kernel/debug/tracing/trace_pipe &
+
+ add_ip6ip6_tunnel
+ attach_bpf $DEV ip6ip6_set_tunnel ip6ip6_get_tunnel
+
+ ip netns exec at_ns0 ping -6 -c 1 2601:646::2
+ ip netns exec at_ns2 ping -6 -c 1 2601:646::1
+ ip netns exec at_ns0 iperf -6sD -p 5200 > /dev/null
+ ip netns exec at_ns1 iperf -6sD -p 5201 > /dev/null
+ sleep 0.2
+ # tcp check _same_ IP over different tunnels
+ ip netns exec at_ns2 iperf -6c 2601:646::1 -n 5k -p 5200
+ ip netns exec at_ns2 iperf -6c 2601:646::1 -n 5k -p 5201
+ cleanup
+}
+
+function cleanup {
+ set +ex
+ pkill iperf
+ ip netns delete at_ns0
+ ip netns delete at_ns1
+ ip netns delete at_ns2
+ ip link del veth0
+ ip link del veth1
+ ip link del veth2
+ ip link del br0
+ pkill tcpdump
+ pkill cat
+ set -ex
+}
+
+cleanup
+echo "Testing IP tunnels..."
+test_ipip
+test_ipip6
+test_ip6ip6
+echo "*** PASS ***"
diff --git a/samples/bpf/test_tunnel_bpf.sh b/samples/bpf/test_tunnel_bpf.sh
new file mode 100755
index 000000000000..1ff634f187b7
--- /dev/null
+++ b/samples/bpf/test_tunnel_bpf.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+# In Namespace 0 (at_ns0) using native tunnel
+# Overlay IP: 10.1.1.100
+# local 172.16.1.100 remote 172.16.1.200
+# veth0 IP: 172.16.1.100, tunnel dev <type>00
+
+# Out of Namespace using BPF set/get on lwtunnel
+# Overlay IP: 10.1.1.200
+# local 172.16.1.200 remote 172.16.1.100
+# veth1 IP: 172.16.1.200, tunnel dev <type>11
+
+function config_device {
+ ip netns add at_ns0
+ ip link add veth0 type veth peer name veth1
+ ip link set veth0 netns at_ns0
+ ip netns exec at_ns0 ip addr add 172.16.1.100/24 dev veth0
+ ip netns exec at_ns0 ip link set dev veth0 up
+ ip link set dev veth1 up mtu 1500
+ ip addr add dev veth1 172.16.1.200/24
+}
+
+function add_gre_tunnel {
+ # in namespace
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type $TYPE key 2 local 172.16.1.100 remote 172.16.1.200
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+
+ # out of namespace
+ ip link add dev $DEV type $TYPE key 2 external
+ ip link set dev $DEV up
+ ip addr add dev $DEV 10.1.1.200/24
+}
+
+function add_vxlan_tunnel {
+ # Set static ARP entries here because iptables set-mark only works
+ # on L3 packets; it does not apply to ARP, which would otherwise
+ # cause errors at get_tunnel_{key/opt}.
+
+ # in namespace
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type $TYPE id 2 dstport 4789 gbp remote 172.16.1.200
+ ip netns exec at_ns0 ip link set dev $DEV_NS address 52:54:00:d9:01:00 up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+ ip netns exec at_ns0 arp -s 10.1.1.200 52:54:00:d9:02:00
+ ip netns exec at_ns0 iptables -A OUTPUT -j MARK --set-mark 0x800FF
+
+ # out of namespace
+ ip link add dev $DEV type $TYPE external gbp dstport 4789
+ ip link set dev $DEV address 52:54:00:d9:02:00 up
+ ip addr add dev $DEV 10.1.1.200/24
+ arp -s 10.1.1.100 52:54:00:d9:01:00
+}
+
+function add_geneve_tunnel {
+ # in namespace
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type $TYPE id 2 dstport 6081 remote 172.16.1.200
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+
+ # out of namespace
+ ip link add dev $DEV type $TYPE dstport 6081 external
+ ip link set dev $DEV up
+ ip addr add dev $DEV 10.1.1.200/24
+}
+
+function add_ipip_tunnel {
+ # in namespace
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type $TYPE local 172.16.1.100 remote 172.16.1.200
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+
+ # out of namespace
+ ip link add dev $DEV type $TYPE external
+ ip link set dev $DEV up
+ ip addr add dev $DEV 10.1.1.200/24
+}
+
+function attach_bpf {
+ DEV=$1
+ SET_TUNNEL=$2
+ GET_TUNNEL=$3
+ tc qdisc add dev $DEV clsact
+ tc filter add dev $DEV egress bpf da obj tcbpf2_kern.o sec $SET_TUNNEL
+ tc filter add dev $DEV ingress bpf da obj tcbpf2_kern.o sec $GET_TUNNEL
+}
+
+function test_gre {
+ TYPE=gretap
+ DEV_NS=gretap00
+ DEV=gretap11
+ config_device
+ add_gre_tunnel
+ attach_bpf $DEV gre_set_tunnel gre_get_tunnel
+ ping -c 1 10.1.1.100
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ cleanup
+}
+
+function test_vxlan {
+ TYPE=vxlan
+ DEV_NS=vxlan00
+ DEV=vxlan11
+ config_device
+ add_vxlan_tunnel
+ attach_bpf $DEV vxlan_set_tunnel vxlan_get_tunnel
+ ping -c 1 10.1.1.100
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ cleanup
+}
+
+function test_geneve {
+ TYPE=geneve
+ DEV_NS=geneve00
+ DEV=geneve11
+ config_device
+ add_geneve_tunnel
+ attach_bpf $DEV geneve_set_tunnel geneve_get_tunnel
+ ping -c 1 10.1.1.100
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ cleanup
+}
+
+function test_ipip {
+ TYPE=ipip
+ DEV_NS=ipip00
+ DEV=ipip11
+ config_device
+ tcpdump -nei veth1 &
+ cat /sys/kernel/debug/tracing/trace_pipe &
+ add_ipip_tunnel
+ ethtool -K veth1 gso off gro off rx off tx off
+ ip link set dev veth1 mtu 1500
+ attach_bpf $DEV ipip_set_tunnel ipip_get_tunnel
+ ping -c 1 10.1.1.100
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ ip netns exec at_ns0 iperf -sD -p 5200 > /dev/null
+ sleep 0.2
+ iperf -c 10.1.1.100 -n 5k -p 5200
+ cleanup
+}
+
+function cleanup {
+ set +ex
+ pkill iperf
+ ip netns delete at_ns0
+ ip link del veth1
+ ip link del ipip11
+ ip link del gretap11
+ ip link del geneve11
+ pkill tcpdump
+ pkill cat
+ set -ex
+}
+
+cleanup
+echo "Testing GRE tunnel..."
+test_gre
+echo "Testing VXLAN tunnel..."
+test_vxlan
+echo "Testing GENEVE tunnel..."
+test_geneve
+echo "Testing IPIP tunnel..."
+test_ipip
+echo "*** PASS ***"
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index fe2fcec98c1f..369ffaad3799 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -29,6 +29,7 @@ struct bpf_test {
struct bpf_insn insns[MAX_INSNS];
int fixup[MAX_FIXUPS];
int prog_array_fixup[MAX_FIXUPS];
+ int test_val_map_fixup[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
enum {
@@ -39,6 +40,19 @@ struct bpf_test {
enum bpf_prog_type prog_type;
};
+/* Note we want this to be 64 bit aligned so that the end of our array is
+ * actually the end of the structure.
+ */
+#define MAX_ENTRIES 11
+struct test_val {
+ unsigned index;
+ int foo[MAX_ENTRIES];
+};
+
+struct other_val {
+ unsigned int action[32];
+};
+
static struct bpf_test tests[] = {
{
"add+sub+mul",
@@ -291,6 +305,29 @@ static struct bpf_test tests[] = {
.result = REJECT,
},
{
+ "invalid argument register",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "non-invalid argument register",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
"check valid spill/fill",
.insns = {
/* spill R1(ctx) into stack */
@@ -1210,6 +1247,54 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "raw_stack: skb_load_bytes, negative len",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "raw_stack: skb_load_bytes, negative len 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, ~0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "raw_stack: skb_load_bytes, zero len",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
"raw_stack: skb_load_bytes, no init",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
@@ -1449,7 +1534,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "pkt: test1",
+ "direct packet access: test1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
@@ -1466,7 +1551,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "pkt: test2",
+ "direct packet access: test2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
@@ -1499,7 +1584,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "pkt: test3",
+ "direct packet access: test3",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
@@ -1511,7 +1596,105 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
},
{
- "pkt: test4",
+ "direct packet access: test4 (write)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test5 (pkt_end >= reg, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test6 (pkt_end >= reg, bad access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test7 (pkt_end >= reg, both accesses)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test8 (double test, variant 1)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test9 (double test, variant 2)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
@@ -1519,15 +1702,687 @@ static struct bpf_test tests[] = {
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test10 (write invalid)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "cannot write",
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test1, valid packet_ptr range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {5},
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "helper access to packet: test2, unchecked packet_ptr",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {1},
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "helper access to packet: test3, variable add",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {11},
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "helper access to packet: test4, packet_ptr with bad range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {7},
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "helper access to packet: test5, packet_ptr with too short range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {6},
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "helper access to packet: test6, cls valid packet_ptr range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {5},
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test7, cls unchecked packet_ptr",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {1},
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test8, cls variable add",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {11},
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test9, cls packet_ptr with bad range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {7},
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test10, cls packet_ptr with too short range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {6},
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test11, cls unsuitable helper 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 42),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_store_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "helper access to the packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test12, cls unsuitable helper 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "helper access to the packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test13, cls helper ok",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test14, cls helper fail sub",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "type=inv expected=fp",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test15, cls helper fail range 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test16, cls helper fail range 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, -9),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test17, cls helper fail range 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, ~0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test18, cls helper fail range zero",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test19, pkt end as input",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 type=pkt_end expected=fp",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test20, wrong reg",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
.result = REJECT,
+ .errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
+ {
+ "valid map access into an array with a constant",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+ "valid map access into an array with a register",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+ "valid map access into an array with a variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+ "valid map access into an array with a signed variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+ "invalid map access into an array with a constant",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
+ offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr = "invalid access to map value, value_size=48 off=48 size=8",
+ .result = REJECT,
+ },
+ {
+ "invalid map access into an array with a register",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr = "R0 min value is outside of the array range",
+ .result = REJECT,
+ },
+ {
+ "invalid map access into an array with a variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+ .result = REJECT,
+ },
+ {
+ "invalid map access into an array with no floor check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+ .result = REJECT,
+ },
+ {
+		"invalid map access into an array with an invalid max check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr = "invalid access to map value, value_size=48 off=44 size=8",
+ .result = REJECT,
+ },
+ {
+		"invalid map access into an array with an invalid max check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3, 11},
+ .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+ .result = REJECT,
+ },
};
static int probe_filter_length(struct bpf_insn *fp)
@@ -1541,12 +2396,12 @@ static int probe_filter_length(struct bpf_insn *fp)
return len + 1;
}
-static int create_map(void)
+static int create_map(size_t val_size, int num)
{
int map_fd;
map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
- sizeof(long long), sizeof(long long), 1024, 0);
+ sizeof(long long), val_size, num, 0);
if (map_fd < 0)
printf("failed to create map '%s'\n", strerror(errno));
@@ -1576,12 +2431,13 @@ static int test(void)
int prog_len = probe_filter_length(prog);
int *fixup = tests[i].fixup;
int *prog_array_fixup = tests[i].prog_array_fixup;
+ int *test_val_map_fixup = tests[i].test_val_map_fixup;
int expected_result;
const char *expected_errstr;
- int map_fd = -1, prog_array_fd = -1;
+ int map_fd = -1, prog_array_fd = -1, test_val_map_fd = -1;
if (*fixup) {
- map_fd = create_map();
+ map_fd = create_map(sizeof(long long), 1024);
do {
prog[*fixup].imm = map_fd;
@@ -1596,6 +2452,18 @@ static int test(void)
prog_array_fixup++;
} while (*prog_array_fixup);
}
+ if (*test_val_map_fixup) {
+			/* Unprivileged can't create a hash map. */
+ if (unpriv)
+ continue;
+ test_val_map_fd = create_map(sizeof(struct test_val),
+ 256);
+ do {
+ prog[*test_val_map_fixup].imm = test_val_map_fd;
+ test_val_map_fixup++;
+ } while (*test_val_map_fixup);
+ }
+
printf("#%d %s ", i, tests[i].descr);
prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
@@ -1642,6 +2510,8 @@ fail:
close(map_fd);
if (prog_array_fd >= 0)
close(prog_array_fd);
+ if (test_val_map_fd >= 0)
+ close(test_val_map_fd);
close(prog_fd);
}
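
The new map-value tests above encode, in raw eBPF instruction macros, the bounds-check pattern the verifier accepts before a variable index into a map value may be dereferenced. Below is a rough restricted-C sketch of the "valid map access into an array with a variable" case; the map and section names are illustrative only, but struct test_val mirrors the 48-byte value the harness creates (an index plus an 11-entry array), and the helpers are the ones declared in bpf_helpers.h.

    /* Sketch only: restricted-C version of the bounds-checked variable
     * index exercised by the verifier tests above. Names are illustrative. */
    #include <uapi/linux/bpf.h>
    #include "bpf_helpers.h"

    #define MAX_ENTRIES 11

    struct test_val {
            unsigned int index;
            int foo[MAX_ENTRIES];
    };

    struct bpf_map_def SEC("maps") test_val_map = {
            .type = BPF_MAP_TYPE_HASH,
            .key_size = sizeof(long long),
            .value_size = sizeof(struct test_val),
            .max_entries = 256,
    };

    SEC("socket")
    int bpf_prog(struct __sk_buff *skb)
    {
            long long key = 0;
            struct test_val *val;
            unsigned int idx;

            val = bpf_map_lookup_elem(&test_val_map, &key);
            if (!val)               /* NULL check the verifier requires */
                    return 0;
            idx = val->index;
            if (idx >= MAX_ENTRIES) /* ceiling check; without it the store is rejected */
                    return 0;
            val->foo[idx] = 42;
            return 0;
    }

    char _license[] SEC("license") = "GPL";
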
diff --git a/samples/bpf/trace_event_kern.c b/samples/bpf/trace_event_kern.c
new file mode 100644
index 000000000000..71a8ed32823e
--- /dev/null
+++ b/samples/bpf/trace_event_kern.c
@@ -0,0 +1,65 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/bpf_perf_event.h>
+#include <uapi/linux/perf_event.h>
+#include "bpf_helpers.h"
+
+struct key_t {
+ char comm[TASK_COMM_LEN];
+ u32 kernstack;
+ u32 userstack;
+};
+
+struct bpf_map_def SEC("maps") counts = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(struct key_t),
+ .value_size = sizeof(u64),
+ .max_entries = 10000,
+};
+
+struct bpf_map_def SEC("maps") stackmap = {
+ .type = BPF_MAP_TYPE_STACK_TRACE,
+ .key_size = sizeof(u32),
+ .value_size = PERF_MAX_STACK_DEPTH * sizeof(u64),
+ .max_entries = 10000,
+};
+
+#define KERN_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)
+#define USER_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK)
+
+SEC("perf_event")
+int bpf_prog1(struct bpf_perf_event_data *ctx)
+{
+ char fmt[] = "CPU-%d period %lld ip %llx";
+ u32 cpu = bpf_get_smp_processor_id();
+ struct key_t key;
+ u64 *val, one = 1;
+
+ if (ctx->sample_period < 10000)
+ /* ignore warmup */
+ return 0;
+ bpf_get_current_comm(&key.comm, sizeof(key.comm));
+ key.kernstack = bpf_get_stackid(ctx, &stackmap, KERN_STACKID_FLAGS);
+ key.userstack = bpf_get_stackid(ctx, &stackmap, USER_STACKID_FLAGS);
+ if ((int)key.kernstack < 0 && (int)key.userstack < 0) {
+ bpf_trace_printk(fmt, sizeof(fmt), cpu, ctx->sample_period,
+ ctx->regs.ip);
+ return 0;
+ }
+
+ val = bpf_map_lookup_elem(&counts, &key);
+ if (val)
+ (*val)++;
+ else
+ bpf_map_update_elem(&counts, &key, &one, BPF_NOEXIST);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
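
One detail worth noting in bpf_prog1() above: the lookup-then-(*val)++ update is not atomic, so two CPUs sampling the same stack at the same instant can lose an increment. For a profiler this is usually harmless, but other samples (sockex1_kern.c, for instance) use the compiler's atomic-add builtin, which the LLVM BPF backend lowers to a BPF_XADD instruction. A hedged sketch of that variant, reusing the counts map and struct key_t defined above:

    /* Sketch: atomic variant of the counter update at the end of bpf_prog1().
     * "counts" and "struct key_t" are the definitions from trace_event_kern.c;
     * __sync_fetch_and_add() becomes a BPF_XADD on the map value. */
    static inline __attribute__((always_inline))
    void count_key(struct key_t *key)
    {
            u64 *val, one = 1;

            val = bpf_map_lookup_elem(&counts, key);
            if (val)
                    __sync_fetch_and_add(val, 1);
            else
                    bpf_map_update_elem(&counts, key, &one, BPF_NOEXIST);
    }
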
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
new file mode 100644
index 000000000000..9a130d31ecf2
--- /dev/null
+++ b/samples/bpf/trace_event_user.c
@@ -0,0 +1,213 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include <signal.h>
+#include <assert.h>
+#include <errno.h>
+#include <sys/resource.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+#define SAMPLE_FREQ 50
+
+static bool sys_read_seen, sys_write_seen;
+
+static void print_ksym(__u64 addr)
+{
+ struct ksym *sym;
+
+ if (!addr)
+ return;
+ sym = ksym_search(addr);
+ printf("%s;", sym->name);
+ if (!strcmp(sym->name, "sys_read"))
+ sys_read_seen = true;
+ else if (!strcmp(sym->name, "sys_write"))
+ sys_write_seen = true;
+}
+
+static void print_addr(__u64 addr)
+{
+ if (!addr)
+ return;
+ printf("%llx;", addr);
+}
+
+#define TASK_COMM_LEN 16
+
+struct key_t {
+ char comm[TASK_COMM_LEN];
+ __u32 kernstack;
+ __u32 userstack;
+};
+
+static void print_stack(struct key_t *key, __u64 count)
+{
+ __u64 ip[PERF_MAX_STACK_DEPTH] = {};
+ static bool warned;
+ int i;
+
+ printf("%3lld %s;", count, key->comm);
+ if (bpf_lookup_elem(map_fd[1], &key->kernstack, ip) != 0) {
+ printf("---;");
+ } else {
+ for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
+ print_ksym(ip[i]);
+ }
+ printf("-;");
+ if (bpf_lookup_elem(map_fd[1], &key->userstack, ip) != 0) {
+ printf("---;");
+ } else {
+ for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
+ print_addr(ip[i]);
+ }
+ printf("\n");
+
+ if (key->kernstack == -EEXIST && !warned) {
+ printf("stackmap collisions seen. Consider increasing size\n");
+ warned = true;
+ } else if ((int)key->kernstack < 0 && (int)key->userstack < 0) {
+ printf("err stackid %d %d\n", key->kernstack, key->userstack);
+ }
+}
+
+static void int_exit(int sig)
+{
+ kill(0, SIGKILL);
+ exit(0);
+}
+
+static void print_stacks(void)
+{
+ struct key_t key = {}, next_key;
+ __u64 value;
+ __u32 stackid = 0, next_id;
+ int fd = map_fd[0], stack_map = map_fd[1];
+
+ sys_read_seen = sys_write_seen = false;
+ while (bpf_get_next_key(fd, &key, &next_key) == 0) {
+ bpf_lookup_elem(fd, &next_key, &value);
+ print_stack(&next_key, value);
+ bpf_delete_elem(fd, &next_key);
+ key = next_key;
+ }
+
+ if (!sys_read_seen || !sys_write_seen) {
+ printf("BUG kernel stack doesn't contain sys_read() and sys_write()\n");
+ int_exit(0);
+ }
+
+ /* clear stack map */
+ while (bpf_get_next_key(stack_map, &stackid, &next_id) == 0) {
+ bpf_delete_elem(stack_map, &next_id);
+ stackid = next_id;
+ }
+}
+
+static void test_perf_event_all_cpu(struct perf_event_attr *attr)
+{
+ int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ int *pmu_fd = malloc(nr_cpus * sizeof(int));
+ int i;
+
+ /* open perf_event on all cpus */
+ for (i = 0; i < nr_cpus; i++) {
+ pmu_fd[i] = perf_event_open(attr, -1, i, -1, 0);
+ if (pmu_fd[i] < 0) {
+ printf("perf_event_open failed\n");
+ goto all_cpu_err;
+ }
+ assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
+ assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0) == 0);
+ }
+ system("dd if=/dev/zero of=/dev/null count=5000k");
+ print_stacks();
+all_cpu_err:
+ for (i--; i >= 0; i--)
+ close(pmu_fd[i]);
+ free(pmu_fd);
+}
+
+static void test_perf_event_task(struct perf_event_attr *attr)
+{
+ int pmu_fd;
+
+ /* open task bound event */
+ pmu_fd = perf_event_open(attr, 0, -1, -1, 0);
+ if (pmu_fd < 0) {
+ printf("perf_event_open failed\n");
+ return;
+ }
+ assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
+ assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0) == 0);
+ system("dd if=/dev/zero of=/dev/null count=5000k");
+ print_stacks();
+ close(pmu_fd);
+}
+
+static void test_bpf_perf_event(void)
+{
+ struct perf_event_attr attr_type_hw = {
+ .sample_freq = SAMPLE_FREQ,
+ .freq = 1,
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .inherit = 1,
+ };
+ struct perf_event_attr attr_type_sw = {
+ .sample_freq = SAMPLE_FREQ,
+ .freq = 1,
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ .inherit = 1,
+ };
+
+ test_perf_event_all_cpu(&attr_type_hw);
+ test_perf_event_task(&attr_type_hw);
+ test_perf_event_all_cpu(&attr_type_sw);
+ test_perf_event_task(&attr_type_sw);
+}
+
+
+int main(int argc, char **argv)
+{
+ struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+ char filename[256];
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ setrlimit(RLIMIT_MEMLOCK, &r);
+
+ signal(SIGINT, int_exit);
+
+ if (load_kallsyms()) {
+ printf("failed to process /proc/kallsyms\n");
+ return 1;
+ }
+
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 2;
+ }
+
+ if (fork() == 0) {
+ read_trace_pipe();
+ return 0;
+ }
+ test_bpf_perf_event();
+
+ int_exit(0);
+ return 0;
+}
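
test_perf_event_all_cpu() and test_perf_event_task() rely on a perf_event_open() helper because glibc ships no wrapper for that syscall; the samples provide a thin shim around syscall(2). The attach sequence itself is three steps: open a sampling event, hand it the program fd with PERF_EVENT_IOC_SET_BPF, then enable it. A minimal stand-alone sketch, assuming only an already-loaded program fd (names here are illustrative, not the sample's own helpers):

    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <linux/perf_event.h>

    /* glibc has no perf_event_open() wrapper, hence the raw syscall shim. */
    static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                   int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    /* Sketch: attach an already-loaded BPF program (bpf_fd) to a software
     * clock sampler on the current task, mirroring test_perf_event_task(). */
    static int attach_sampler(int bpf_fd)
    {
            struct perf_event_attr attr = {
                    .type        = PERF_TYPE_SOFTWARE,
                    .config      = PERF_COUNT_SW_CPU_CLOCK,
                    .freq        = 1,
                    .sample_freq = 50,
            };
            int fd = sys_perf_event_open(&attr, 0 /* this task */,
                                         -1 /* any cpu */, -1, 0);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, PERF_EVENT_IOC_SET_BPF, bpf_fd) ||
                ioctl(fd, PERF_EVENT_IOC_ENABLE, 0)) {
                    close(fd);
                    return -1;
            }
            return fd;
    }
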
diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c
index f95f232cbab9..fd12d7154d42 100644
--- a/samples/bpf/tracex5_kern.c
+++ b/samples/bpf/tracex5_kern.c
@@ -19,20 +19,18 @@ struct bpf_map_def SEC("maps") progs = {
.max_entries = 1024,
};
-SEC("kprobe/seccomp_phase1")
+SEC("kprobe/__seccomp_filter")
int bpf_prog1(struct pt_regs *ctx)
{
- struct seccomp_data sd;
-
- bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
+ int sc_nr = (int)PT_REGS_PARM1(ctx);
/* dispatch into next BPF program depending on syscall number */
- bpf_tail_call(ctx, &progs, sd.nr);
+ bpf_tail_call(ctx, &progs, sc_nr);
/* fall through -> unknown syscall */
- if (sd.nr >= __NR_getuid && sd.nr <= __NR_getsid) {
+ if (sc_nr >= __NR_getuid && sc_nr <= __NR_getsid) {
char fmt[] = "syscall=%d (one of get/set uid/pid/gid)\n";
- bpf_trace_printk(fmt, sizeof(fmt), sd.nr);
+ bpf_trace_printk(fmt, sizeof(fmt), sc_nr);
}
return 0;
}
@@ -42,7 +40,7 @@ PROG(__NR_write)(struct pt_regs *ctx)
{
struct seccomp_data sd;
- bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
+ bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));
if (sd.args[2] == 512) {
char fmt[] = "write(fd=%d, buf=%p, size=%d)\n";
bpf_trace_printk(fmt, sizeof(fmt),
@@ -55,7 +53,7 @@ PROG(__NR_read)(struct pt_regs *ctx)
{
struct seccomp_data sd;
- bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
+ bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));
if (sd.args[2] > 128 && sd.args[2] <= 1024) {
char fmt[] = "read(fd=%d, buf=%p, size=%d)\n";
bpf_trace_printk(fmt, sizeof(fmt),
diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c
index a04dd3cd4358..36b5925bb137 100644
--- a/samples/bpf/tracex5_user.c
+++ b/samples/bpf/tracex5_user.c
@@ -6,6 +6,7 @@
#include <sys/prctl.h>
#include "libbpf.h"
#include "bpf_load.h"
+#include <sys/resource.h>
/* install fake seccomp program to enable seccomp code path inside the kernel,
* so that our kprobe attached to seccomp_phase1() can be triggered
@@ -27,8 +28,10 @@ int main(int ac, char **argv)
{
FILE *f;
char filename[256];
+ struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ setrlimit(RLIMIT_MEMLOCK, &r);
if (load_bpf_file(filename)) {
printf("%s", bpf_log_buf);