Netdev Archive on lore.kernel.org
* [PATCH v5 bpf-next 0/3] enable BPF_PROG_TEST_RUN for raw_tp
@ 2020-09-24 23:02 Song Liu
2020-09-24 23:02 ` [PATCH v5 bpf-next 1/3] bpf: enable BPF_PROG_TEST_RUN for raw_tracepoint Song Liu
` (2 more replies)
0 siblings, 3 replies; 10+ messages in thread
From: Song Liu @ 2020-09-24 23:02 UTC (permalink / raw)
To: netdev, bpf; +Cc: kernel-team, ast, daniel, john.fastabend, kpsingh, Song Liu
This set enables BPF_PROG_TEST_RUN for raw_tracepoint type programs. It also
allows running the raw_tp program on a specific CPU. User space can use this
feature to trigger programs that access percpu resources, e.g. perf_event or
percpu variables.
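For context, user space drives this through the new libbpf API added in
patch 2/3; a minimal sketch mirroring the selftest in patch 3/3 (prog_fd,
args, and target_cpu are assumed to be set up by the caller):
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
		.ctx_in = args,
		.ctx_size_in = sizeof(args),
		.flags = BPF_F_TEST_RUN_ON_CPU,
		.cpu = target_cpu,
	);

	err = bpf_prog_test_run_opts(prog_fd, &opts);
	/* on success, opts.retval holds the BPF program's return value */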
Changes v4 => v5:
1. Fail test_run with non-zero test.cpu but without BPF_F_TEST_RUN_ON_CPU. (Andrii)
2. Add extra check for invalid test.cpu value. (Andrii)
3. Shuffle bpf_test_run_opts to remove holes. (Andrii)
4. Fixes in selftests. (Andrii)
Changes v3 => v4:
1. Use cpu+flags instead of cpu_plus. (Andrii)
2. Rework libbpf support. (Andrii)
Changes v2 => v3:
1. Fix memory leak in the selftest. (Andrii)
2. Use __u64 instead of unsigned long long. (Andrii)
Changes v1 => v2:
1. More checks for retval in the selftest. (John)
2. Remove unnecessary goto in bpf_prog_test_run_raw_tp. (John)
Song Liu (3):
bpf: enable BPF_PROG_TEST_RUN for raw_tracepoint
libbpf: support test run of raw tracepoint programs
selftests/bpf: add raw_tp_test_run
include/linux/bpf.h | 3 +
include/uapi/linux/bpf.h | 7 ++
kernel/bpf/syscall.c | 2 +-
kernel/trace/bpf_trace.c | 1 +
net/bpf/test_run.c | 91 +++++++++++++++++
tools/include/uapi/linux/bpf.h | 7 ++
tools/lib/bpf/bpf.c | 31 ++++++
tools/lib/bpf/bpf.h | 26 +++++
tools/lib/bpf/libbpf.map | 1 +
tools/lib/bpf/libbpf_internal.h | 5 +
.../bpf/prog_tests/raw_tp_test_run.c | 98 +++++++++++++++++++
.../bpf/progs/test_raw_tp_test_run.c | 24 +++++
12 files changed, 295 insertions(+), 1 deletion(-)
create mode 100644 tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
create mode 100644 tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c
--
2.24.1
* [PATCH v5 bpf-next 1/3] bpf: enable BPF_PROG_TEST_RUN for raw_tracepoint
2020-09-24 23:02 [PATCH v5 bpf-next 0/3] enable BPF_PROG_TEST_RUN for raw_tp Song Liu
@ 2020-09-24 23:02 ` Song Liu
2020-09-25 17:21 ` Andrii Nakryiko
2020-09-24 23:02 ` [PATCH v5 bpf-next 2/3] libbpf: support test run of raw tracepoint programs Song Liu
2020-09-24 23:02 ` [PATCH v5 bpf-next 3/3] selftests/bpf: add raw_tp_test_run Song Liu
2 siblings, 1 reply; 10+ messages in thread
From: Song Liu @ 2020-09-24 23:02 UTC (permalink / raw)
To: netdev, bpf; +Cc: kernel-team, ast, daniel, john.fastabend, kpsingh, Song Liu
Add .test_run for raw_tracepoint. Also, introduce a new feature that runs
the target program on a specific CPU. This is achieved by a new flag in
bpf_attr.test, BPF_F_TEST_RUN_ON_CPU. When this flag is set, the program
is triggered on the CPU with id bpf_attr.test.cpu. This feature is needed
for BPF programs that handle perf_event and other percpu resources, as the
program can then access these resources locally.
Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
---
include/linux/bpf.h | 3 ++
include/uapi/linux/bpf.h | 7 +++
kernel/bpf/syscall.c | 2 +-
kernel/trace/bpf_trace.c | 1 +
net/bpf/test_run.c | 91 ++++++++++++++++++++++++++++++++++
tools/include/uapi/linux/bpf.h | 7 +++
6 files changed, 110 insertions(+), 1 deletion(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index fc5c901c75421..efa7245ed76e0 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1381,6 +1381,9 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
+int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index a22812561064a..05e480f66f475 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -424,6 +424,11 @@ enum {
*/
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
+/* Flags for BPF_PROG_TEST_RUN */
+
+/* If set, run the test on the cpu specified by bpf_attr.test.cpu */
+#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
+
/* type for BPF_ENABLE_STATS */
enum bpf_stats_type {
/* enabled run_time_ns and run_cnt */
@@ -566,6 +571,8 @@ union bpf_attr {
*/
__aligned_u64 ctx_in;
__aligned_u64 ctx_out;
+ __u32 flags;
+ __u32 cpu;
} test;
struct { /* anonymous struct used by BPF_*_GET_*_ID */
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2740df19f55e9..3bc2ed2e171be 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2979,7 +2979,7 @@ static int bpf_prog_query(const union bpf_attr *attr,
}
}
-#define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out
+#define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu
static int bpf_prog_test_run(const union bpf_attr *attr,
union bpf_attr __user *uattr)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 36508f46a8dbf..2834866d379ae 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1678,6 +1678,7 @@ const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
};
const struct bpf_prog_ops raw_tracepoint_prog_ops = {
+ .test_run = bpf_prog_test_run_raw_tp,
};
const struct bpf_verifier_ops tracing_verifier_ops = {
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index a66f211726e7c..fde5db93507c4 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -11,6 +11,7 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/error-injection.h>
+#include <linux/smp.h>
#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>
@@ -204,6 +205,9 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
int b = 2, err = -EFAULT;
u32 retval = 0;
+ if (kattr->test.flags || kattr->test.cpu)
+ return -EINVAL;
+
switch (prog->expected_attach_type) {
case BPF_TRACE_FENTRY:
case BPF_TRACE_FEXIT:
@@ -236,6 +240,87 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
return err;
}
+struct bpf_raw_tp_test_run_info {
+ struct bpf_prog *prog;
+ void *ctx;
+ u32 retval;
+};
+
+static void
+__bpf_prog_test_run_raw_tp(void *data)
+{
+ struct bpf_raw_tp_test_run_info *info = data;
+
+ rcu_read_lock();
+ migrate_disable();
+ info->retval = BPF_PROG_RUN(info->prog, info->ctx);
+ migrate_enable();
+ rcu_read_unlock();
+}
+
+int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
+ __u32 ctx_size_in = kattr->test.ctx_size_in;
+ struct bpf_raw_tp_test_run_info info;
+ int cpu = kattr->test.cpu, err = 0;
+
+ /* doesn't support data_in/out, ctx_out, duration, or repeat */
+ if (kattr->test.data_in || kattr->test.data_out ||
+ kattr->test.ctx_out || kattr->test.duration ||
+ kattr->test.repeat)
+ return -EINVAL;
+
+ if (ctx_size_in < prog->aux->max_ctx_offset)
+ return -EINVAL;
+
+ if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
+ return -EINVAL;
+
+ if (ctx_size_in) {
+ info.ctx = kzalloc(ctx_size_in, GFP_USER);
+ if (!info.ctx)
+ return -ENOMEM;
+ if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
+ err = -EFAULT;
+ goto out;
+ }
+ } else {
+ info.ctx = NULL;
+ }
+
+ info.prog = prog;
+
+ if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
+ cpu == smp_processor_id()) {
+ __bpf_prog_test_run_raw_tp(&info);
+ } else {
+ /* smp_call_function_single() also checks cpu_online()
+ * after csd_lock(). However, since cpu is from user
+ * space, let's do an extra quick check to filter out
+ * invalid value before smp_call_function_single().
+ */
+ if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
+ err = -ENXIO;
+ goto out;
+ }
+
+ err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
+ &info, 1);
+ if (err)
+ goto out;
+ }
+
+ if (copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
+ err = -EFAULT;
+
+out:
+ kfree(info.ctx);
+ return err;
+}
+
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
@@ -410,6 +495,9 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
void *data;
int ret;
+ if (kattr->test.flags || kattr->test.cpu)
+ return -EINVAL;
+
data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
if (IS_ERR(data))
@@ -607,6 +695,9 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
return -EINVAL;
+ if (kattr->test.flags || kattr->test.cpu)
+ return -EINVAL;
+
if (size < ETH_HLEN)
return -EINVAL;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index a22812561064a..05e480f66f475 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -424,6 +424,11 @@ enum {
*/
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
+/* Flags for BPF_PROG_TEST_RUN */
+
+/* If set, run the test on the cpu specified by bpf_attr.test.cpu */
+#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
+
/* type for BPF_ENABLE_STATS */
enum bpf_stats_type {
/* enabled run_time_ns and run_cnt */
@@ -566,6 +571,8 @@ union bpf_attr {
*/
__aligned_u64 ctx_in;
__aligned_u64 ctx_out;
+ __u32 flags;
+ __u32 cpu;
} test;
struct { /* anonymous struct used by BPF_*_GET_*_ID */
--
2.24.1
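For callers that do not use libbpf, the same fields can be set directly on
bpf_attr. A hedged sketch of such a raw bpf(2) invocation follows; the
helper name run_raw_tp_on_cpu() is made up for illustration:

	#include <linux/bpf.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	/* Run an already-loaded raw_tp program on one CPU. Per the checks
	 * in bpf_prog_test_run_raw_tp() above, a non-zero cpu without
	 * BPF_F_TEST_RUN_ON_CPU fails with -EINVAL, and an offline or
	 * out-of-range cpu fails with -ENXIO.
	 */
	static int run_raw_tp_on_cpu(int prog_fd, __u64 *args, __u32 nr_args,
				     __u32 cpu)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.test.prog_fd = prog_fd;
		attr.test.ctx_in = (__u64)(unsigned long)args;
		attr.test.ctx_size_in = nr_args * sizeof(__u64);
		attr.test.flags = BPF_F_TEST_RUN_ON_CPU;
		attr.test.cpu = cpu;

		return syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	}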
* [PATCH v5 bpf-next 2/3] libbpf: support test run of raw tracepoint programs
2020-09-24 23:02 [PATCH v5 bpf-next 0/3] enable BPF_PROG_TEST_RUN for raw_tp Song Liu
2020-09-24 23:02 ` [PATCH v5 bpf-next 1/3] bpf: enable BPF_PROG_TEST_RUN for raw_tracepoint Song Liu
@ 2020-09-24 23:02 ` Song Liu
2020-09-25 17:22 ` Andrii Nakryiko
2020-09-24 23:02 ` [PATCH v5 bpf-next 3/3] selftests/bpf: add raw_tp_test_run Song Liu
2 siblings, 1 reply; 10+ messages in thread
From: Song Liu @ 2020-09-24 23:02 UTC (permalink / raw)
To: netdev, bpf; +Cc: kernel-team, ast, daniel, john.fastabend, kpsingh, Song Liu
Add bpf_prog_test_run_opts() with support for the new fields in
bpf_attr.test, namely flags and cpu. Also extend the _opts operations to
support output values via opts.
Signed-off-by: Song Liu <songliubraving@fb.com>
---
tools/lib/bpf/bpf.c | 31 +++++++++++++++++++++++++++++++
tools/lib/bpf/bpf.h | 26 ++++++++++++++++++++++++++
tools/lib/bpf/libbpf.map | 1 +
tools/lib/bpf/libbpf_internal.h | 5 +++++
4 files changed, 63 insertions(+)
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 2baa1308737c8..c5a4d8444bf68 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -712,6 +712,37 @@ int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
return ret;
}
+int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
+{
+ union bpf_attr attr;
+ int ret;
+
+ if (!OPTS_VALID(opts, bpf_test_run_opts))
+ return -EINVAL;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.test.prog_fd = prog_fd;
+ attr.test.cpu = OPTS_GET(opts, cpu, 0);
+ attr.test.flags = OPTS_GET(opts, flags, 0);
+ attr.test.repeat = OPTS_GET(opts, repeat, 0);
+ attr.test.duration = OPTS_GET(opts, duration, 0);
+ attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
+ attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
+ attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
+ attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
+ attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
+ attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
+ attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
+ attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));
+
+ ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
+ OPTS_SET(opts, data_size_out, attr.test.data_size_out);
+ OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
+ OPTS_SET(opts, duration, attr.test.duration);
+ OPTS_SET(opts, retval, attr.test.retval);
+ return ret;
+}
+
static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
union bpf_attr attr;
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 8c1ac4b42f908..4f3568e55527c 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -251,6 +251,32 @@ struct bpf_prog_bind_opts {
LIBBPF_API int bpf_prog_bind_map(int prog_fd, int map_fd,
const struct bpf_prog_bind_opts *opts);
+
+struct bpf_test_run_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ const void *data_in; /* optional */
+ void *data_out; /* optional */
+ __u32 data_size_in;
+ __u32 data_size_out; /* in: max length of data_out
+ * out: length of data_out
+ */
+ const void *ctx_in; /* optional */
+ void *ctx_out; /* optional */
+ __u32 ctx_size_in;
+ __u32 ctx_size_out; /* in: max length of ctx_out
+ * out: length of ctx_out
+ */
+ __u32 retval; /* out: return code of the BPF program */
+ int repeat;
+ __u32 duration; /* out: average per repetition in ns */
+ __u32 flags;
+ __u32 cpu;
+};
+#define bpf_test_run_opts__last_field cpu
+
+LIBBPF_API int bpf_prog_test_run_opts(int prog_fd,
+ struct bpf_test_run_opts *opts);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 5f054dadf0829..0623e7a99b1ec 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -303,6 +303,7 @@ LIBBPF_0.1.0 {
LIBBPF_0.2.0 {
global:
bpf_prog_bind_map;
+ bpf_prog_test_run_opts;
bpf_program__section_name;
perf_buffer__buffer_cnt;
perf_buffer__buffer_fd;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 4d1c366fca2ca..d2fff18f4cd12 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -136,6 +136,11 @@ static inline bool libbpf_validate_opts(const char *opts,
((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
#define OPTS_GET(opts, field, fallback_value) \
(OPTS_HAS(opts, field) ? (opts)->field : fallback_value)
+#define OPTS_SET(opts, field, value) \
+ do { \
+ if (OPTS_HAS(opts, field)) \
+ (opts)->field = value; \
+ } while (0)
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
--
2.24.1
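Design note: OPTS_SET() is the write-side counterpart of the existing
OPTS_GET(); both gate field access on opts->sz, so callers compiled against
an older struct layout keep working. A small sketch of the resulting
behavior (the "old caller" sizing below is hypothetical, for illustration):

	struct bpf_test_run_opts opts = {};

	/* a caller built before .flags/.cpu existed: sz ends at .duration */
	opts.sz = offsetofend(struct bpf_test_run_opts, duration);

	/* inside bpf_prog_test_run_opts():
	 *   OPTS_GET(opts, cpu, 0)    -> falls back to 0 (beyond opts.sz)
	 *   OPTS_SET(opts, retval, v) -> written (within opts.sz)
	 */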
* [PATCH v5 bpf-next 3/3] selftests/bpf: add raw_tp_test_run
2020-09-24 23:02 [PATCH v5 bpf-next 0/3] enable BPF_PROG_TEST_RUN for raw_tp Song Liu
2020-09-24 23:02 ` [PATCH v5 bpf-next 1/3] bpf: enable BPF_PROG_TEST_RUN for raw_tracepoint Song Liu
2020-09-24 23:02 ` [PATCH v5 bpf-next 2/3] libbpf: support test run of raw tracepoint programs Song Liu
@ 2020-09-24 23:02 ` Song Liu
2020-09-25 1:01 ` John Fastabend
2020-09-25 17:31 ` Andrii Nakryiko
2 siblings, 2 replies; 10+ messages in thread
From: Song Liu @ 2020-09-24 23:02 UTC (permalink / raw)
To: netdev, bpf; +Cc: kernel-team, ast, daniel, john.fastabend, kpsingh, Song Liu
This test runs test_run for a raw_tracepoint program. The test covers ctx
input, retval output, and running on the correct CPU.
Signed-off-by: Song Liu <songliubraving@fb.com>
---
.../bpf/prog_tests/raw_tp_test_run.c | 98 +++++++++++++++++++
.../bpf/progs/test_raw_tp_test_run.c | 24 +++++
2 files changed, 122 insertions(+)
create mode 100644 tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
create mode 100644 tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
new file mode 100644
index 0000000000000..5b07259781610
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019 Facebook */
+#include <test_progs.h>
+#include <linux/bpf.h>
+#include "bpf/libbpf_internal.h"
+#include "test_raw_tp_test_run.skel.h"
+
+static int duration;
+
+void test_raw_tp_test_run(void)
+{
+ struct bpf_prog_test_run_attr test_attr = {};
+ int comm_fd = -1, err, nr_online, i, prog_fd;
+ __u64 args[2] = {0x1234ULL, 0x5678ULL};
+ int expected_retval = 0x1234 + 0x5678;
+ struct test_raw_tp_test_run *skel;
+ char buf[] = "new_name";
+ bool *online = NULL;
+
+ err = parse_cpu_mask_file("/sys/devices/system/cpu/online", &online,
+ &nr_online);
+ if (CHECK(err, "parse_cpu_mask_file", "err %d\n", err))
+ return;
+
+ skel = test_raw_tp_test_run__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ goto cleanup;
+
+ err = test_raw_tp_test_run__attach(skel);
+ if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ goto cleanup;
+
+ comm_fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
+ if (CHECK(comm_fd < 0, "open /proc/self/comm", "err %d\n", errno))
+ goto cleanup;
+
+ err = write(comm_fd, buf, sizeof(buf));
+ CHECK(err < 0, "task rename", "err %d", errno);
+
+ CHECK(skel->bss->count == 0, "check_count", "didn't increase\n");
+ CHECK(skel->data->on_cpu != 0xffffffff, "check_on_cpu", "got wrong value\n");
+
+ prog_fd = bpf_program__fd(skel->progs.rename);
+ test_attr.prog_fd = prog_fd;
+ test_attr.ctx_in = args;
+ test_attr.ctx_size_in = sizeof(__u64);
+
+ err = bpf_prog_test_run_xattr(&test_attr);
+ CHECK(err == 0, "test_run", "should fail for too small ctx\n");
+
+ test_attr.ctx_size_in = sizeof(args);
+ err = bpf_prog_test_run_xattr(&test_attr);
+ CHECK(err < 0, "test_run", "err %d\n", errno);
+ CHECK(test_attr.retval != expected_retval, "check_retval",
+ "expect 0x%x, got 0x%x\n", expected_retval, test_attr.retval);
+
+ for (i = 0; i < nr_online; i++) {
+ if (online[i]) {
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .ctx_in = args,
+ .ctx_size_in = sizeof(args),
+ .flags = BPF_F_TEST_RUN_ON_CPU,
+ .retval = 0,
+ .cpu = i,
+ );
+
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ CHECK(err < 0, "test_run_opts", "err %d\n", errno);
+ CHECK(skel->data->on_cpu != i, "check_on_cpu",
+ "expect %d got %d\n", i, skel->data->on_cpu);
+ CHECK(opts.retval != expected_retval,
+ "check_retval", "expect 0x%x, got 0x%x\n",
+ expected_retval, opts.retval);
+
+ if (i == 0) {
+ /* invalid cpu ID should fail with ENXIO */
+ opts.cpu = 0xffffffff;
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ CHECK(err != -1 || errno != ENXIO,
+ "test_run_opts_fail",
+ "should failed with ENXIO\n");
+ } else {
+ /* non-zero cpu w/o BPF_F_TEST_RUN_ON_CPU
+ * should fail with EINVAL
+ */
+ opts.flags = 0;
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ CHECK(err != -1 || errno != EINVAL,
+ "test_run_opts_fail",
+ "should failed with EINVAL\n");
+ }
+ }
+ }
+cleanup:
+ close(comm_fd);
+ test_raw_tp_test_run__destroy(skel);
+ free(online);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c b/tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c
new file mode 100644
index 0000000000000..1521853597d70
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+__u32 count = 0;
+__u32 on_cpu = 0xffffffff;
+
+SEC("raw_tp/task_rename")
+int BPF_PROG(rename, struct task_struct *task, char *comm)
+{
+
+ count++;
+ if ((__u64) task == 0x1234ULL && (__u64) comm == 0x5678ULL) {
+ on_cpu = bpf_get_smp_processor_id();
+ return (int)task + (int)comm;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
--
2.24.1
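Assuming a kernel with this series applied, the new test is picked up by
the usual BPF selftests runner and can be exercised on its own with:

	cd tools/testing/selftests/bpf
	make
	sudo ./test_progs -t raw_tp_test_run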
* RE: [PATCH v5 bpf-next 3/3] selftests/bpf: add raw_tp_test_run
2020-09-24 23:02 ` [PATCH v5 bpf-next 3/3] selftests/bpf: add raw_tp_test_run Song Liu
@ 2020-09-25 1:01 ` John Fastabend
2020-09-25 3:01 ` Song Liu
2020-09-25 17:31 ` Andrii Nakryiko
1 sibling, 1 reply; 10+ messages in thread
From: John Fastabend @ 2020-09-25 1:01 UTC (permalink / raw)
To: Song Liu, netdev, bpf
Cc: kernel-team, ast, daniel, john.fastabend, kpsingh, Song Liu
Song Liu wrote:
> This test runs test_run for a raw_tracepoint program. The test covers ctx
> input, retval output, and running on the correct CPU.
>
> Signed-off-by: Song Liu <songliubraving@fb.com>
> ---
[...]
> +void test_raw_tp_test_run(void)
> +{
> + struct bpf_prog_test_run_attr test_attr = {};
> + int comm_fd = -1, err, nr_online, i, prog_fd;
> + __u64 args[2] = {0x1234ULL, 0x5678ULL};
> + int expected_retval = 0x1234 + 0x5678;
> + struct test_raw_tp_test_run *skel;
> + char buf[] = "new_name";
> + bool *online = NULL;
> +
> + err = parse_cpu_mask_file("/sys/devices/system/cpu/online", &online,
> + &nr_online);
> + if (CHECK(err, "parse_cpu_mask_file", "err %d\n", err))
> + return;
> +
> + skel = test_raw_tp_test_run__open_and_load();
> + if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
> + goto cleanup;
> +
> + err = test_raw_tp_test_run__attach(skel);
> + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
> + goto cleanup;
> +
> + comm_fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
> + if (CHECK(comm_fd < 0, "open /proc/self/comm", "err %d\n", errno))
> + goto cleanup;
> +
> + err = write(comm_fd, buf, sizeof(buf));
> + CHECK(err < 0, "task rename", "err %d", errno);
> +
> + CHECK(skel->bss->count == 0, "check_count", "didn't increase\n");
> + CHECK(skel->data->on_cpu != 0xffffffff, "check_on_cpu", "got wrong value\n");
> +
> + prog_fd = bpf_program__fd(skel->progs.rename);
> + test_attr.prog_fd = prog_fd;
> + test_attr.ctx_in = args;
> + test_attr.ctx_size_in = sizeof(__u64);
> +
> + err = bpf_prog_test_run_xattr(&test_attr);
> + CHECK(err == 0, "test_run", "should fail for too small ctx\n");
> +
> + test_attr.ctx_size_in = sizeof(args);
> + err = bpf_prog_test_run_xattr(&test_attr);
> + CHECK(err < 0, "test_run", "err %d\n", errno);
> + CHECK(test_attr.retval != expected_retval, "check_retval",
> + "expect 0x%x, got 0x%x\n", expected_retval, test_attr.retval);
> +
> + for (i = 0; i < nr_online; i++) {
> + if (online[i]) {
> + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
> + .ctx_in = args,
> + .ctx_size_in = sizeof(args),
> + .flags = BPF_F_TEST_RUN_ON_CPU,
> + .retval = 0,
> + .cpu = i,
> + );
> +
> + err = bpf_prog_test_run_opts(prog_fd, &opts);
> + CHECK(err < 0, "test_run_opts", "err %d\n", errno);
> + CHECK(skel->data->on_cpu != i, "check_on_cpu",
> + "expect %d got %d\n", i, skel->data->on_cpu);
> + CHECK(opts.retval != expected_retval,
> + "check_retval", "expect 0x%x, got 0x%x\n",
> + expected_retval, opts.retval);
> +
> + if (i == 0) {
> + /* invalid cpu ID should fail with ENXIO */
> + opts.cpu = 0xffffffff;
> + err = bpf_prog_test_run_opts(prog_fd, &opts);
> + CHECK(err != -1 || errno != ENXIO,
> + "test_run_opts_fail",
> + "should failed with ENXIO\n");
> + } else {
One more request...
How about pulling this if/else branch out of the for loop here? It feels a
bit clumsy as-is, imo. Also, is it worthwhile to bang on the else branch for
every cpu? I would think testing any one non-zero value should be sufficient.
> + /* non-zero cpu w/o BPF_F_TEST_RUN_ON_CPU
> + * should fail with EINVAL
> + */
> + opts.flags = 0;
> + err = bpf_prog_test_run_opts(prog_fd, &opts);
> + CHECK(err != -1 || errno != EINVAL,
> + "test_run_opts_fail",
> + "should failed with EINVAL\n");
> + }
> + }
> + }
> +cleanup:
> + close(comm_fd);
> + test_raw_tp_test_run__destroy(skel);
> + free(online);
> +}
* Re: [PATCH v5 bpf-next 3/3] selftests/bpf: add raw_tp_test_run
2020-09-25 1:01 ` John Fastabend
@ 2020-09-25 3:01 ` Song Liu
0 siblings, 0 replies; 10+ messages in thread
From: Song Liu @ 2020-09-25 3:01 UTC (permalink / raw)
To: John Fastabend; +Cc: netdev, bpf, Kernel Team, ast, daniel, kpsingh
> On Sep 24, 2020, at 6:01 PM, John Fastabend <john.fastabend@gmail.com> wrote:
>
> Song Liu wrote:
>> This test runs test_run for a raw_tracepoint program. The test covers ctx
>> input, retval output, and running on the correct CPU.
>>
>> Signed-off-by: Song Liu <songliubraving@fb.com>
>> ---
>
> [...]
>
>> +void test_raw_tp_test_run(void)
>> +{
>> + struct bpf_prog_test_run_attr test_attr = {};
>> + int comm_fd = -1, err, nr_online, i, prog_fd;
>> + __u64 args[2] = {0x1234ULL, 0x5678ULL};
>> + int expected_retval = 0x1234 + 0x5678;
>> + struct test_raw_tp_test_run *skel;
>> + char buf[] = "new_name";
>> + bool *online = NULL;
>> +
>> + err = parse_cpu_mask_file("/sys/devices/system/cpu/online", &online,
>> + &nr_online);
>> + if (CHECK(err, "parse_cpu_mask_file", "err %d\n", err))
>> + return;
>> +
>> + skel = test_raw_tp_test_run__open_and_load();
>> + if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
>> + goto cleanup;
>> +
>> + err = test_raw_tp_test_run__attach(skel);
>> + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
>> + goto cleanup;
>> +
>> + comm_fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
>> + if (CHECK(comm_fd < 0, "open /proc/self/comm", "err %d\n", errno))
>> + goto cleanup;
>> +
>> + err = write(comm_fd, buf, sizeof(buf));
>> + CHECK(err < 0, "task rename", "err %d\n", errno);
>> +
>> + CHECK(skel->bss->count == 0, "check_count", "didn't increase\n");
>> + CHECK(skel->data->on_cpu != 0xffffffff, "check_on_cpu", "got wrong value\n");
>> +
>> + prog_fd = bpf_program__fd(skel->progs.rename);
>> + test_attr.prog_fd = prog_fd;
>> + test_attr.ctx_in = args;
>> + test_attr.ctx_size_in = sizeof(__u64);
>> +
>> + err = bpf_prog_test_run_xattr(&test_attr);
>> + CHECK(err == 0, "test_run", "should fail for too small ctx\n");
>> +
>> + test_attr.ctx_size_in = sizeof(args);
>> + err = bpf_prog_test_run_xattr(&test_attr);
>> + CHECK(err < 0, "test_run", "err %d\n", errno);
>> + CHECK(test_attr.retval != expected_retval, "check_retval",
>> + "expect 0x%x, got 0x%x\n", expected_retval, test_attr.retval);
>> +
>> + for (i = 0; i < nr_online; i++) {
>> + if (online[i]) {
>> + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
>> + .ctx_in = args,
>> + .ctx_size_in = sizeof(args),
>> + .flags = BPF_F_TEST_RUN_ON_CPU,
>> + .retval = 0,
>> + .cpu = i,
>> + );
>> +
>> + err = bpf_prog_test_run_opts(prog_fd, &opts);
>> + CHECK(err < 0, "test_run_opts", "err %d\n", errno);
>> + CHECK(skel->data->on_cpu != i, "check_on_cpu",
>> + "expect %d got %d\n", i, skel->data->on_cpu);
>> + CHECK(opts.retval != expected_retval,
>> + "check_retval", "expect 0x%x, got 0x%x\n",
>> + expected_retval, opts.retval);
>> +
>> + if (i == 0) {
>> + /* invalid cpu ID should fail with ENXIO */
>> + opts.cpu = 0xffffffff;
>> + err = bpf_prog_test_run_opts(prog_fd, &opts);
>> + CHECK(err != -1 || errno != ENXIO,
>> + "test_run_opts_fail",
>> + "should failed with ENXIO\n");
>> + } else {
>
> One more request...
>
> How about pulling this if/else branch out of the for loop here? It feels a
> bit clumsy as-is, imo. Also, is it worthwhile to bang on the else branch for
> every cpu? I would think testing any one non-zero value should be sufficient.
I thought about both of these directions. The biggest benefit of the current
version is that we can reuse DECLARE_LIBBPF_OPTS() inside the loop. Moving
it to the beginning of the function bothers me a little bit.
Thanks,
Song
* Re: [PATCH v5 bpf-next 1/3] bpf: enable BPF_PROG_TEST_RUN for raw_tracepoint
2020-09-24 23:02 ` [PATCH v5 bpf-next 1/3] bpf: enable BPF_PROG_TEST_RUN for raw_tracepoint Song Liu
@ 2020-09-25 17:21 ` Andrii Nakryiko
0 siblings, 0 replies; 10+ messages in thread
From: Andrii Nakryiko @ 2020-09-25 17:21 UTC (permalink / raw)
To: Song Liu
Cc: Networking, bpf, Kernel Team, Alexei Starovoitov,
Daniel Borkmann, john fastabend, KP Singh
On Thu, Sep 24, 2020 at 4:03 PM Song Liu <songliubraving@fb.com> wrote:
>
> Add .test_run for raw_tracepoint. Also, introduce a new feature that runs
> the target program on a specific CPU. This is achieved by a new flag in
> bpf_attr.test, BPF_F_TEST_RUN_ON_CPU. When this flag is set, the program
> is triggered on the CPU with id bpf_attr.test.cpu. This feature is needed
> for BPF programs that handle perf_event and other percpu resources, as the
> program can then access these resources locally.
>
> Acked-by: John Fastabend <john.fastabend@gmail.com>
> Signed-off-by: Song Liu <songliubraving@fb.com>
> ---
LGTM.
Acked-by: Andrii Nakryiko <andriin@fb.com>
> include/linux/bpf.h | 3 ++
> include/uapi/linux/bpf.h | 7 +++
> kernel/bpf/syscall.c | 2 +-
> kernel/trace/bpf_trace.c | 1 +
> net/bpf/test_run.c | 91 ++++++++++++++++++++++++++++++++++
> tools/include/uapi/linux/bpf.h | 7 +++
> 6 files changed, 110 insertions(+), 1 deletion(-)
[...]
* Re: [PATCH v5 bpf-next 2/3] libbpf: support test run of raw tracepoint programs
2020-09-24 23:02 ` [PATCH v5 bpf-next 2/3] libbpf: support test run of raw tracepoint programs Song Liu
@ 2020-09-25 17:22 ` Andrii Nakryiko
0 siblings, 0 replies; 10+ messages in thread
From: Andrii Nakryiko @ 2020-09-25 17:22 UTC (permalink / raw)
To: Song Liu
Cc: Networking, bpf, Kernel Team, Alexei Starovoitov,
Daniel Borkmann, john fastabend, KP Singh
On Thu, Sep 24, 2020 at 4:03 PM Song Liu <songliubraving@fb.com> wrote:
>
> Add bpf_prog_test_run_opts() with support for the new fields in
> bpf_attr.test, namely flags and cpu. Also extend the _opts operations to
> support output values via opts.
>
> Signed-off-by: Song Liu <songliubraving@fb.com>
> ---
Looks nice!
Acked-by: Andrii Nakryiko <andriin@fb.com>
> tools/lib/bpf/bpf.c | 31 +++++++++++++++++++++++++++++++
> tools/lib/bpf/bpf.h | 26 ++++++++++++++++++++++++++
> tools/lib/bpf/libbpf.map | 1 +
> tools/lib/bpf/libbpf_internal.h | 5 +++++
> 4 files changed, 63 insertions(+)
[...]
* Re: [PATCH v5 bpf-next 3/3] selftests/bpf: add raw_tp_test_run
2020-09-24 23:02 ` [PATCH v5 bpf-next 3/3] selftests/bpf: add raw_tp_test_run Song Liu
2020-09-25 1:01 ` John Fastabend
@ 2020-09-25 17:31 ` Andrii Nakryiko
2020-09-25 19:49 ` Song Liu
1 sibling, 1 reply; 10+ messages in thread
From: Andrii Nakryiko @ 2020-09-25 17:31 UTC (permalink / raw)
To: Song Liu
Cc: Networking, bpf, Kernel Team, Alexei Starovoitov,
Daniel Borkmann, john fastabend, KP Singh
On Thu, Sep 24, 2020 at 4:03 PM Song Liu <songliubraving@fb.com> wrote:
>
> This test runs test_run for a raw_tracepoint program. The test covers ctx
> input, retval output, and running on the correct CPU.
>
> Signed-off-by: Song Liu <songliubraving@fb.com>
> ---
Few suggestions below, but overall looks good to me:
Acked-by: Andrii Nakryiko <andriin@fb.com>
> .../bpf/prog_tests/raw_tp_test_run.c | 98 +++++++++++++++++++
> .../bpf/progs/test_raw_tp_test_run.c | 24 +++++
> 2 files changed, 122 insertions(+)
> create mode 100644 tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
> create mode 100644 tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c
>
[...]
> +
> + err = bpf_prog_test_run_xattr(&test_attr);
> + CHECK(err == 0, "test_run", "should fail for too small ctx\n");
> +
> + test_attr.ctx_size_in = sizeof(args);
> + err = bpf_prog_test_run_xattr(&test_attr);
> + CHECK(err < 0, "test_run", "err %d\n", errno);
> + CHECK(test_attr.retval != expected_retval, "check_retval",
> + "expect 0x%x, got 0x%x\n", expected_retval, test_attr.retval);
> +
> + for (i = 0; i < nr_online; i++) {
> + if (online[i]) {
if (!online[i])
continue;
That will reduce nesting by one level.
> + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
> + .ctx_in = args,
> + .ctx_size_in = sizeof(args),
> + .flags = BPF_F_TEST_RUN_ON_CPU,
> + .retval = 0,
> + .cpu = i,
> + );
this declares a variable, so it should be at the top of the lexical scope
> +
> + err = bpf_prog_test_run_opts(prog_fd, &opts);
> + CHECK(err < 0, "test_run_opts", "err %d\n", errno);
> + CHECK(skel->data->on_cpu != i, "check_on_cpu",
> + "expect %d got %d\n", i, skel->data->on_cpu);
> + CHECK(opts.retval != expected_retval,
> + "check_retval", "expect 0x%x, got 0x%x\n",
> + expected_retval, opts.retval);
> +
> + if (i == 0) {
I agree that this looks a bit obscure. You can still re-use
DECLARE_LIBBPF_OPTS; just move it outside the loop. Then you can
modify it in place to adjust to each particular case. Also, in the log
output we'll see 30+ similar success messages for the else branch,
which is indeed unnecessary.
> + /* invalid cpu ID should fail with ENXIO */
> + opts.cpu = 0xffffffff;
> + err = bpf_prog_test_run_opts(prog_fd, &opts);
> + CHECK(err != -1 || errno != ENXIO,
> + "test_run_opts_fail",
> + "should failed with ENXIO\n");
> + } else {
> + /* non-zero cpu w/o BPF_F_TEST_RUN_ON_CPU
> + * should fail with EINVAL
> + */
> + opts.flags = 0;
> + err = bpf_prog_test_run_opts(prog_fd, &opts);
> + CHECK(err != -1 || errno != EINVAL,
> + "test_run_opts_fail",
> + "should failed with EINVAL\n");
> + }
> + }
> + }
[...]
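Taken together with John's earlier request, these suggestions amount to
roughly the following reshape of the loop. This is only a sketch of a
possible v6 direction (untested; all variables come from the test above):

	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
		.ctx_in = args,
		.ctx_size_in = sizeof(args),
		.flags = BPF_F_TEST_RUN_ON_CPU,
	);

	for (i = 0; i < nr_online; i++) {
		if (!online[i])
			continue;

		opts.cpu = i;
		opts.retval = 0;
		err = bpf_prog_test_run_opts(prog_fd, &opts);
		CHECK(err < 0, "test_run_opts", "err %d\n", errno);
		CHECK(skel->data->on_cpu != i, "check_on_cpu",
		      "expect %d got %d\n", i, skel->data->on_cpu);
		CHECK(opts.retval != expected_retval, "check_retval",
		      "expect 0x%x, got 0x%x\n", expected_retval, opts.retval);
	}

	/* error cases only need to be exercised once, outside the loop */
	opts.cpu = 0xffffffff;	/* invalid cpu ID should fail with ENXIO */
	err = bpf_prog_test_run_opts(prog_fd, &opts);
	CHECK(err != -1 || errno != ENXIO, "test_run_opts_fail",
	      "should fail with ENXIO\n");

	opts.cpu = 1;
	opts.flags = 0;		/* non-zero cpu w/o the flag: EINVAL */
	err = bpf_prog_test_run_opts(prog_fd, &opts);
	CHECK(err != -1 || errno != EINVAL, "test_run_opts_fail",
	      "should fail with EINVAL\n");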
* Re: [PATCH v5 bpf-next 3/3] selftests/bpf: add raw_tp_test_run
2020-09-25 17:31 ` Andrii Nakryiko
@ 2020-09-25 19:49 ` Song Liu
0 siblings, 0 replies; 10+ messages in thread
From: Song Liu @ 2020-09-25 19:49 UTC (permalink / raw)
To: Andrii Nakryiko
Cc: Networking, bpf, Kernel Team, Alexei Starovoitov,
Daniel Borkmann, john fastabend, KP Singh
> On Sep 25, 2020, at 10:31 AM, Andrii Nakryiko <andrii.nakryiko@gmail.com> wrote:
>
> On Thu, Sep 24, 2020 at 4:03 PM Song Liu <songliubraving@fb.com> wrote:
>>
>> This test runs test_run for a raw_tracepoint program. The test covers ctx
>> input, retval output, and running on the correct CPU.
>>
>> Signed-off-by: Song Liu <songliubraving@fb.com>
>> ---
>
> Few suggestions below, but overall looks good to me:
>
> Acked-by: Andrii Nakryiko <andriin@fb.com>
>
>> .../bpf/prog_tests/raw_tp_test_run.c | 98 +++++++++++++++++++
>> .../bpf/progs/test_raw_tp_test_run.c | 24 +++++
>> 2 files changed, 122 insertions(+)
>> create mode 100644 tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
>> create mode 100644 tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c
>>
>
> [...]
>
>> +
>> + err = bpf_prog_test_run_xattr(&test_attr);
>> + CHECK(err == 0, "test_run", "should fail for too small ctx\n");
>> +
>> + test_attr.ctx_size_in = sizeof(args);
>> + err = bpf_prog_test_run_xattr(&test_attr);
>> + CHECK(err < 0, "test_run", "err %d\n", errno);
>> + CHECK(test_attr.retval != expected_retval, "check_retval",
>> + "expect 0x%x, got 0x%x\n", expected_retval, test_attr.retval);
>> +
>> + for (i = 0; i < nr_online; i++) {
>> + if (online[i]) {
>
> if (!online[i])
> continue;
>
> That will reduce nestedness by one level
>
>> + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
>> + .ctx_in = args,
>> + .ctx_size_in = sizeof(args),
>> + .flags = BPF_F_TEST_RUN_ON_CPU,
>> + .retval = 0,
>> + .cpu = i,
>> + );
>
> this declares variable, so should be at the top of the lexical scope
>
>
>> +
>> + err = bpf_prog_test_run_opts(prog_fd, &opts);
>> + CHECK(err < 0, "test_run_opts", "err %d\n", errno);
>> + CHECK(skel->data->on_cpu != i, "check_on_cpu",
>> + "expect %d got %d\n", i, skel->data->on_cpu);
>> + CHECK(opts.retval != expected_retval,
>> + "check_retval", "expect 0x%x, got 0x%x\n",
>> + expected_retval, opts.retval);
>> +
>> + if (i == 0) {
>
> I agree that this looks a bit obscure. You can still re-use
> DECLARE_LIBBPF_OPTS, just move it outside the loop. And then you can
> just modify it in place to adjust to a particular case. And in log
> output, we'll see 30+ similar success messages for the else branch,
> which is indeed unnecessary.
OK, 2:1. I will change this in v6.
Thanks,
Song