* [PATCH bpf-next 01/13] bpf/tests: Allow different number of runs per test case
2021-09-02 18:52 [PATCH bpf-next 00/13] bpf/tests: Extend JIT test suite coverage Johan Almbladh
@ 2021-09-02 18:52 ` Johan Almbladh
2021-09-02 18:52 ` [PATCH bpf-next 02/13] bpf/tests: Reduce memory footprint of test suite Johan Almbladh
` (11 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: Johan Almbladh @ 2021-09-02 18:52 UTC (permalink / raw)
To: ast, daniel, andrii, iii
Cc: kafai, songliubraving, yhs, john.fastabend, kpsingh, netdev, bpf,
Johan Almbladh
This patch allows a test case to specify the number of runs to use. For
compatibility with existing test case definitions, the default value 0
is interpreted as MAX_TESTRUNS.
A reduced number of runs is useful for complex test programs where 1000
runs may take a very long time. Instead of reducing what is tested, one
can reduce the number of times the test is run.
Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
---
lib/test_bpf.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 830a18ecffc8..c8bd3e9ab10a 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -80,6 +80,7 @@ struct bpf_test {
int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
__u8 frag_data[MAX_DATA];
int stack_depth; /* for eBPF only, since tests don't call verifier */
+ int nr_testruns; /* Custom run count, defaults to MAX_TESTRUNS if 0 */
};
/* Large test cases need separate allocation and fill handler. */
@@ -8631,6 +8632,9 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
{
int err_cnt = 0, i, runs = MAX_TESTRUNS;
+ if (test->nr_testruns)
+ runs = min(test->nr_testruns, MAX_TESTRUNS);
+
for (i = 0; i < MAX_SUBTESTS; i++) {
void *data;
u64 duration;
--
2.25.1
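For illustration, a test case entry that makes use of the new field could look like the hypothetical example below, mirroring the initializer style used later in this series. The name and the bpf_fill_example helper are made up for the illustration and are not part of the patch.

{
        "EXAMPLE: expensive generated program",
        { },
        INTERNAL | FLAG_NO_DATA,
        { },
        { { 0, 1 } },
        .fill_helper = bpf_fill_example,        /* hypothetical fill handler */
        .nr_testruns = 100,     /* run 100 times instead of MAX_TESTRUNS */
},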
* [PATCH bpf-next 02/13] bpf/tests: Reduce memory footprint of test suite
2021-09-02 18:52 [PATCH bpf-next 00/13] bpf/tests: Extend JIT test suite coverage Johan Almbladh
2021-09-02 18:52 ` [PATCH bpf-next 01/13] bpf/tests: Allow different number of runs per test case Johan Almbladh
@ 2021-09-02 18:52 ` Johan Almbladh
2021-09-02 18:52 ` [PATCH bpf-next 03/13] bpf/tests: Add exhaustive tests of ALU shift values Johan Almbladh
` (10 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: Johan Almbladh @ 2021-09-02 18:52 UTC (permalink / raw)
To: ast, daniel, andrii, iii
Cc: kafai, songliubraving, yhs, john.fastabend, kpsingh, netdev, bpf,
Johan Almbladh
The test suite used to call any fill_helper callbacks to generate eBPF
program data for all test cases at once. This caused ballooning memory
requirements as more extensive test cases were added. Now each
fill_helper is called just before its test is run, and the allocated memory
is released afterwards, before the next test case is processed.
Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
---
lib/test_bpf.c | 26 ++++++++++++--------------
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index c8bd3e9ab10a..f0651dc6450b 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -8694,8 +8694,6 @@ static __init int find_test_index(const char *test_name)
static __init int prepare_bpf_tests(void)
{
- int i;
-
if (test_id >= 0) {
/*
* if a test_id was specified, use test_range to
@@ -8739,23 +8737,11 @@ static __init int prepare_bpf_tests(void)
}
}
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- if (tests[i].fill_helper &&
- tests[i].fill_helper(&tests[i]) < 0)
- return -ENOMEM;
- }
-
return 0;
}
static __init void destroy_bpf_tests(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- if (tests[i].fill_helper)
- kfree(tests[i].u.ptr.insns);
- }
}
static bool exclude_test(int test_id)
@@ -8959,7 +8945,19 @@ static __init int test_bpf(void)
pr_info("#%d %s ", i, tests[i].descr);
+ if (tests[i].fill_helper &&
+ tests[i].fill_helper(&tests[i]) < 0) {
+ pr_cont("FAIL to prog_fill\n");
+ continue;
+ }
+
fp = generate_filter(i, &err);
+
+ if (tests[i].fill_helper) {
+ kfree(tests[i].u.ptr.insns);
+ tests[i].u.ptr.insns = NULL;
+ }
+
if (fp == NULL) {
if (err == 0) {
pass_cnt++;
--
2.25.1
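With this change, the lifetime of a generated program is bounded by its own test. A minimal fill_helper that follows the new contract could look like the sketch below; the helper only allocates the buffer and fills in u.ptr, while test_bpf() now frees the memory right after generate_filter() has consumed it. The function is hypothetical and only illustrates the contract.

static int bpf_fill_example(struct bpf_test *self)      /* hypothetical */
{
        struct bpf_insn *insn;

        insn = kmalloc_array(2, sizeof(*insn), GFP_KERNEL);
        if (!insn)
                return -ENOMEM;

        insn[0] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
        insn[1] = BPF_EXIT_INSN();

        self->u.ptr.insns = insn;
        self->u.ptr.len = 2;

        return 0;
}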
* [PATCH bpf-next 03/13] bpf/tests: Add exhaustive tests of ALU shift values
2021-09-02 18:52 [PATCH bpf-next 00/13] bpf/tests: Extend JIT test suite coverage Johan Almbladh
2021-09-02 18:52 ` [PATCH bpf-next 01/13] bpf/tests: Allow different number of runs per test case Johan Almbladh
2021-09-02 18:52 ` [PATCH bpf-next 02/13] bpf/tests: Reduce memory footprint of test suite Johan Almbladh
@ 2021-09-02 18:52 ` Johan Almbladh
2021-09-03 6:39 ` kernel test robot
2021-09-02 18:52 ` [PATCH bpf-next 04/13] bpf/tests: Add exhaustive tests of ALU operand magnitudes Johan Almbladh
` (9 subsequent siblings)
12 siblings, 1 reply; 16+ messages in thread
From: Johan Almbladh @ 2021-09-02 18:52 UTC (permalink / raw)
To: ast, daniel, andrii, iii
Cc: kafai, songliubraving, yhs, john.fastabend, kpsingh, netdev, bpf,
Johan Almbladh
This patch adds a set of tests for ALU64 and ALU32 shift operations to
verify correctness for all valid shift values. Mainly
intended for JIT testing.
Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
---
lib/test_bpf.c | 257 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 257 insertions(+)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index f0651dc6450b..69f8d4c1df33 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -497,6 +497,165 @@ static int bpf_fill_long_jmp(struct bpf_test *self)
return 0;
}
+static int __bpf_ld_imm64(struct bpf_insn insns[2], u8 reg, s64 imm64)
+{
+ struct bpf_insn tmp[] = {BPF_LD_IMM64(reg, imm64)};
+
+ memcpy(insns, tmp, sizeof(tmp));
+ return 2;
+}
+
+/* Test an ALU shift operation for all valid shift values */
+static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
+ u8 mode, bool alu32)
+{
+ static const s64 regs[] = {
+ 0x0123456789abcdefLL, /* dword > 0, word < 0 */
+ 0xfedcba9876543210LL, /* dword < 0, word > 0 */
+ 0xfedcba0198765432LL, /* dword < 0, word < 0 */
+ 0x0123458967abcdefLL, /* dword > 0, word > 0 */
+ };
+ int bits = alu32 ? 32 : 64;
+ int len = (2 + 8 * bits) * ARRAY_SIZE(regs) + 2;
+ struct bpf_insn *insn;
+ int imm, k;
+ int i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (k = 0; k < ARRAY_SIZE(regs); k++) {
+ s64 reg = regs[k];
+
+ i += __bpf_ld_imm64(&insn[i], R3, reg);
+
+ for (imm = 0; imm < bits; imm++) {
+ u64 val;
+
+ /* Perform operation */
+ insn[i++] = BPF_ALU64_REG(BPF_MOV, R1, R3);
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R2, imm);
+ if (alu32) {
+ if (mode == BPF_K)
+ insn[i++] = BPF_ALU32_IMM(op, R1, imm);
+ else
+ insn[i++] = BPF_ALU32_REG(op, R1, R2);
+ switch (op) {
+ case BPF_LSH:
+ val = (u32)reg << imm;
+ break;
+ case BPF_RSH:
+ val = (u32)reg >> imm;
+ break;
+ case BPF_ARSH:
+ val = (u32)reg >> imm;
+ if (imm > 0 && (reg & 0x80000000))
+ val |= ~(u32)0 << (32 - imm);
+ break;
+ }
+ } else {
+ if (mode == BPF_K)
+ insn[i++] = BPF_ALU64_IMM(op, R1, imm);
+ else
+ insn[i++] = BPF_ALU64_REG(op, R1, R2);
+ switch (op) {
+ case BPF_LSH:
+ val = (u64)reg << imm;
+ break;
+ case BPF_RSH:
+ val = (u64)reg >> imm;
+ break;
+ case BPF_ARSH:
+ val = (u64)reg >> imm;
+ if (imm > 0 && reg < 0)
+ val |= ~(u64)0 << (64 - imm);
+ break;
+ }
+ }
+
+ /* Load reference */
+ i += __bpf_ld_imm64(&insn[i], R4, val);
+
+ /* For diagnostic purposes */
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, i);
+
+ /* Check result */
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R4, 1);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+ }
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+ BUG_ON(i > len);
+
+ return 0;
+}
+
+static int bpf_fill_alu_lsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, false);
+}
+
+static int bpf_fill_alu_rsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, false);
+}
+
+static int bpf_fill_alu_arsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, false);
+}
+
+static int bpf_fill_alu_lsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, false);
+}
+
+static int bpf_fill_alu_rsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, false);
+}
+
+static int bpf_fill_alu_arsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, false);
+}
+
+static int bpf_fill_alu32_lsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, true);
+}
+
+static int bpf_fill_alu32_rsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, true);
+}
+
+static int bpf_fill_alu32_arsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, true);
+}
+
+static int bpf_fill_alu32_lsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, true);
+}
+
+static int bpf_fill_alu32_rsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, true);
+}
+
+static int bpf_fill_alu32_arsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, true);
+}
+
static struct bpf_test tests[] = {
{
"TAX",
@@ -8414,6 +8573,104 @@ static struct bpf_test tests[] = {
{},
{ { 0, 2 } },
},
+ /* Exhaustive test of ALU64 shift operations */
+ {
+ "ALU64_LSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu_lsh_imm,
+ },
+ {
+ "ALU64_RSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu_rsh_imm,
+ },
+ {
+ "ALU64_ARSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu_arsh_imm,
+ },
+ {
+ "ALU64_LSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu_lsh_reg,
+ },
+ {
+ "ALU64_RSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu_rsh_reg,
+ },
+ {
+ "ALU64_ARSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu_arsh_reg,
+ },
+ /* Exhaustive test of ALU32 shift operations */
+ {
+ "ALU32_LSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_imm,
+ },
+ {
+ "ALU32_RSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_imm,
+ },
+ {
+ "ALU32_ARSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_imm,
+ },
+ {
+ "ALU32_LSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_reg,
+ },
+ {
+ "ALU32_RSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_reg,
+ },
+ {
+ "ALU32_ARSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_reg,
+ },
};
static struct net_device dev;
--
2.25.1
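The subtle part of the fill handler above is the hand-built reference value for arithmetic right shifts. The user-space sketch below, which is an illustration and not part of the patch, checks that the logical-shift-plus-sign-extension construction matches the compiler's own signed shift on the usual implementations where >> of a negative value shifts arithmetically.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    int64_t reg = (int64_t)0xfedcba0198765432ULL;   /* dword < 0, word < 0 */
    int imm;

    for (imm = 0; imm < 32; imm++) {
        /* Reference construction as in __bpf_fill_alu_shift() */
        uint64_t val = (uint32_t)reg >> imm;

        if (imm > 0 && (reg & 0x80000000))
            val |= ~(uint32_t)0 << (32 - imm);

        /* Compare against a plain 32-bit arithmetic shift */
        assert(val == (uint32_t)((int32_t)reg >> imm));
    }
    return 0;
}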
* Re: [PATCH bpf-next 03/13] bpf/tests: Add exhaustive tests of ALU shift values
2021-09-02 18:52 ` [PATCH bpf-next 03/13] bpf/tests: Add exhaustive tests of ALU shift values Johan Almbladh
@ 2021-09-03 6:39 ` kernel test robot
0 siblings, 0 replies; 16+ messages in thread
From: kernel test robot @ 2021-09-03 6:39 UTC (permalink / raw)
To: Johan Almbladh, ast, daniel, andrii, iii
Cc: llvm, kbuild-all, kafai, songliubraving, yhs, john.fastabend,
kpsingh, netdev
[-- Attachment #1: Type: text/plain, Size: 4550 bytes --]
Hi Johan,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on bpf-next/master]
url: https://github.com/0day-ci/linux/commits/Johan-Almbladh/bpf-tests-Extend-JIT-test-suite-coverage/20210903-025430
base: https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
config: riscv-randconfig-r001-20210903 (attached as .config)
compiler: clang version 14.0.0 (https://github.com/llvm/llvm-project c9948e9254fbb6ea00f66c7b4542311d21e060be)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# install riscv cross compiling tool for clang build
# apt-get install binutils-riscv64-linux-gnu
# https://github.com/0day-ci/linux/commit/ceabc579a2dfd55d025c0e65dcdb4f8fd313990c
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review Johan-Almbladh/bpf-tests-Extend-JIT-test-suite-coverage/20210903-025430
git checkout ceabc579a2dfd55d025c0e65dcdb4f8fd313990c
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=riscv
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
>> lib/test_bpf.c:581:10: warning: unsequenced modification and access to 'i' [-Wunsequenced]
insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, i);
^ ~
1 warning generated.
vim +/i +581 lib/test_bpf.c
507
508 /* Test an ALU shift operation for all valid shift values */
509 static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
510 u8 mode, bool alu32)
511 {
512 static const s64 regs[] = {
513 0x0123456789abcdefLL, /* dword > 0, word < 0 */
514 0xfedcba9876543210LL, /* dowrd < 0, word > 0 */
515 0xfedcba0198765432LL, /* dowrd < 0, word < 0 */
516 0x0123458967abcdefLL, /* dword > 0, word > 0 */
517 };
518 int bits = alu32 ? 32 : 64;
519 int len = (2 + 8 * bits) * ARRAY_SIZE(regs) + 2;
520 struct bpf_insn *insn;
521 int imm, k;
522 int i = 0;
523
524 insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
525 if (!insn)
526 return -ENOMEM;
527
528 for (k = 0; k < ARRAY_SIZE(regs); k++) {
529 s64 reg = regs[k];
530
531 i += __bpf_ld_imm64(&insn[i], R3, reg);
532
533 for (imm = 0; imm < bits; imm++) {
534 u64 val;
535
536 /* Perform operation */
537 insn[i++] = BPF_ALU64_REG(BPF_MOV, R1, R3);
538 insn[i++] = BPF_ALU64_IMM(BPF_MOV, R2, imm);
539 if (alu32) {
540 if (mode == BPF_K)
541 insn[i++] = BPF_ALU32_IMM(op, R1, imm);
542 else
543 insn[i++] = BPF_ALU32_REG(op, R1, R2);
544 switch (op) {
545 case BPF_LSH:
546 val = (u32)reg << imm;
547 break;
548 case BPF_RSH:
549 val = (u32)reg >> imm;
550 break;
551 case BPF_ARSH:
552 val = (u32)reg >> imm;
553 if (imm > 0 && (reg & 0x80000000))
554 val |= ~(u32)0 << (32 - imm);
555 break;
556 }
557 } else {
558 if (mode == BPF_K)
559 insn[i++] = BPF_ALU64_IMM(op, R1, imm);
560 else
561 insn[i++] = BPF_ALU64_REG(op, R1, R2);
562 switch (op) {
563 case BPF_LSH:
564 val = (u64)reg << imm;
565 break;
566 case BPF_RSH:
567 val = (u64)reg >> imm;
568 break;
569 case BPF_ARSH:
570 val = (u64)reg >> imm;
571 if (imm > 0 && reg < 0)
572 val |= ~(u64)0 << (64 - imm);
573 break;
574 }
575 }
576
577 /* Load reference */
578 i += __bpf_ld_imm64(&insn[i], R4, val);
579
580 /* For diagnostic purposes */
> 581 insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, i);
582
583 /* Check result */
584 insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R4, 1);
585 insn[i++] = BPF_EXIT_INSN();
586 }
587 }
588
589 insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
590 insn[i++] = BPF_EXIT_INSN();
591
592 self->u.ptr.insns = insn;
593 self->u.ptr.len = len;
594 BUG_ON(i > len);
595
596 return 0;
597 }
598
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 30099 bytes --]
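For reference, the warning concerns insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, i), where i is both post-incremented and read in the same expression without a sequence point, so which value ends up in R0 is not well defined. One way to make the intent explicit would be to split the statement as sketched below; this is only one possible resolution, not necessarily the fix adopted in a later revision of the series.

                        /* For diagnostic purposes: R0 holds the index of this insn */
                        insn[i] = BPF_ALU64_IMM(BPF_MOV, R0, i);
                        i++;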
* [PATCH bpf-next 04/13] bpf/tests: Add exhaustive tests of ALU operand magnitudes
2021-09-02 18:52 [PATCH bpf-next 00/13] bpf/tests: Extend JIT test suite coverage Johan Almbladh
` (2 preceding siblings ...)
2021-09-02 18:52 ` [PATCH bpf-next 03/13] bpf/tests: Add exhaustive tests of ALU shift values Johan Almbladh
@ 2021-09-02 18:52 ` Johan Almbladh
2021-09-02 18:52 ` [PATCH bpf-next 05/13] bpf/tests: Add exhaustive tests of JMP " Johan Almbladh
` (8 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: Johan Almbladh @ 2021-09-02 18:52 UTC (permalink / raw)
To: ast, daniel, andrii, iii
Cc: kafai, songliubraving, yhs, john.fastabend, kpsingh, netdev, bpf,
Johan Almbladh
This patch adds a set of tests for ALU64 and ALU32 arithmetic and bitwise
logical operations to verify correctness for all possible magnitudes of
the register and immediate operands. Mainly intended for JIT testing.
The patch introduces a pattern generator that can be used to drive
extensive tests of different kinds of operations. It is parameterized
to allow tuning of the operand combinations to test.
Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
---
lib/test_bpf.c | 772 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 772 insertions(+)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 69f8d4c1df33..7b7f81516c26 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -656,6 +656,451 @@ static int bpf_fill_alu32_arsh_reg(struct bpf_test *self)
return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, true);
}
+/*
+ * Common operand pattern generator for exhaustive power-of-two magnitudes
+ * tests. The block size parameters can be adjusted to increase/reduce the
+ * number of combinations tested and thereby execution speed and memory
+ * footprint.
+ */
+
+static inline s64 value(int msb, int delta, int sign)
+{
+ return sign * (1LL << msb) + delta;
+}
+
+static int __bpf_fill_pattern(struct bpf_test *self, void *arg,
+ int dbits, int sbits, int block1, int block2,
+ int (*emit)(struct bpf_test*, void*,
+ struct bpf_insn*, s64, s64))
+{
+ static const int sgn[][2] = {{1, 1}, {1, -1}, {-1, 1}, {-1, -1}};
+ struct bpf_insn *insns;
+ int di, si, bt, db, sb;
+ int count, len, k;
+ int extra = 1 + 2;
+ int i = 0;
+
+ /* Total number of iterations for the two patterns */
+ count = (dbits - 1) * (sbits - 1) * block1 * block1 * ARRAY_SIZE(sgn);
+ count += (max(dbits, sbits) - 1) * block2 * block2 * ARRAY_SIZE(sgn);
+
+ /* Compute the maximum number of insns and allocate the buffer */
+ len = extra + count * (*emit)(self, arg, NULL, 0, 0);
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ /* Add head instruction(s) */
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, -1);
+
+ /*
+ * Pattern 1: all combinations of power-of-two magnitudes and sign,
+ * and with a block of contiguous values around each magnitude.
+ */
+ for (di = 0; di < dbits - 1; di++) /* Dst magnitudes */
+ for (si = 0; si < sbits - 1; si++) /* Src magnitudes */
+ for (k = 0; k < ARRAY_SIZE(sgn); k++) /* Sign combos */
+ for (db = -(block1 / 2);
+ db < (block1 + 1) / 2; db++)
+ for (sb = -(block1 / 2);
+ sb < (block1 + 1) / 2; sb++) {
+ s64 dst, src;
+
+ dst = value(di, db, sgn[k][0]);
+ src = value(si, sb, sgn[k][1]);
+ i += (*emit)(self, arg,
+ &insns[i],
+ dst, src);
+ }
+ /*
+ * Pattern 2: all combinations for a larger block of values
+ * for each power-of-two magnitude and sign, where the magnitude is
+ * the same for both operands.
+ */
+ for (bt = 0; bt < max(dbits, sbits) - 1; bt++) /* Magnitude */
+ for (k = 0; k < ARRAY_SIZE(sgn); k++) /* Sign combos */
+ for (db = -(block2 / 2); db < (block2 + 1) / 2; db++)
+ for (sb = -(block2 / 2);
+ sb < (block2 + 1) / 2; sb++) {
+ s64 dst, src;
+
+ dst = value(bt % dbits, db, sgn[k][0]);
+ src = value(bt % sbits, sb, sgn[k][1]);
+ i += (*emit)(self, arg, &insns[i],
+ dst, src);
+ }
+
+ /* Append tail instructions */
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ BUG_ON(i > len);
+
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = i;
+
+ return 0;
+}
+
+/*
+ * Block size parameters used in the pattern tests below. Tune as needed to
+ * increase/reduce the number of combinations tested, see the following examples.
+ * block values per operand MSB
+ * ----------------------------------------
+ * 0 none
+ * 1 (1 << MSB)
+ * 2 (1 << MSB) + [-1, 0]
+ * 3 (1 << MSB) + [-1, 0, 1]
+ */
+#define PATTERN_BLOCK1 1
+#define PATTERN_BLOCK2 5
+
+/* Number of test runs for a pattern test */
+#define NR_PATTERN_RUNS 1
+
+/*
+ * Exhaustive tests of ALU operations for all combinations of power-of-two
+ * magnitudes of the operands, both for positive and negative values. The
+ * test is designed to verify e.g. the ALU64 and ALU32 operations for JITs that
+ * emit different code depending on the magnitude of the immediate value.
+ */
+
+static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
+{
+ *res = 0;
+ switch (op) {
+ case BPF_MOV:
+ *res = v2;
+ break;
+ case BPF_AND:
+ *res = v1 & v2;
+ break;
+ case BPF_OR:
+ *res = v1 | v2;
+ break;
+ case BPF_XOR:
+ *res = v1 ^ v2;
+ break;
+ case BPF_ADD:
+ *res = v1 + v2;
+ break;
+ case BPF_SUB:
+ *res = v1 - v2;
+ break;
+ case BPF_MUL:
+ *res = v1 * v2;
+ break;
+ case BPF_DIV:
+ if (v2 == 0)
+ return false;
+ *res = div64_u64(v1, v2);
+ break;
+ case BPF_MOD:
+ if (v2 == 0)
+ return false;
+ div64_u64_rem(v1, v2, res);
+ break;
+ }
+ return true;
+}
+
+static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 7;
+
+ if (__bpf_alu_result(&res, dst, (s32)imm, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R3, res);
+ insns[i++] = BPF_ALU64_IMM(op, R1, imm);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_emit_alu32_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 7;
+
+ if (__bpf_alu_result(&res, (u32)dst, (u32)imm, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
+ insns[i++] = BPF_ALU32_IMM(op, R1, imm);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_emit_alu64_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 9;
+
+ if (__bpf_alu_result(&res, dst, src, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ i += __bpf_ld_imm64(&insns[i], R3, res);
+ insns[i++] = BPF_ALU64_REG(op, R1, R2);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 9;
+
+ if (__bpf_alu_result(&res, (u32)dst, (u32)src, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
+ insns[i++] = BPF_ALU32_REG(op, R1, R2);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_fill_alu64_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu64_imm);
+}
+
+static int __bpf_fill_alu32_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu32_imm);
+}
+
+static int __bpf_fill_alu64_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu64_reg);
+}
+
+static int __bpf_fill_alu32_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu32_reg);
+}
+
+/* ALU64 immediate operations */
+static int bpf_fill_alu64_mov_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_MOV);
+}
+
+static int bpf_fill_alu64_and_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_AND);
+}
+
+static int bpf_fill_alu64_or_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_OR);
+}
+
+static int bpf_fill_alu64_xor_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_XOR);
+}
+
+static int bpf_fill_alu64_add_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_ADD);
+}
+
+static int bpf_fill_alu64_sub_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_SUB);
+}
+
+static int bpf_fill_alu64_mul_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_MUL);
+}
+
+static int bpf_fill_alu64_div_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_DIV);
+}
+
+static int bpf_fill_alu64_mod_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_MOD);
+}
+
+/* ALU32 immediate operations */
+static int bpf_fill_alu32_mov_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_MOV);
+}
+
+static int bpf_fill_alu32_and_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_AND);
+}
+
+static int bpf_fill_alu32_or_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_OR);
+}
+
+static int bpf_fill_alu32_xor_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_XOR);
+}
+
+static int bpf_fill_alu32_add_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_ADD);
+}
+
+static int bpf_fill_alu32_sub_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_SUB);
+}
+
+static int bpf_fill_alu32_mul_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_MUL);
+}
+
+static int bpf_fill_alu32_div_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_DIV);
+}
+
+static int bpf_fill_alu32_mod_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_MOD);
+}
+
+/* ALU64 register operations */
+static int bpf_fill_alu64_mov_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_MOV);
+}
+
+static int bpf_fill_alu64_and_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_AND);
+}
+
+static int bpf_fill_alu64_or_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_OR);
+}
+
+static int bpf_fill_alu64_xor_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_XOR);
+}
+
+static int bpf_fill_alu64_add_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_ADD);
+}
+
+static int bpf_fill_alu64_sub_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_SUB);
+}
+
+static int bpf_fill_alu64_mul_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_MUL);
+}
+
+static int bpf_fill_alu64_div_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_DIV);
+}
+
+static int bpf_fill_alu64_mod_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_MOD);
+}
+
+/* ALU32 register operations */
+static int bpf_fill_alu32_mov_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_MOV);
+}
+
+static int bpf_fill_alu32_and_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_AND);
+}
+
+static int bpf_fill_alu32_or_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_OR);
+}
+
+static int bpf_fill_alu32_xor_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_XOR);
+}
+
+static int bpf_fill_alu32_add_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_ADD);
+}
+
+static int bpf_fill_alu32_sub_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_SUB);
+}
+
+static int bpf_fill_alu32_mul_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_MUL);
+}
+
+static int bpf_fill_alu32_div_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_DIV);
+}
+
+static int bpf_fill_alu32_mod_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_MOD);
+}
+
static struct bpf_test tests[] = {
{
"TAX",
@@ -8671,6 +9116,333 @@ static struct bpf_test tests[] = {
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_arsh_reg,
},
+ /* ALU64 immediate magnitudes */
+ {
+ "ALU64_MOV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mov_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_AND_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_and_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_OR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_or_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_XOR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_xor_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_ADD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_add_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_SUB_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_sub_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MUL_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mul_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_DIV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_div_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MOD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mod_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* ALU32 immediate magnitudes */
+ {
+ "ALU32_MOV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mov_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_AND_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_and_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_OR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_or_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_XOR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_xor_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_ADD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_add_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_SUB_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_sub_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MUL_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mul_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_DIV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_div_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MOD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mod_imm,
+ },
+ /* ALU64 register magnitudes */
+ {
+ "ALU64_MOV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mov_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_AND_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_and_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_OR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_or_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_XOR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_xor_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_ADD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_add_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_SUB_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_sub_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MUL_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mul_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_DIV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_div_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MOD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mod_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* ALU32 register magnitudes */
+ {
+ "ALU32_MOV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mov_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_AND_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_and_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_OR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_or_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_XOR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_xor_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_ADD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_add_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_SUB_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_sub_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MUL_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mul_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_DIV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_div_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MOD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mod_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
};
static struct net_device dev;
--
2.25.1
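To get a feel for what the pattern generator enumerates, the sketch below re-creates pattern 1 in ordinary user space for the ALU64 immediate case (dbits = 64, sbits = 32, block1 = 1) and counts the (dst, src) operand pairs. It is an illustration only and simply mirrors the loops and the value() construction from the patch.

#include <stdio.h>
#include <stdint.h>

#define BLOCK1 1    /* same value as PATTERN_BLOCK1 in the patch */

static int64_t value(int msb, int delta, int sign)
{
    return sign * (1LL << msb) + delta;
}

int main(void)
{
    static const int sgn[][2] = {{1, 1}, {1, -1}, {-1, 1}, {-1, -1}};
    int dbits = 64, sbits = 32;     /* the ALU64 immediate case */
    long count = 0;
    int di, si, k, db, sb;

    /* Pattern 1: every combination of dst/src magnitude and sign */
    for (di = 0; di < dbits - 1; di++)
        for (si = 0; si < sbits - 1; si++)
            for (k = 0; k < 4; k++)
                for (db = -(BLOCK1 / 2); db < (BLOCK1 + 1) / 2; db++)
                    for (sb = -(BLOCK1 / 2); sb < (BLOCK1 + 1) / 2; sb++) {
                        int64_t dst = value(di, db, sgn[k][0]);
                        int64_t src = value(si, sb, sgn[k][1]);

                        if (++count <= 3)   /* print a few samples */
                            printf("dst=%+lld src=%+lld\n",
                                   (long long)dst, (long long)src);
                    }

    /* 63 * 31 * 4 = 7812 operand pairs, before pattern 2 is even added */
    printf("pattern 1 pairs: %ld\n", count);
    return 0;
}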
* [PATCH bpf-next 05/13] bpf/tests: Add exhaustive tests of JMP operand magnitudes
2021-09-02 18:52 [PATCH bpf-next 00/13] bpf/tests: Extend JIT test suite coverage Johan Almbladh
` (3 preceding siblings ...)
2021-09-02 18:52 ` [PATCH bpf-next 04/13] bpf/tests: Add exhaustive tests of ALU operand magnitudes Johan Almbladh
@ 2021-09-02 18:52 ` Johan Almbladh
2021-09-02 18:52 ` [PATCH bpf-next 06/13] bpf/tests: Add staggered JMP and JMP32 tests Johan Almbladh
` (7 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: Johan Almbladh @ 2021-09-02 18:52 UTC (permalink / raw)
To: ast, daniel, andrii, iii
Cc: kafai, songliubraving, yhs, john.fastabend, kpsingh, netdev, bpf,
Johan Almbladh
This patch adds a set of tests for conditional JMP and JMP32 operations to
verify correctness for all possible magnitudes of the immediate and
register operands. Mainly intended for JIT testing.
Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
---
lib/test_bpf.c | 779 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 779 insertions(+)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 7b7f81516c26..431f8d072f78 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1101,6 +1101,384 @@ static int bpf_fill_alu32_mod_reg(struct bpf_test *self)
return __bpf_fill_alu32_reg(self, BPF_MOD);
}
+
+/*
+ * Exhaustive tests of JMP operations for all combinations of power-of-two
+ * magnitudes of the operands, both for positive and negative values. The
+ * test is designed to verify e.g. the JMP and JMP32 operations for JITs that
+ * emit different code depending on the magnitude of the immediate value.
+ */
+
+static bool __bpf_match_jmp_cond(s64 v1, s64 v2, u8 op)
+{
+ switch (op) {
+ case BPF_JSET:
+ return !!(v1 & v2);
+ case BPF_JEQ:
+ return v1 == v2;
+ case BPF_JNE:
+ return v1 != v2;
+ case BPF_JGT:
+ return (u64)v1 > (u64)v2;
+ case BPF_JGE:
+ return (u64)v1 >= (u64)v2;
+ case BPF_JLT:
+ return (u64)v1 < (u64)v2;
+ case BPF_JLE:
+ return (u64)v1 <= (u64)v2;
+ case BPF_JSGT:
+ return v1 > v2;
+ case BPF_JSGE:
+ return v1 >= v2;
+ case BPF_JSLT:
+ return v1 < v2;
+ case BPF_JSLE:
+ return v1 <= v2;
+ }
+ return false;
+}
+
+static int __bpf_emit_jmp_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond(dst, (s32)imm, op);
+ int i = 0;
+
+ insns[i++] = BPF_ALU32_IMM(BPF_MOV, R0, match);
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ insns[i++] = BPF_JMP_IMM(op, R1, imm, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
+ return 5 + 1;
+}
+
+static int __bpf_emit_jmp32_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond((s32)dst, (s32)imm, op);
+ int i = 0;
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ insns[i++] = BPF_JMP32_IMM(op, R1, imm, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
+ return 5;
+}
+
+static int __bpf_emit_jmp_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond(dst, src, op);
+ int i = 0;
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ insns[i++] = BPF_JMP_REG(op, R1, R2, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
+ return 7;
+}
+
+static int __bpf_emit_jmp32_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond((s32)dst, (s32)src, op);
+ int i = 0;
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ insns[i++] = BPF_JMP32_REG(op, R1, R2, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
+ return 7;
+}
+
+static int __bpf_fill_jmp_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp_imm);
+}
+
+static int __bpf_fill_jmp32_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp32_imm);
+}
+
+static int __bpf_fill_jmp_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp_reg);
+}
+
+static int __bpf_fill_jmp32_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp32_reg);
+}
+
+/* JMP immediate tests */
+static int bpf_fill_jmp_jset_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp_jeq_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp_jne_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp_jgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp_jge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp_jlt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp_jle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp_jsgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp_jsge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp_jslt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp_jsle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSLE);
+}
+
+/* JMP32 immediate tests */
+static int bpf_fill_jmp32_jset_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp32_jeq_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp32_jne_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp32_jgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp32_jge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp32_jlt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp32_jle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp32_jsgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp32_jsge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp32_jslt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp32_jsle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSLE);
+}
+
+/* JMP register tests */
+static int bpf_fill_jmp_jset_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp_jeq_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp_jne_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp_jgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp_jge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp_jlt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp_jle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp_jsgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp_jsge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp_jslt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp_jsle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSLE);
+}
+
+/* JMP32 register tests */
+static int bpf_fill_jmp32_jset_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp32_jeq_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp32_jne_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp32_jgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp32_jge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp32_jlt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp32_jle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp32_jsgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp32_jsge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp32_jslt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp32_jsle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSLE);
+}
+
+
static struct bpf_test tests[] = {
{
"TAX",
@@ -9278,6 +9656,7 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_mod_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
},
/* ALU64 register magnitudes */
{
@@ -9443,6 +9822,406 @@ static struct bpf_test tests[] = {
.fill_helper = bpf_fill_alu32_mod_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
+ /* JMP immediate magnitudes */
+ {
+ "JMP_JSET_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jset_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JEQ_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jeq_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JNE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jne_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jlt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jslt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* JMP register magnitudes */
+ {
+ "JMP_JSET_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jset_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JEQ_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jeq_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JNE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jne_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jlt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jslt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* JMP32 immediate magnitudes */
+ {
+ "JMP32_JSET_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jset_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JEQ_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jeq_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JNE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jne_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jlt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jslt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* JMP32 register magnitudes */
+ {
+ "JMP32_JSET_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jset_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JEQ_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jeq_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JNE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jne_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jlt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jslt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
};
static struct net_device dev;
--
2.25.1
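The (s32) casts in the JMP32 emitters above are what model the difference between the two jump classes: JMP compares the full 64-bit registers, whereas JMP32 only compares the low 32 bits. The user-space sketch below illustrates that semantic on the usual two's-complement truncation; it is not part of the patch.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    int64_t dst = 0x100000000LL;    /* non-zero, but zero in the low 32 bits */

    /* A JMP JEQ 0 would not be taken: the full 64-bit value is non-zero */
    assert(!(dst == 0));

    /*
     * A JMP32 JEQ 0 would be taken: only the low 32 bits are compared,
     * which is what the (s32) casts in __bpf_emit_jmp32_imm() and
     * __bpf_emit_jmp32_reg() reproduce when computing the expected result.
     */
    assert((int32_t)dst == 0);

    return 0;
}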
* [PATCH bpf-next 06/13] bpf/tests: Add staggered JMP and JMP32 tests
2021-09-02 18:52 [PATCH bpf-next 00/13] bpf/tests: Extend JIT test suite coverage Johan Almbladh
` (4 preceding siblings ...)
2021-09-02 18:52 ` [PATCH bpf-next 05/13] bpf/tests: Add exhaustive tests of JMP " Johan Almbladh
@ 2021-09-02 18:52 ` Johan Almbladh
2021-09-03 8:22 ` Johan Almbladh
2021-09-02 18:52 ` [PATCH bpf-next 07/13] bpf/tests: Add exhaustive test of LD_IMM64 immediate magnitudes Johan Almbladh
` (6 subsequent siblings)
12 siblings, 1 reply; 16+ messages in thread
From: Johan Almbladh @ 2021-09-02 18:52 UTC (permalink / raw)
To: ast, daniel, andrii, iii
Cc: kafai, songliubraving, yhs, john.fastabend, kpsingh, netdev, bpf,
Johan Almbladh
This patch adds a new type of jump test where the program jumps forwards
and backwards with increasing offset. It mainly tests JITs where a
relative jump may generate different JITed code depending on the offset
size, for example MIPS.
Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
---
lib/test_bpf.c | 829 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 829 insertions(+)
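A note on the size limit defined further down in this patch: MAX_STAGGERED_JMP_SIZE is presumably derived from the signed 16-bit off field of struct bpf_insn, since the farthest jump in the generated sequence spans roughly 3 * size instructions. The user-space check below is only a sketch of that reading of the constant.

#include <assert.h>

#define MAX_STAGGERED_JMP_SIZE ((0x7fff / 3) & ~1)

int main(void)
{
    /* The size parameter must be even ... */
    assert(MAX_STAGGERED_JMP_SIZE % 2 == 0);

    /*
     * ... and the longest relative jump, roughly 3 * size instructions,
     * must still fit within the 0x7fff maximum of a signed 16-bit offset.
     */
    assert(3 * MAX_STAGGERED_JMP_SIZE <= 0x7fff);

    return 0;
}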
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 431f8d072f78..ea29e42418e3 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1478,6 +1478,426 @@ static int bpf_fill_jmp32_jsle_reg(struct bpf_test *self)
return __bpf_fill_jmp32_reg(self, BPF_JSLE);
}
+/*
+ * Set up a sequence of staggered jumps, forwards and backwards with
+ * increasing offset. This tests the conversion of relative jumps to
+ * JITed native jumps. On some architectures, for example MIPS, a large
+ * PC-relative jump offset may overflow the immediate field of the native
+ * conditional branch instruction, triggering a conversion to use an
+ * absolute jump instead. Since this changes the jump offsets, another
+ * offset computation pass is necessary, and that may in turn trigger
+ * another branch conversion. This jump sequence is particularly nasty
+ * in that regard.
+ *
+ * The sequence generation is parameterized by size and jump type.
+ * The size must be even, and the expected result is always size + 1.
+ * Below is an example with size=8 and result=9.
+ *
+ * ________________________Start
+ * R0 = 0
+ * R1 = r1
+ * R2 = r2
+ * ,------- JMP +4 * 3______________Preamble: 4 insns
+ * ,----------|-ind 0- if R0 != 7 JMP 8 * 3 + 1 <--------------------.
+ * | | R0 = 8 |
+ * | ,--------|-----1- JMP +7 * 3 ------------------------.
+ * | | | if R0 != 5 JMP 7 * 3 + 1 <--------------. | |
+ * | | | R0 = 6 | | |
+ * | | ,------|-----2- JMP +5 * 3 ------------------. | |
+ * | | | | if R0 != 3 JMP 6 * 3 + 1 <--------. | | | |
+ * | | | | R0 = 4 | | | | |
+ * | | | ,----|-----3- JMP +3 * 3 ------------. | | | |
+ * | | | | | if R0 != 1 JMP 5 * 3 + 1 <--. | | | | | |
+ * | | | | | R0 = 2 | | | | | | |
+ * | | | | ,--|-----4- JMP +1 * 3 ------. | | | | | |
+ * | | | | | `------> if R0 != 0 JMP 4 * 3 + 1 1 2 3 4 5 6 7 8 loc
+ * | | | | | R0 = 1 -1 +2 -3 +4 -5 +6 -7 +8 off
+ * | | | | | ,------5- JMP -2 * 3 ---' | | | | | | |
+ * | | | | | | if R0 != 2 JMP 3 * 3 + 1 <-----' | | | | | |
+ * | | | | | | R0 = 3 | | | | | |
+ * | | | | | | JMP -4 * 3 ---------' | | | | |
+ * | | | | | | ,----6- if R0 != 4 JMP 2 * 3 + 1 <-----------' | | | |
+ * | | | | | | | R0 = 5 | | | |
+ * | | | | | | | JMP -6 * 3 ---------------' | | |
+ * | | | | | | | ,--7- if R0 != 6 JMP 1 * 3 + 1 <-----------------' | |
+ * | | | | | | | | R0 = 7 | |
+ * | | Error | | | JMP -8 * 3 ---------------------' |
+ * | | paths | | | ,8- if R0 != 8 JMP 0 * 3 + 1 <-----------------------'
+ * | | | | | | | | | R0 = 9__________________Sequence: 3 * size - 1 insns
+ * `-+-+-+-+-+-+-+-+-> EXIT____________________Return: 1 insn
+ *
+ */
+
+/* The maximum size parameter */
+#define MAX_STAGGERED_JMP_SIZE ((0x7fff / 3) & ~1)
+
+/* We use a reduced number of iterations to get a reasonable execution time */
+#define NR_STAGGERED_JMP_RUNS 10
+
+static int __bpf_fill_staggered_jumps(struct bpf_test *self,
+ const struct bpf_insn *jmp,
+ u64 r1, u64 r2)
+{
+ int size = self->test[0].result - 1;
+ int len = 4 + 3 * (size + 1);
+ struct bpf_insn *insns;
+ int off, ind;
+
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ /* Preamble */
+ insns[0] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+ insns[1] = BPF_ALU64_IMM(BPF_MOV, R1, r1);
+ insns[2] = BPF_ALU64_IMM(BPF_MOV, R2, r2);
+ insns[3] = BPF_JMP_IMM(BPF_JA, 0, 0, 3 * size / 2);
+
+ /* Sequence */
+ for (ind = 0, off = size; ind <= size; ind++, off -= 2) {
+ struct bpf_insn *ins = &insns[4 + 3 * ind];
+ int loc;
+
+ if (off == 0)
+ off--;
+
+ loc = abs(off);
+ ins[0] = BPF_JMP_IMM(BPF_JNE, R0, loc - 1,
+ 3 * (size - ind) + 1);
+ ins[1] = BPF_ALU64_IMM(BPF_MOV, R0, loc);
+ ins[2] = *jmp;
+ ins[2].off = 3 * (off - 1);
+ }
+
+ /* Return */
+ insns[len - 1] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+/* 64-bit unconditional jump */
+static int bpf_fill_staggered_ja(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0, 0);
+}
+
+/* 64-bit immediate jumps */
+static int bpf_fill_staggered_jeq_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JEQ, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jne_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JNE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
+}
+
+static int bpf_fill_staggered_jset_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSET, R1, 0x82, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
+}
+
+static int bpf_fill_staggered_jgt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGT, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
+}
+
+static int bpf_fill_staggered_jge_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jlt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLT, R1, 0x80000000, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jle_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jsgt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGT, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+static int bpf_fill_staggered_jsge_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGE, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jslt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLT, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jsle_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLE, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+/* 64-bit register jumps */
+static int bpf_fill_staggered_jeq_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JEQ, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jne_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JNE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
+}
+
+static int bpf_fill_staggered_jset_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSET, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
+}
+
+static int bpf_fill_staggered_jgt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
+}
+
+static int bpf_fill_staggered_jge_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jlt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
+}
+
+static int bpf_fill_staggered_jle_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jsgt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
+}
+
+static int bpf_fill_staggered_jsge_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
+}
+
+static int bpf_fill_staggered_jslt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
+}
+
+static int bpf_fill_staggered_jsle_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
+}
+
+/* 32-bit immediate jumps */
+static int bpf_fill_staggered_jeq32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JEQ, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jne32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JNE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
+}
+
+static int bpf_fill_staggered_jset32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSET, R1, 0x82, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
+}
+
+static int bpf_fill_staggered_jgt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGT, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
+}
+
+static int bpf_fill_staggered_jge32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jlt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLT, R1, 0x80000000, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jle32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jsgt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGT, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+static int bpf_fill_staggered_jsge32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGE, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jslt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLT, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jsle32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLE, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+/* 32-bit register jumps */
+static int bpf_fill_staggered_jeq32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JEQ, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jne32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JNE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
+}
+
+static int bpf_fill_staggered_jset32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSET, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
+}
+
+static int bpf_fill_staggered_jgt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
+}
+
+static int bpf_fill_staggered_jge32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jlt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
+}
+
+static int bpf_fill_staggered_jle32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jsgt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
+}
+
+static int bpf_fill_staggered_jsge32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
+}
+
+static int bpf_fill_staggered_jslt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
+}
+
+static int bpf_fill_staggered_jsle32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
+}
+
static struct bpf_test tests[] = {
{
@@ -10222,6 +10642,415 @@ static struct bpf_test tests[] = {
.fill_helper = bpf_fill_jmp32_jsle_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
+ /* Staggered jump sequences, immediate */
+ {
+ "Staggered jumps: JMP_JA",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_ja,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JEQ_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JNE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSET_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ /* Staggered jump sequences, register */
+ {
+ "Staggered jumps: JMP_JEQ_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JNE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSET_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ /* Staggered jump sequences, JMP32 immediate */
+ {
+ "Staggered jumps: JMP32_JEQ_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JNE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSET_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ /* Staggered jump sequences, JMP32 register */
+ {
+ "Staggered jumps: JMP32_JEQ_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JNE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSET_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
};
static struct net_device dev;
--
2.25.1
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH bpf-next 06/13] bpf/tests: Add staggered JMP and JMP32 tests
2021-09-02 18:52 ` [PATCH bpf-next 06/13] bpf/tests: Add staggered JMP and JMP32 tests Johan Almbladh
@ 2021-09-03 8:22 ` Johan Almbladh
0 siblings, 0 replies; 16+ messages in thread
From: Johan Almbladh @ 2021-09-03 8:22 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Networking, bpf, Ilya Leoshkevich
On Thu, Sep 2, 2021 at 8:52 PM Johan Almbladh
<johan.almbladh@anyfinetworks.com> wrote:
>
> This patch adds a new type of jump test where the program jumps forwards
> and backwards with increasing offset. It mainly tests JITs where a
> relative jump may generate different JITed code depending on the offset
> size, for example MIPS.
>
> Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
> ---
> lib/test_bpf.c | 829 +++++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 829 insertions(+)
>
> diff --git a/lib/test_bpf.c b/lib/test_bpf.c
> index 431f8d072f78..ea29e42418e3 100644
> --- a/lib/test_bpf.c
> +++ b/lib/test_bpf.c
> @@ -1478,6 +1478,426 @@ static int bpf_fill_jmp32_jsle_reg(struct bpf_test *self)
> return __bpf_fill_jmp32_reg(self, BPF_JSLE);
> }
>
> +/*
> + * Set up a sequence of staggered jumps, forwards and backwards with
> + * increasing offset. This tests the conversion of relative jumps to
> + * JITed native jumps. On some architectures, for example MIPS, a large
> + * PC-relative jump offset may overflow the immediate field of the native
> + * conditional branch instruction, triggering a conversion to use an
> + * absolute jump instead. Since this changes the jump offsets, another
> + * offset computation pass is necessary, and that may in turn trigger
> + * another branch conversion. This jump sequence is particularly nasty
> + * in that regard.
> + *
> + * The sequence generation is parameterized by size and jump type.
> + * The size must be even, and the expected result is always size + 1.
> + * Below is an example with size=8 and result=9.
> + *
> + * ________________________Start
> + * R0 = 0
> + * R1 = r1
> + * R2 = r2
> + * ,------- JMP +4 * 3______________Preamble: 4 insns
> + * ,----------|-ind 0- if R0 != 7 JMP 8 * 3 + 1 <--------------------.
> + * | | R0 = 8 |
> + * | ,--------|-----1- JMP +7 * 3 ------------------------.
> + * | | | if R0 != 5 JMP 7 * 3 + 1 <--------------. | |
> + * | | | R0 = 6 | | |
> + * | | ,------|-----2- JMP +5 * 3 ------------------. | |
> + * | | | | if R0 != 3 JMP 6 * 3 + 1 <--------. | | | |
> + * | | | | R0 = 4 | | | | |
> + * | | | ,----|-----3- JMP +3 * 3 ------------. | | | |
> + * | | | | | if R0 != 1 JMP 5 * 3 + 1 <--. | | | | | |
> + * | | | | | R0 = 2 | | | | | | |
> + * | | | | ,--|-----4- JMP +1 * 3 ------. | | | | | |
> + * | | | | | `------> if R0 != 0 JMP 4 * 3 + 1 1 2 3 4 5 6 7 8 loc
> + * | | | | | R0 = 1 -1 +2 -3 +4 -5 +6 -7 +8 off
> + * | | | | | ,------5- JMP -2 * 3 ---' | | | | | | |
> + * | | | | | | if R0 != 2 JMP 3 * 3 + 1 <-----' | | | | | |
> + * | | | | | | R0 = 3 | | | | | |
> + * | | | | | | JMP -4 * 3 ---------' | | | | |
> + * | | | | | | ,----6- if R0 != 4 JMP 2 * 3 + 1 <-----------' | | | |
> + * | | | | | | | R0 = 5 | | | |
> + * | | | | | | | JMP -6 * 3 ---------------' | | |
> + * | | | | | | | ,--7- if R0 != 6 JMP 1 * 3 + 1 <-----------------' | |
> + * | | | | | | | | R0 = 7 | |
> + * | | Error | | | JMP -8 * 3 ---------------------' |
> + * | | paths | | | ,8- if R0 != 8 JMP 0 * 3 + 1 <-----------------------'
> + * | | | | | | | | | R0 = 9__________________Sequence: 3 * size - 1 insns
> + * `-+-+-+-+-+-+-+-+-> EXIT____________________Return: 1 insn
> + *
> + */
I see that the error path jumps at index 1-5 in this ASCII diagram are
drawn from the wrong instruction. They should start at the "if"
condition one step below. I will fix it.
> [...]
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH bpf-next 07/13] bpf/tests: Add exhaustive test of LD_IMM64 immediate magnitudes
2021-09-02 18:52 [PATCH bpf-next 00/13] bpf/tests: Extend JIT test suite coverage Johan Almbladh
` (5 preceding siblings ...)
2021-09-02 18:52 ` [PATCH bpf-next 06/13] bpf/tests: Add staggered JMP and JMP32 tests Johan Almbladh
@ 2021-09-02 18:52 ` Johan Almbladh
2021-09-02 18:52 ` [PATCH bpf-next 08/13] bpf/tests: Add test case flag for verifier zero-extension Johan Almbladh
` (5 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: Johan Almbladh @ 2021-09-02 18:52 UTC (permalink / raw)
To: ast, daniel, andrii, iii
Cc: kafai, songliubraving, yhs, john.fastabend, kpsingh, netdev, bpf,
Johan Almbladh
This patch adds a test for the 64-bit immediate load, a two-instruction
operation, to verify correctness for all possible magnitudes of the
immediate operand. It is mainly intended for JIT testing.
Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
---
lib/test_bpf.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 64 insertions(+)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index ea29e42418e3..3af8421ceb94 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1101,6 +1101,61 @@ static int bpf_fill_alu32_mod_reg(struct bpf_test *self)
return __bpf_fill_alu32_reg(self, BPF_MOD);
}
+/*
+ * Test the two-instruction 64-bit immediate load operation for all
+ * power-of-two magnitudes of the immediate operand. For each MSB, a block
+ * of immediate values centered around the power-of-two MSB are tested,
+ * both for positive and negative values. The test is designed to verify
+ * the operation for JITs that emit different code depending on the magnitude
+ * of the immediate value. This is often the case if the native instruction
+ * immediate field width is narrower than 32 bits.
+ */
+static int bpf_fill_ld_imm64(struct bpf_test *self)
+{
+ int block = 64; /* Increase for more tests per MSB position */
+ int len = 2 + 9 * 63 * block * 2;
+ struct bpf_insn *insn;
+ int bit, adj, sign;
+ int i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (bit = 0; bit <= 62; bit++) {
+ for (adj = -block / 2; adj < block / 2; adj++) {
+ for (sign = -1; sign <= 1; sign += 2) {
+ s64 imm = sign * ((1LL << bit) + adj);
+
+ /* Perform operation */
+ i += __bpf_ld_imm64(&insn[i], R1, imm);
+
+ /* Load reference */
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm);
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3,
+ (u32)(imm >> 32));
+ insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32);
+ insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3);
+
+ /* For diagnostic purposes */
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, i);
+
+ /* Check result */
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+ }
+ }
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
/*
* Exhaustive tests of JMP operations for all combinations of power-of-two
@@ -10242,6 +10297,15 @@ static struct bpf_test tests[] = {
.fill_helper = bpf_fill_alu32_mod_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
+ /* LD_IMM64 immediate magnitudes */
+ {
+ "LD_IMM64: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64,
+ },
/* JMP immediate magnitudes */
{
"JMP_JSET_K: all immediate value magnitudes",
--
2.25.1
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH bpf-next 08/13] bpf/tests: Add test case flag for verifier zero-extension
2021-09-02 18:52 [PATCH bpf-next 00/13] bpf/tests: Extend JIT test suite coverage Johan Almbladh
` (6 preceding siblings ...)
2021-09-02 18:52 ` [PATCH bpf-next 07/13] bpf/tests: Add exhaustive test of LD_IMM64 immediate magnitudes Johan Almbladh
@ 2021-09-02 18:52 ` Johan Almbladh
2021-09-02 18:52 ` [PATCH bpf-next 09/13] bpf/tests: Add JMP tests with small offsets Johan Almbladh
` (4 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: Johan Almbladh @ 2021-09-02 18:52 UTC (permalink / raw)
To: ast, daniel, andrii, iii
Cc: kafai, songliubraving, yhs, john.fastabend, kpsingh, netdev, bpf,
Johan Almbladh
This patch adds a new flag to indicate that the verifier did insert
zero-extensions, even though the verifier is not being run for any
of the tests.
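A test case opts in through its flags field. A minimal sketch of such an
entry is shown below; it is a hypothetical example, and the real users of
the flag follow in a later patch of this series.

	{
		"Hypothetical: assume verifier zero-extension",
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R0, 0),
			BPF_EXIT_INSN(),
		},
		INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
		{ },
		{ { 0, 0 } },
	},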
Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
---
lib/test_bpf.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 3af8421ceb94..183ead9445ba 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -52,6 +52,7 @@
#define FLAG_NO_DATA BIT(0)
#define FLAG_EXPECTED_FAIL BIT(1)
#define FLAG_SKB_FRAG BIT(2)
+#define FLAG_VERIFIER_ZEXT BIT(3)
enum {
CLASSIC = BIT(6), /* Old BPF instructions only. */
@@ -11278,6 +11279,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
fp->aux->stack_depth = tests[which].stack_depth;
+ fp->aux->verifier_zext = !!(tests[which].aux &
+ FLAG_VERIFIER_ZEXT);
/* We cannot error here as we don't need type compatibility
* checks.
--
2.25.1
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH bpf-next 09/13] bpf/tests: Add JMP tests with small offsets
2021-09-02 18:52 [PATCH bpf-next 00/13] bpf/tests: Extend JIT test suite coverage Johan Almbladh
` (7 preceding siblings ...)
2021-09-02 18:52 ` [PATCH bpf-next 08/13] bpf/tests: Add test case flag for verifier zero-extension Johan Almbladh
@ 2021-09-02 18:52 ` Johan Almbladh
2021-09-02 18:52 ` [PATCH bpf-next 10/13] bpf/tests: Add JMP tests with degenerate conditional Johan Almbladh
` (3 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: Johan Almbladh @ 2021-09-02 18:52 UTC (permalink / raw)
To: ast, daniel, andrii, iii
Cc: kafai, songliubraving, yhs, john.fastabend, kpsingh, netdev, bpf,
Johan Almbladh
This patch adds a set of tests for JMP to verify that the JITed jump
offset is calculated correctly. We pretend that the verifier has inserted
any zero extensions, so that each jumped-over operation JITs to exactly
one native instruction and the JITed jump offset is fully controlled.
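As a rough illustration of why this matters, consider a simplified cost
model (an assumption for this description only; real JITs differ per
architecture) where a 32-bit ALU operation is one native instruction plus
one explicit zero-extension, unless the verifier is known to have inserted
the zero-extensions already:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical cost model: one native insn per ALU32 op, plus one
 * zero-extension insn unless the verifier already inserted it
 */
static int native_insns_per_alu32(bool verifier_zext)
{
	return verifier_zext ? 1 : 2;
}

int main(void)
{
	int jumped_over = 4;	/* as in the "offset=4" test below */

	printf("without FLAG_VERIFIER_ZEXT: %d native insns skipped\n",
	       jumped_over * native_insns_per_alu32(false));
	printf("with FLAG_VERIFIER_ZEXT:    %d native insns skipped\n",
	       jumped_over * native_insns_per_alu32(true));
	return 0;
}

With the flag set, the number of skipped native instructions matches the
number of skipped eBPF instructions in this model, which is what lets the
tests pin down exact short jump offsets.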
Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
---
lib/test_bpf.c | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 71 insertions(+)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 183ead9445ba..c3d772f663da 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -10707,6 +10707,77 @@ static struct bpf_test tests[] = {
.fill_helper = bpf_fill_jmp32_jsle_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
+ /* Short relative jumps */
+ {
+ "Short relative jump: offset=0",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "Short relative jump: offset=1",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "Short relative jump: offset=2",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "Short relative jump: offset=3",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 3),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "Short relative jump: offset=4",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 4),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
/* Staggered jump sequences, immediate */
{
"Staggered jumps: JMP_JA",
--
2.25.1
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH bpf-next 10/13] bpf/tests: Add JMP tests with degenerate conditional
2021-09-02 18:52 [PATCH bpf-next 00/13] bpf/tests: Extend JIT test suite coverage Johan Almbladh
` (8 preceding siblings ...)
2021-09-02 18:52 ` [PATCH bpf-next 09/13] bpf/tests: Add JMP tests with small offsets Johan Almbladh
@ 2021-09-02 18:52 ` Johan Almbladh
2021-09-02 18:52 ` [PATCH bpf-next 11/13] bpf/tests: Expand branch conversion JIT test Johan Almbladh
` (2 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: Johan Almbladh @ 2021-09-02 18:52 UTC (permalink / raw)
To: ast, daniel, andrii, iii
Cc: kafai, songliubraving, yhs, john.fastabend, kpsingh, netdev, bpf,
Johan Almbladh
This patch adds a set of tests for JMP and JMP32 operations where the
branch decision is known at JIT time. It mainly tests JIT behaviour.
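For reference, the sketch below spells out the decisions in plain C, using
arbitrary runtime values; it only mirrors the test names that follow and
is not part of the test suite itself.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t r1 = 1234;	/* arbitrary runtime value */
	uint64_t r2 = r1;	/* models "dst = src", the same register twice */

	/* JSET with imm = 0: r1 & 0 is always zero, so never taken */
	printf("JSET 0:  %s\n", (r1 & 0) ? "taken" : "never taken");
	/* unsigned compare against 0: JLT never taken, JGE always taken */
	printf("JLT 0:   %s\n", (r1 < 0u) ? "taken" : "never taken");
	printf("JGE 0:   %s\n", (r1 >= 0u) ? "always taken" : "never taken");
	/* unsigned compare against U32_MAX: JGT never taken, JLE always taken */
	printf("JLE max: %s\n",
	       ((uint32_t)r1 <= UINT32_MAX) ? "always taken" : "never taken");
	/* same register on both sides: JEQ/JGE/JLE always, JNE/JGT/JLT never */
	printf("JEQ r,r: %s\n", (r1 == r2) ? "always taken" : "never taken");
	printf("JNE r,r: %s\n", (r1 != r2) ? "taken" : "never taken");
	return 0;
}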
Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
---
lib/test_bpf.c | 229 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 229 insertions(+)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index c3d772f663da..b28cd815b6b7 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -10707,6 +10707,235 @@ static struct bpf_test tests[] = {
.fill_helper = bpf_fill_jmp32_jsle_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
+ /* Conditional jumps with constant decision */
+ {
+ "JMP_JSET_K: imm = 0 -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JSET, R1, 0, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JLT_K: imm = 0 -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JLT, R1, 0, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JGE_K: imm = 0 -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JGE, R1, 0, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGT_K: imm = 0xffffffff -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JGT, R1, U32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JLE_K: imm = 0xffffffff -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JLE, R1, U32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP32_JSGT_K: imm = 0x7fffffff -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSGT, R1, S32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP32_JSGE_K: imm = -0x80000000 -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSGE, R1, S32_MIN, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP32_JSLT_K: imm = -0x80000000 -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSLT, R1, S32_MIN, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP32_JSLE_K: imm = 0x7fffffff -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSLE, R1, S32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JEQ_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JEQ, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JGE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JLE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JLE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSGE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSGE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSLE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JNE_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JNE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JGT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JGT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JLT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JLT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JSGT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSGT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JSLT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSLT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
/* Short relative jumps */
{
"Short relative jump: offset=0",
--
2.25.1
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH bpf-next 11/13] bpf/tests: Expand branch conversion JIT test
2021-09-02 18:52 [PATCH bpf-next 00/13] bpf/tests: Extend JIT test suite coverage Johan Almbladh
` (9 preceding siblings ...)
2021-09-02 18:52 ` [PATCH bpf-next 10/13] bpf/tests: Add JMP tests with degenerate conditional Johan Almbladh
@ 2021-09-02 18:52 ` Johan Almbladh
2021-09-02 18:52 ` [PATCH bpf-next 12/13] bpf/tests: Add more BPF_END byte order conversion tests Johan Almbladh
2021-09-02 18:52 ` [PATCH bpf-next 13/13] bpf/tests: Add tail call limit test with external function call Johan Almbladh
12 siblings, 0 replies; 16+ messages in thread
From: Johan Almbladh @ 2021-09-02 18:52 UTC (permalink / raw)
To: ast, daniel, andrii, iii
Cc: kafai, songliubraving, yhs, john.fastabend, kpsingh, netdev, bpf,
Johan Almbladh
This patch expands the branch conversion test introduced by 66e5eb84
("bpf, tests: Add branch conversion JIT test"). The test now includes
a JMP with the maximum eBPF offset, which triggers branch conversion
for the 64-bit MIPS JIT. Additional variants cover the cases where the
branch is taken and where it is not.
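For reference, the conversion mechanism exercised here typically
rewrites a conditional branch whose target lies outside the reach of
the native offset field as an inverted short branch over an
unconditional long jump. The compilable toy sketch below illustrates
the idea; the range, mnemonics and emit helpers are invented and do
not correspond to the MIPS JIT or any other in-tree JIT.

#include <stdio.h>

#define SHORT_RANGE	0x7fff		/* assumed reach of the short form */

enum cond { EQ, NE };

static enum cond invert(enum cond c)
{
	return c == EQ ? NE : EQ;
}

static void emit_bcc(enum cond c, long off)
{
	printf("  b%s  %+ld\n", c == EQ ? "eq" : "ne", off);
}

static void emit_jump(long off)
{
	printf("  j    %+ld\n", off);
}

static void emit_cond_branch(enum cond c, long off)
{
	if (off >= -SHORT_RANGE && off <= SHORT_RANGE) {
		emit_bcc(c, off);	/* target fits the short form */
	} else {
		/* invert the condition and hop over a full-range jump */
		emit_bcc(invert(c), 2);
		emit_jump(off);
	}
}

int main(void)
{
	emit_cond_branch(EQ, 100);		/* emitted as-is */
	emit_cond_branch(EQ, 0x100000);		/* converted */
	return 0;
}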
Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
---
lib/test_bpf.c | 143 ++++++++++++++++++++++++++++++++++---------------
1 file changed, 100 insertions(+), 43 deletions(-)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index b28cd815b6b7..3eb25d4b58af 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -463,41 +463,6 @@ static int bpf_fill_stxdw(struct bpf_test *self)
return __bpf_fill_stxdw(self, BPF_DW);
}
-static int bpf_fill_long_jmp(struct bpf_test *self)
-{
- unsigned int len = BPF_MAXINSNS;
- struct bpf_insn *insn;
- int i;
-
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
- if (!insn)
- return -ENOMEM;
-
- insn[0] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
- insn[1] = BPF_JMP_IMM(BPF_JEQ, R0, 1, len - 2 - 1);
-
- /*
- * Fill with a complex 64-bit operation that expands to a lot of
- * instructions on 32-bit JITs. The large jump offset can then
- * overflow the conditional branch field size, triggering a branch
- * conversion mechanism in some JITs.
- *
- * Note: BPF_MAXINSNS of ALU64 MUL is enough to trigger such branch
- * conversion on the 32-bit MIPS JIT. For other JITs, the instruction
- * count and/or operation may need to be modified to trigger the
- * branch conversion.
- */
- for (i = 2; i < len - 1; i++)
- insn[i] = BPF_ALU64_IMM(BPF_MUL, R0, (i << 16) + i);
-
- insn[len - 1] = BPF_EXIT_INSN();
-
- self->u.ptr.insns = insn;
- self->u.ptr.len = len;
-
- return 0;
-}
-
static int __bpf_ld_imm64(struct bpf_insn insns[2], u8 reg, s64 imm64)
{
struct bpf_insn tmp[] = {BPF_LD_IMM64(reg, imm64)};
@@ -506,6 +471,73 @@ static int __bpf_ld_imm64(struct bpf_insn insns[2], u8 reg, s64 imm64)
return 2;
}
+/*
+ * Branch conversion tests. Complex operations can expand to a lot
+ * of instructions when JITed. This in turn may cause jump offsets
+ * to overflow the field size of the native instruction, triggering
+ * a branch conversion mechanism in some JITs.
+ */
+static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
+{
+ struct bpf_insn *insns;
+ int len = S16_MAX + 5;
+ int i;
+
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ i = __bpf_ld_imm64(insns, R1, 0x0123456789abcdefULL);
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insns[i++] = BPF_JMP_IMM(jmp, R0, imm, S16_MAX);
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 2);
+ insns[i++] = BPF_EXIT_INSN();
+
+ while (i < len - 1) {
+ static const int ops[] = {
+ BPF_LSH, BPF_RSH, BPF_ARSH, BPF_ADD,
+ BPF_SUB, BPF_MUL, BPF_DIV, BPF_MOD,
+ };
+ int op = ops[(i >> 1) % ARRAY_SIZE(ops)];
+
+ if (i & 1)
+ insns[i++] = BPF_ALU32_REG(op, R0, R1);
+ else
+ insns[i++] = BPF_ALU64_REG(op, R0, R1);
+ }
+
+ insns[i++] = BPF_EXIT_INSN();
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+/* Branch taken by runtime decision */
+static int bpf_fill_max_jmp_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 1);
+}
+
+/* Branch not taken by runtime decision */
+static int bpf_fill_max_jmp_not_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 0);
+}
+
+/* Branch always taken, known at JIT time */
+static int bpf_fill_max_jmp_always_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JGE, 0);
+}
+
+/* Branch never taken, known at JIT time */
+static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JLT, 0);
+}
+
/* Test an ALU shift operation for all valid shift values */
static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
u8 mode, bool alu32)
@@ -8651,14 +8683,6 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
- { /* Mainly checking JIT here. */
- "BPF_MAXINSNS: Very long conditional jump",
- { },
- INTERNAL | FLAG_NO_DATA,
- { },
- { { 0, 1 } },
- .fill_helper = bpf_fill_long_jmp,
- },
{
"JMP_JA: Jump, gap, jump, ...",
{ },
@@ -11007,6 +11031,39 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0 } },
},
+ /* Conditional branch conversions */
+ {
+ "Long conditional jump: taken at runtime",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_max_jmp_taken,
+ },
+ {
+ "Long conditional jump: not taken at runtime",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 2 } },
+ .fill_helper = bpf_fill_max_jmp_not_taken,
+ },
+ {
+ "Long conditional jump: always taken, known at JIT time",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_max_jmp_always_taken,
+ },
+ {
+ "Long conditional jump: never taken, known at JIT time",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 2 } },
+ .fill_helper = bpf_fill_max_jmp_never_taken,
+ },
/* Staggered jump sequences, immediate */
{
"Staggered jumps: JMP_JA",
--
2.25.1
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH bpf-next 12/13] bpf/tests: Add more BPF_END byte order conversion tests
2021-09-02 18:52 [PATCH bpf-next 00/13] bpf/tests: Extend JIT test suite coverage Johan Almbladh
` (10 preceding siblings ...)
2021-09-02 18:52 ` [PATCH bpf-next 11/13] bpf/tests: Expand branch conversion JIT test Johan Almbladh
@ 2021-09-02 18:52 ` Johan Almbladh
2021-09-02 18:52 ` [PATCH bpf-next 13/13] bpf/tests: Add tail call limit test with external function call Johan Almbladh
12 siblings, 0 replies; 16+ messages in thread
From: Johan Almbladh @ 2021-09-02 18:52 UTC (permalink / raw)
To: ast, daniel, andrii, iii
Cc: kafai, songliubraving, yhs, john.fastabend, kpsingh, netdev, bpf,
Johan Almbladh
This patch adds tests of the high 32 bits of 64-bit BPF_END conversions.
It also adds a mirrored set of tests where the source bytes are reversed,
so that the bytes with the MSB set now sit in the high word instead,
possibly affecting sign extension during conversion in a different way.
These tests are mainly aimed at the JITs.
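To see why the mirrored pattern matters: in 0x0123456789abcdef the
bytes with the MSB set (0x89..0xef) make up the low word, whereas in
0xfedcba9876543210 they (0xfe..0x98) make up the high word, so any
spurious sign extension shows up in the opposite half of the result.
The userspace check below prints what a plain 64-bit byte swap yields
for both constants, using __builtin_bswap64 to stand in for the swap
that the conversion reduces to when the host byte order differs from
the requested one.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t a = 0x0123456789abcdefULL;	/* MSB-set bytes in low word */
	uint64_t b = 0xfedcba9876543210ULL;	/* MSB-set bytes in high word */

	uint64_t sa = __builtin_bswap64(a);	/* 0xefcdab8967452301 */
	uint64_t sb = __builtin_bswap64(b);	/* 0x1032547698badcfe */

	printf("swap(a) high = 0x%08x low = 0x%08x\n",
	       (unsigned)(sa >> 32), (unsigned)sa);
	printf("swap(b) high = 0x%08x low = 0x%08x\n",
	       (unsigned)(sb >> 32), (unsigned)sb);
	return 0;
}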
Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
---
lib/test_bpf.c | 122 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 122 insertions(+)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 3eb25d4b58af..f138c6fad5ec 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -6746,6 +6746,67 @@ static struct bpf_test tests[] = {
{ },
{ { 0, (u32) cpu_to_be64(0x0123456789abcdefLL) } },
},
+ {
+ "ALU_END_FROM_BE 64: 0x0123456789abcdef >> 32 -> 0x01234567",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_be64(0x0123456789abcdefLL) >> 32) } },
+ },
+ /* BPF_ALU | BPF_END | BPF_FROM_BE, reversed */
+ {
+ "ALU_END_FROM_BE 16: 0xfedcba9876543210 -> 0x3210",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_be16(0x3210) } },
+ },
+ {
+ "ALU_END_FROM_BE 32: 0xfedcba9876543210 -> 0x76543210",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_be32(0x76543210) } },
+ },
+ {
+ "ALU_END_FROM_BE 64: 0xfedcba9876543210 -> 0x76543210",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) cpu_to_be64(0xfedcba9876543210ULL) } },
+ },
+ {
+ "ALU_END_FROM_BE 64: 0xfedcba9876543210 >> 32 -> 0xfedcba98",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_be64(0xfedcba9876543210ULL) >> 32) } },
+ },
/* BPF_ALU | BPF_END | BPF_FROM_LE */
{
"ALU_END_FROM_LE 16: 0x0123456789abcdef -> 0xefcd",
@@ -6783,6 +6844,67 @@ static struct bpf_test tests[] = {
{ },
{ { 0, (u32) cpu_to_le64(0x0123456789abcdefLL) } },
},
+ {
+ "ALU_END_FROM_LE 64: 0x0123456789abcdef >> 32 -> 0xefcdab89",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_le64(0x0123456789abcdefLL) >> 32) } },
+ },
+ /* BPF_ALU | BPF_END | BPF_FROM_LE, reversed */
+ {
+ "ALU_END_FROM_LE 16: 0xfedcba9876543210 -> 0x1032",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_le16(0x3210) } },
+ },
+ {
+ "ALU_END_FROM_LE 32: 0xfedcba9876543210 -> 0x10325476",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_le32(0x76543210) } },
+ },
+ {
+ "ALU_END_FROM_LE 64: 0xfedcba9876543210 -> 0x10325476",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) cpu_to_le64(0xfedcba9876543210ULL) } },
+ },
+ {
+ "ALU_END_FROM_LE 64: 0xfedcba9876543210 >> 32 -> 0x98badcfe",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_le64(0xfedcba9876543210ULL) >> 32) } },
+ },
/* BPF_ST(X) | BPF_MEM | BPF_B/H/W/DW */
{
"ST_MEM_B: Store/Load byte: max negative",
--
2.25.1
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH bpf-next 13/13] bpf/tests: Add tail call limit test with external function call
2021-09-02 18:52 [PATCH bpf-next 00/13] bpf/tests: Extend JIT test suite coverage Johan Almbladh
` (11 preceding siblings ...)
2021-09-02 18:52 ` [PATCH bpf-next 12/13] bpf/tests: Add more BPF_END byte order conversion tests Johan Almbladh
@ 2021-09-02 18:52 ` Johan Almbladh
12 siblings, 0 replies; 16+ messages in thread
From: Johan Almbladh @ 2021-09-02 18:52 UTC (permalink / raw)
To: ast, daniel, andrii, iii
Cc: kafai, songliubraving, yhs, john.fastabend, kpsingh, netdev, bpf,
Johan Almbladh
This patch adds a tail call limit test where the program also emits
a BPF_CALL to an external function prior to the tail call. It mainly
tests that JITed programs preserve their internal register state, for
example the tail call count, across such external calls.
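Conceptually, a JIT that tracks the tail call count in a caller-saved
scratch register has to spill it to the stack before a BPF_CALL and
refill it afterwards, or keep it in a callee-saved register in the
first place. The compilable toy sketch below shows the spill/refill
variant; the register name, stack offset and emit helpers are made up
and do not describe any particular arch JIT.

#include <stdint.h>
#include <stdio.h>

struct jit_ctx { int tcc_off; };	/* stack slot for the tail call count */

static void emit_store(const char *reg, int off)
{
	printf("  store %s -> sp%+d\n", reg, off);
}

static void emit_load(const char *reg, int off)
{
	printf("  load  %s <- sp%+d\n", reg, off);
}

static void emit_call(uint64_t func)
{
	printf("  call  %#llx\n", (unsigned long long)func);
}

static void emit_helper_call(struct jit_ctx *ctx, uint64_t func)
{
	emit_store("tcc", ctx->tcc_off);	/* spill before the call */
	emit_call(func);			/* may clobber scratch regs */
	emit_load("tcc", ctx->tcc_off);		/* refill afterwards */
}

int main(void)
{
	struct jit_ctx ctx = { .tcc_off = -16 };

	emit_helper_call(&ctx, 0xffffffffc0000000ULL);
	return 0;
}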
Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
---
lib/test_bpf.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 48 insertions(+), 3 deletions(-)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index f138c6fad5ec..33c3fcc4c9f8 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -12257,6 +12257,20 @@ static struct tail_call_test tail_call_tests[] = {
},
.result = MAX_TAIL_CALL_CNT + 1,
},
+ {
+ "Tail call count preserved across function calls",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, R1, 1),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_CALL_REL(0),
+ BPF_LDX_MEM(BPF_DW, R1, R10, -8),
+ BPF_ALU32_REG(BPF_MOV, R0, R1),
+ TAIL_CALL(0),
+ BPF_EXIT_INSN(),
+ },
+ .stack_depth = 8,
+ .result = MAX_TAIL_CALL_CNT + 1,
+ },
{
"Tail call error path, NULL target",
.insns = {
@@ -12279,6 +12293,29 @@ static struct tail_call_test tail_call_tests[] = {
},
};
+/*
+ * A test function to be called from a BPF program, clobbering a lot of
+ * CPU registers in the process. A JITed BPF program calling this function
+ * must save and restore any caller-saved registers it uses for internal
+ * state, for example the current tail call count.
+ */
+BPF_CALL_1(test_bpf_func, u64, arg)
+{
+ char buf[64];
+ long a = 0;
+ long b = 1;
+ long c = 2;
+ long d = 3;
+ long e = 4;
+ long f = 5;
+ long g = 6;
+ long h = 7;
+
+ return snprintf(buf, sizeof(buf),
+ "%ld %lu %lx %ld %lu %lx %ld %lu %x",
+ a, b, c, d, e, f, g, h, (int)arg);
+}
+
static void __init destroy_tail_call_tests(struct bpf_array *progs)
{
int i;
@@ -12332,16 +12369,17 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
for (i = 0; i < len; i++) {
struct bpf_insn *insn = &fp->insnsi[i];
- if (insn->imm != TAIL_CALL_MARKER)
- continue;
-
switch (insn->code) {
case BPF_LD | BPF_DW | BPF_IMM:
+ if (insn->imm != TAIL_CALL_MARKER)
+ break;
insn[0].imm = (u32)(long)progs;
insn[1].imm = ((u64)(long)progs) >> 32;
break;
case BPF_ALU | BPF_MOV | BPF_K:
+ if (insn->imm != TAIL_CALL_MARKER)
+ break;
if (insn->off == TAIL_CALL_NULL)
insn->imm = ntests;
else if (insn->off == TAIL_CALL_INVALID)
@@ -12349,6 +12387,13 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
else
insn->imm = which + insn->off;
insn->off = 0;
+ break;
+
+ case BPF_JMP | BPF_CALL:
+ if (insn->src_reg != BPF_PSEUDO_CALL)
+ break;
+ *insn = BPF_EMIT_CALL(test_bpf_func);
+ break;
}
}
--
2.25.1
^ permalink raw reply [flat|nested] 16+ messages in thread