xref: /linux/tools/testing/selftests/bpf/verifier/atomic_or.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
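/*
 * Tests for the BPF_ATOMIC bitwise-OR instruction, with and without the
 * BPF_FETCH flag. Like the other files in this directory, this one is
 * included verbatim into the struct bpf_test array in test_verifier.c;
 * for each ACCEPT entry the harness also executes the program and
 * compares r0 on exit against the expected .retval (0 by default).
 */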
{
	"BPF_ATOMIC OR without fetch",
	.insns = {
		/* val = 0x110; */
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
		/* atomic_or(&val, 0x011); */
		BPF_MOV64_IMM(BPF_REG_1, 0x011),
		BPF_ATOMIC_OP(BPF_DW, BPF_OR, BPF_REG_10, BPF_REG_1, -8),
		/* if (val != 0x111) exit(2); */
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x111, 2),
		BPF_MOV64_IMM(BPF_REG_0, 2),
		BPF_EXIT_INSN(),
		/* r1 should not be clobbered, no BPF_FETCH flag */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x011, 1),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
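/*
 * A rough C equivalent of the test above, assuming the usual GCC/clang
 * __sync builtin: when the return value of __sync_fetch_and_or() is
 * discarded, clang's BPF backend is expected to emit the fetch-less
 * BPF_OR form exercised here, which must leave the source register
 * (r1) intact.
 *
 *	u64 val = 0x110;
 *	__sync_fetch_and_or(&val, 0x011);
 *	if (val != 0x111)
 *		return 2;
 *	return 0;
 */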
{
	"BPF_ATOMIC OR with fetch",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 123),
		/* val = 0x110; */
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
		/* old = atomic_fetch_or(&val, 0x011); */
		BPF_MOV64_IMM(BPF_REG_1, 0x011),
		BPF_ATOMIC_OP(BPF_DW, BPF_OR | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
		/* if (old != 0x110) exit(3); */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
		BPF_MOV64_IMM(BPF_REG_0, 3),
		BPF_EXIT_INSN(),
		/* if (val != 0x111) exit(2); */
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x111, 2),
		BPF_MOV64_IMM(BPF_REG_0, 2),
		BPF_EXIT_INSN(),
		/* Check R0 wasn't clobbered (for fear of x86 JIT bug) */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 123, 2),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
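/*
 * With BPF_FETCH set, the old value is returned in the source register
 * (r1 above), while r0 must stay untouched. For reference,
 * BPF_ATOMIC_OP() from tools/include/linux/filter.h builds the
 * instruction roughly as:
 *
 *	((struct bpf_insn) {
 *		.code    = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,
 *		.dst_reg = DST,
 *		.src_reg = SRC,
 *		.off     = OFF,
 *		.imm     = OP })
 *
 * i.e. the operation (here BPF_OR | BPF_FETCH) travels in the
 * immediate field, not in the opcode byte.
 */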
{
	"BPF_ATOMIC OR with fetch 32bit",
	.insns = {
		/* r0 = (s64) -1 */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
		/* val = 0x110; */
		BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x110),
		/* old = atomic_fetch_or(&val, 0x011); */
		BPF_MOV32_IMM(BPF_REG_1, 0x011),
		BPF_ATOMIC_OP(BPF_W, BPF_OR | BPF_FETCH, BPF_REG_10, BPF_REG_1, -4),
		/* if (old != 0x110) exit(3); */
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
		BPF_MOV32_IMM(BPF_REG_0, 3),
		BPF_EXIT_INSN(),
		/* if (val != 0x111) exit(2); */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -4),
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x111, 2),
		BPF_MOV32_IMM(BPF_REG_0, 2),
		BPF_EXIT_INSN(),
		/* Check R0 wasn't clobbered (for fear of x86 JIT bug)
		 * It should be -1 so add 1 to get exit code.
		 */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
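/*
 * Same shape as the previous test, but through the 32-bit encoding:
 * BPF_W touches only the four bytes at r10-4, and the fetched old value
 * comes back zero-extended in the 64-bit r1. A rough C equivalent,
 * again assuming the __sync builtin:
 *
 *	u32 val = 0x110;
 *	u32 old = __sync_fetch_and_or(&val, 0x011);
 *	if (old != 0x110)
 *		return 3;
 */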
{
	"BPF_W atomic_fetch_or should zero top 32 bits",
	.insns = {
		/* r1 = U64_MAX; */
		BPF_MOV64_IMM(BPF_REG_1, 0),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
		/* u64 val = r1; */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
		/* r1 = (u32)atomic_fetch_or((u32 *)&val, 2); */
		BPF_MOV32_IMM(BPF_REG_1, 2),
		BPF_ATOMIC_OP(BPF_W, BPF_OR | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
		/* r2 = 0x00000000FFFFFFFF; */
		BPF_MOV64_IMM(BPF_REG_2, 1),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 1),
		/* if (r2 != r1) exit(r1); */
		BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_1, 2),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
		BPF_EXIT_INSN(),
		/* exit(0); */
		BPF_MOV32_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
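/*
 * Worked expectation for the test above: val starts as
 * 0xFFFFFFFFFFFFFFFF, the BPF_W fetch returns the old low word, and
 * after zero-extension r1 must equal 0x00000000FFFFFFFF, the constant
 * built up in r2. A JIT that sign-extended the fetched value (or
 * otherwise dirtied the upper 32 bits of r1) would fail the compare,
 * and the program would exit with the bad value instead of 0.
 */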