// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 SUSE LLC */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"

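/* Tests for the verifier's precision backtracking (mark_precise /
 * mark_chain_precision) through individual instruction classes:
 * BPF_NEG, BPF_END/bswap, load-acquire/store-release, conditional
 * jumps, and atomic read-modify-write operations. precision_map below
 * is used by the tests that apply atomics to map values.
 */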
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} precision_map SEC(".maps");

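/* Adding r2 to the stack pointer ("r1 += r2") forces the verifier to
 * mark r2 precise; backtracking must know how to step through the
 * BPF_NEG instruction ("r2 = -r2") to reach the constant that
 * defined r2.
 */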
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")
__naked int bpf_neg(void)
{
	asm volatile (
		"r2 = 8;"
		"r2 = -r2;"
		"if r2 != -8 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"
		"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}

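/* Same idea for the BPF_END byte-swap-to-little/big-endian forms:
 * backtracking must handle "r2 = le16 r2" and "r2 = be16 r2".
 */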
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
__naked int bpf_end_to_le(void)
{
	asm volatile (
		"r2 = 0;"
		"r2 = le16 r2;"
		"if r2 != 0 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"
		"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (dc) r2 = be16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
__naked int bpf_end_to_be(void)
{
	asm volatile (
		"r2 = 0;"
		"r2 = be16 r2;"
		"if r2 != 0 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"
		"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}

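/* The bswap test below uses a cpuv4 ("v4") instruction, which is only
 * emitted by clang 18+ and only on the listed targets.
 */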
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
	__clang_major__ >= 18

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (d7) r2 = bswap16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
__naked int bpf_end_bswap(void)
{
	asm volatile (
		"r2 = 0;"
		"r2 = bswap16 r2;"
		"if r2 != 0 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"
		"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}

#ifdef CAN_USE_LOAD_ACQ_STORE_REL

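/* Precision must flow through load-acquire (loaded register ->
 * stack slot) and store-release (stack slot -> source register).
 * The raw encodings are emitted via __imm_insn(), which avoids
 * relying on assembler support for the mnemonics.
 */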
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (db) r2 = load_acquire((u64 *)(r10 -8))")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_load_acquire(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		".8byte %[load_acquire_insn];" /* r2 = load_acquire((u64 *)(r10 - 8)); */
		"r3 = r10;"
		"r3 += r2;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(load_acquire_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -8))
		: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r2 = r10")
__msg("mark_precise: frame0: regs=r1 stack= before 2: (79) r1 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (db) store_release((u64 *)(r10 -8), r1)")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_store_release(void)
{
	asm volatile (
		"r1 = 8;"
		".8byte %[store_release_insn];" /* store_release((u64 *)(r10 - 8), r1); */
		"r1 = *(u64 *)(r10 - 8);"
		"r2 = r10;"
		"r2 += r1;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(store_release_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -8))
		: __clobber_all);
}

#endif /* CAN_USE_LOAD_ACQ_STORE_REL */
#endif /* v4 instruction */

SEC("?raw_tp")
__success __log_level(2)
/*
 * Without the bug fix there will be no history between "last_idx 3 first_idx 3"
 * and "parent state regs=" lines. "R0=6" parts are here to help anchor
 * expected log messages to the one specific mark_chain_precision operation.
 *
 * This is quite fragile: if the verifier's checkpointing heuristic changes,
 * the expected messages might need adjusting.
 */
__msg("2: (07) r0 += 1 ; R0=6")
__msg("3: (35) if r0 >= 0xa goto pc+1")
__msg("mark_precise: frame0: last_idx 3 first_idx 3 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 2: (07) r0 += 1")
__msg("mark_precise: frame0: regs=r0 stack= before 1: (07) r0 += 1")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (05) goto pc-4")
__msg("mark_precise: frame0: regs=r0 stack= before 3: (35) if r0 >= 0xa goto pc+1")
__msg("mark_precise: frame0: parent state regs= stack=: R0=P4")
__msg("3: R0=6")
__naked int state_loop_first_last_equal(void)
{
	asm volatile (
		"r0 = 0;"
		"l0_%=:"
		"r0 += 1;"
		"r0 += 1;"
		/* every few iterations we'll have a checkpoint here with
		 * first_idx == last_idx, potentially confusing precision
		 * backtracking logic
		 */
		"if r0 >= 10 goto l1_%=;" /* checkpoint + mark_precise */
		"goto l0_%=;"
		"l1_%=:"
		"exit;"
		::: __clobber_common
	);
}

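/* Callee used by bpf_cond_op_r10() below: backtracking must handle a
 * conditional jump that compares against r10 while walking history in
 * a non-zero frame (frame1 in the expected messages).
 */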
__used __naked static void __bpf_cond_op_r10(void)
{
	asm volatile (
		"r2 = 2314885393468386424 ll;"
		"goto +0;"
		"if r2 <= r10 goto +3;"
		"if r1 >= -1835016 goto +0;"
		"if r2 <= 8 goto +0;"
		"if r3 <= 0 goto +0;"
		"exit;"
		::: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("8: (bd) if r2 <= r10 goto pc+3")
__msg("9: (35) if r1 >= 0xffe3fff8 goto pc+0")
__msg("10: (b5) if r2 <= 0x8 goto pc+0")
__msg("mark_precise: frame1: last_idx 10 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame1: regs=r2 stack= before 9: (35) if r1 >= 0xffe3fff8 goto pc+0")
__msg("mark_precise: frame1: regs=r2 stack= before 8: (bd) if r2 <= r10 goto pc+3")
__msg("mark_precise: frame1: regs=r2 stack= before 7: (05) goto pc+0")
__naked void bpf_cond_op_r10(void)
{
	asm volatile (
		"r3 = 0 ll;"
		"call __bpf_cond_op_r10;"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("3: (bf) r3 = r10")
__msg("4: (bd) if r3 <= r2 goto pc+1")
__msg("5: (b5) if r2 <= 0x8 goto pc+2")
__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bd) if r3 <= r2 goto pc+1")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
__naked void bpf_cond_op_not_r10(void)
{
	asm volatile (
		"r0 = 0;"
		"r2 = 2314885393468386424 ll;"
		"r3 = r10;"
		"if r3 <= r2 goto +1;"
		"if r2 <= 8 goto +2;"
		"r0 = 2 ll;"
		"exit;"
		::: __clobber_all);
}

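/* The lsm.s/socket_connect tests below check that the range of a
 * negated return value is tracked precisely: the hook requires R0 in
 * [-4095, 0], so -1 must be accepted and -10000 rejected, for both
 * the 32-bit (w0) and 64-bit (r0) forms of BPF_NEG.
 */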
SEC("lsm.s/socket_connect")
__success __log_level(2)
__msg("0: (b7) r0 = 1 ; R0=1")
__msg("1: (84) w0 = -w0 ; R0=0xffffffff")
__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 1: (84) w0 = -w0")
__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1")
__naked int bpf_neg_2(void)
{
	/*
	 * lsm.s/socket_connect requires a return value within [-4095, 0].
	 * Returning -1 is allowed.
	 */
	asm volatile (
		"r0 = 1;"
		"w0 = -w0;"
		"exit;"
		::: __clobber_all);
}

SEC("lsm.s/socket_connect")
__failure __msg("At program exit the register R0 has")
__naked int bpf_neg_3(void)
{
	/*
	 * lsm.s/socket_connect requires a return value within [-4095, 0].
	 * Returning -10000 is not allowed.
	 */
	asm volatile (
		"r0 = 10000;"
		"w0 = -w0;"
		"exit;"
		::: __clobber_all);
}

SEC("lsm.s/socket_connect")
__success __log_level(2)
__msg("0: (b7) r0 = 1 ; R0=1")
__msg("1: (87) r0 = -r0 ; R0=-1")
__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 1: (87) r0 = -r0")
__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1")
__naked int bpf_neg_4(void)
{
	/*
	 * lsm.s/socket_connect requires a return value within [-4095, 0].
	 * Returning -1 is allowed.
	 */
	asm volatile (
		"r0 = 1;"
		"r0 = -r0;"
		"exit;"
		::: __clobber_all);
}

SEC("lsm.s/socket_connect")
__failure __msg("At program exit the register R0 has")
__naked int bpf_neg_5(void)
{
	/*
	 * lsm.s/socket_connect requires a return value within [-4095, 0].
	 * Returning -10000 is not allowed.
	 */
	asm volatile (
		"r0 = 10000;"
		"r0 = -r0;"
		"exit;"
		::: __clobber_all);
}

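/* The remaining tests cover precision backtracking through atomic
 * read-modify-write instructions: marking the fetched value precise
 * must propagate precision into the stack slot the atomic operated
 * on, and from there to the register that originally filled it.
 */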
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_add((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_fetch_add_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = 0;"
		".8byte %[fetch_add_insn];" /* r2 = atomic_fetch_add(*(u64 *)(r10 - 8), r2) */
		"r3 = r10;"
		"r3 += r2;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(fetch_add_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_xchg((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_xchg_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = 0;"
		".8byte %[xchg_insn];" /* r2 = atomic_xchg(*(u64 *)(r10 - 8), r2) */
		"r3 = r10;"
		"r3 += r2;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(xchg_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_or((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_fetch_or_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = 0;"
		".8byte %[fetch_or_insn];" /* r2 = atomic_fetch_or(*(u64 *)(r10 - 8), r2) */
		"r3 = r10;"
		"r3 += r2;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(fetch_or_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_OR | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_and((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_fetch_and_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = 0;"
		".8byte %[fetch_and_insn];" /* r2 = atomic_fetch_and(*(u64 *)(r10 - 8), r2) */
		"r3 = r10;"
		"r3 += r2;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(fetch_and_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_xor((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_fetch_xor_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = 0;"
		".8byte %[fetch_xor_insn];" /* r2 = atomic_fetch_xor(*(u64 *)(r10 - 8), r2) */
		"r3 = r10;"
		"r3 += r2;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(fetch_xor_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_XOR | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r0 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_cmpxchg_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r0 = 0;"
		"r2 = 0;"
		".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r10 - 8), r0, r2) */
		"r3 = r10;"
		"r3 += r0;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(cmpxchg_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}

/* Regression test for dual precision: Both the fetched value (r2) and
 * a reread of the same stack slot (r3) are tracked for precision. After
 * the atomic operation, the stack slot is STACK_MISC. Thus, the ldx at
 * insn 4 does NOT set INSN_F_STACK_ACCESS. Precision for the stack slot
 * propagates solely through the atomic fetch's load side (insn 3).
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2,r3 stack= before 4: (79) r3 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_add((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_fetch_add_dual_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = 0;"
		".8byte %[fetch_add_insn];" /* r2 = atomic_fetch_add(*(u64 *)(r10 - 8), r2) */
		"r3 = *(u64 *)(r10 - 8);"
		"r4 = r2;"
		"r4 += r3;"
		"r4 &= 7;"
		"r5 = r10;"
		"r5 += r4;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(fetch_add_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r0,r3 stack= before 5: (79) r3 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r0 = 8")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_cmpxchg_dual_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r0 = 8;"
		"r2 = 0;"
		".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r10 - 8), r0, r2) */
		"r3 = *(u64 *)(r10 - 8);"
		"r4 = r0;"
		"r4 += r3;"
		"r4 &= 7;"
		"r5 = r10;"
		"r5 += r4;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(cmpxchg_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}

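/* Atomics on map values: precision for the fetched value must be
 * resolved through the atomic instruction itself rather than via the
 * "falling back to forcing all scalars precise" escape hatch.
 */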
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r1 stack= before 10: (57) r1 &= 7")
__msg("mark_precise: frame0: regs=r1 stack= before 9: (db) r1 = atomic64_fetch_add((u64 *)(r0 +0), r1)")
__not_msg("falling back to forcing all scalars precise")
__naked int bpf_atomic_fetch_add_map_precision(void)
{
	asm volatile (
		"r1 = 0;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = r10;"
		"r2 += -8;"
		"r1 = %[precision_map] ll;"
		"call %[bpf_map_lookup_elem];"
		"if r0 == 0 goto 1f;"
		"r1 = 0;"
		".8byte %[fetch_add_insn];" /* r1 = atomic_fetch_add(*(u64 *)(r0 + 0), r1) */
		"r1 &= 7;"
		"r2 = r10;"
		"r2 += r1;" /* mark_precise */
		"1: r0 = 0;"
		"exit;"
		:
		: __imm_addr(precision_map),
		  __imm(bpf_map_lookup_elem),
		  __imm_insn(fetch_add_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_0, BPF_REG_1, 0))
		: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r0 stack= before 12: (57) r0 &= 7")
__msg("mark_precise: frame0: regs=r0 stack= before 11: (db) r0 = atomic64_cmpxchg((u64 *)(r6 +0), r0, r1)")
__not_msg("falling back to forcing all scalars precise")
__naked int bpf_atomic_cmpxchg_map_precision(void)
{
	asm volatile (
		"r1 = 0;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = r10;"
		"r2 += -8;"
		"r1 = %[precision_map] ll;"
		"call %[bpf_map_lookup_elem];"
		"if r0 == 0 goto 1f;"
		"r6 = r0;"
		"r0 = 0;"
		"r1 = 0;"
		".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r6 + 0), r0, r1) */
		"r0 &= 7;"
		"r2 = r10;"
		"r2 += r0;" /* mark_precise */
		"1: r0 = 0;"
		"exit;"
		:
		: __imm_addr(precision_map),
		  __imm(bpf_map_lookup_elem),
		  __imm_insn(cmpxchg_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_6, BPF_REG_1, 0))
		: __clobber_all);
}

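/* Same as the two map-value tests above, but with 32-bit (BPF_W)
 * atomics.
 */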
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r1 stack= before 10: (57) r1 &= 7")
__msg("mark_precise: frame0: regs=r1 stack= before 9: (c3) r1 = atomic_fetch_add((u32 *)(r0 +0), r1)")
__not_msg("falling back to forcing all scalars precise")
__naked int bpf_atomic_fetch_add_32bit_precision(void)
{
	asm volatile (
		"r1 = 0;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = r10;"
		"r2 += -8;"
		"r1 = %[precision_map] ll;"
		"call %[bpf_map_lookup_elem];"
		"if r0 == 0 goto 1f;"
		"r1 = 0;"
		".8byte %[fetch_add_insn];" /* r1 = atomic_fetch_add(*(u32 *)(r0 + 0), r1) */
		"r1 &= 7;"
		"r2 = r10;"
		"r2 += r1;" /* mark_precise */
		"1: r0 = 0;"
		"exit;"
		:
		: __imm_addr(precision_map),
		  __imm(bpf_map_lookup_elem),
		  __imm_insn(fetch_add_insn,
			     BPF_ATOMIC_OP(BPF_W, BPF_ADD | BPF_FETCH, BPF_REG_0, BPF_REG_1, 0))
		: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r0 stack= before 12: (57) r0 &= 7")
__msg("mark_precise: frame0: regs=r0 stack= before 11: (c3) r0 = atomic_cmpxchg((u32 *)(r6 +0), r0, r1)")
__not_msg("falling back to forcing all scalars precise")
__naked int bpf_atomic_cmpxchg_32bit_precision(void)
{
	asm volatile (
		"r1 = 0;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = r10;"
		"r2 += -8;"
		"r1 = %[precision_map] ll;"
		"call %[bpf_map_lookup_elem];"
		"if r0 == 0 goto 1f;"
		"r6 = r0;"
		"r0 = 0;"
		"r1 = 0;"
		".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u32 *)(r6 + 0), r0, r1) */
		"r0 &= 7;"
		"r2 = r10;"
		"r2 += r0;" /* mark_precise */
		"1: r0 = 0;"
		"exit;"
		:
		: __imm_addr(precision_map),
		  __imm(bpf_map_lookup_elem),
		  __imm_insn(cmpxchg_insn,
			     BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_6, BPF_REG_1, 0))
		: __clobber_all);
}

char _license[] SEC("license") = "GPL";