xref: /linux/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c (revision a6021aa24f6417416d93318bbfa022ab229c33c8)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/bpf.h>
4 #include <bpf/bpf_helpers.h>
5 #include "bpf_misc.h"
6 
/* Check that precision marks propagate through scalar IDs.
 * Registers r{0,1,2} have the same scalar ID.
 * Range information is propagated for scalars sharing same ID.
 * Check that precision mark for r0 causes precision marks for r{1,2}
 * when range information is propagated for 'if <reg> <op> <const>' insn.
 */
SEC("socket")
__success __log_level(2)
/* first 'if' branch */
__msg("6: (0f) r3 += r0")
__msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0")
__msg("frame0: parent state regs=r0,r1,r2 stack=:")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
/* second 'if' branch */
__msg("from 4 to 5: ")
__msg("6: (0f) r3 += r0")
__msg("frame0: regs=r0 stack= before 5: (bf) r3 = r10")
__msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0")
/* parent state already has r{0,1,2} as precise */
__msg("frame0: parent state regs= stack=:")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void linked_regs_bpf_k(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	/* 'goto +0' falls through on both paths; the 'if' exists only to
	 * trigger range propagation across the linked registers
	 */
	"if r1 > 7 goto +0;"
	/* force r0 to be precise, this eventually marks r1 and r2 as
	 * precise as well because of shared IDs
	 */
	"r3 = r10;"
	"r3 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
49 
/* Registers r{0,1,2} share same ID when 'if r1 > ...' insn is processed,
 * check that verifier marks r{1,2} as precise while backtracking
 * 'if r1 > ...' with r0 already marked.
 * Here the linked register (r1) is the *source* operand of a
 * 'if <reg> <op> <reg>' comparison.
 */
SEC("socket")
__success __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
__msg("frame0: regs=r0 stack= before 5: (2d) if r1 > r3 goto pc+0")
__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:")
__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7")
__naked void linked_regs_bpf_x_src(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	"r3 = 7;"
	"if r1 > r3 goto +0;"
	/* force r0 to be precise, this eventually marks r1 and r2 as
	 * precise as well because of shared IDs
	 */
	"r4 = r10;"
	"r4 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
82 
/* Registers r{0,1,2} share same ID when 'if r1 > r3' insn is processed,
 * check that verifier marks r{0,1,2} as precise while backtracking
 * 'if r1 > r3' with r3 already marked.
 * Here precision starts from the comparison's *other* operand (r3).
 */
SEC("socket")
__success __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
__msg("frame0: regs=r3 stack= before 5: (2d) if r1 > r3 goto pc+0")
__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:")
__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7")
__naked void linked_regs_bpf_x_dst(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	"r3 = 7;"
	"if r1 > r3 goto +0;"
	/* force r3 to be precise, this eventually marks r{0,1,2} as
	 * precise as well via backtracking of the conditional above
	 */
	"r4 = r10;"
	"r4 += r3;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
115 
/* Same as linked_regs_bpf_k, but break one of the
 * links, note that r1 is absent from regs=... in __msg below.
 */
SEC("socket")
__success __log_level(2)
__msg("7: (0f) r3 += r0")
__msg("frame0: regs=r0 stack= before 6: (bf) r3 = r10")
__msg("frame0: parent state regs=r0 stack=:")
__msg("frame0: regs=r0 stack= before 5: (25) if r0 > 0x7 goto pc+0")
__msg("frame0: parent state regs=r0,r2 stack=:")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void linked_regs_broken_link(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	/* break link for r1; since r1 no longer shares the ID, the 'if'
	 * below tests r0 instead of r1 (compare with linked_regs_bpf_k)
	 */
	"r1 = 0;"
	"if r0 > 7 goto +0;"
	/* force r0 to be precise,
	 * this eventually marks r2 as precise because of shared IDs
	 */
	"r3 = r10;"
	"r3 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
152 
/* Check that precision marks propagate through scalar IDs.
 * Use the same scalar ID in multiple stack frames, check that
 * precision information is propagated up the call stack.
 * Call chain: main -> precision_many_frames__foo -> precision_many_frames__bar.
 */
SEC("socket")
__success __log_level(2)
__msg("12: (0f) r2 += r1")
/* Current state */
__msg("frame2: last_idx 12 first_idx 11 subseq_idx -1 ")
__msg("frame2: regs=r1 stack= before 11: (bf) r2 = r10")
__msg("frame2: parent state regs=r1 stack=")
__msg("frame1: parent state regs= stack=")
__msg("frame0: parent state regs= stack=")
/* Parent state */
__msg("frame2: last_idx 10 first_idx 10 subseq_idx 11 ")
__msg("frame2: regs=r1 stack= before 10: (25) if r1 > 0x7 goto pc+0")
__msg("frame2: parent state regs=r1 stack=")
/* frame1.r{6,7} are marked because mark_precise_scalar_ids()
 * looks for all registers with frame2.r1.id in the current state
 */
__msg("frame1: parent state regs=r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame2: last_idx 8 first_idx 8 subseq_idx 10")
__msg("frame2: regs=r1 stack= before 8: (85) call pc+1")
/* frame1.r1 is marked because of backtracking of call instruction */
__msg("frame1: parent state regs=r1,r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 7 first_idx 6 subseq_idx 8")
__msg("frame1: regs=r1,r6,r7 stack= before 7: (bf) r7 = r1")
__msg("frame1: regs=r1,r6 stack= before 6: (bf) r6 = r1")
__msg("frame1: parent state regs=r1 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 4 first_idx 4 subseq_idx 6")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+1")
__msg("frame0: parent state regs=r1,r6 stack=")
/* Parent state */
__msg("frame0: last_idx 3 first_idx 1 subseq_idx 4")
__msg("frame0: regs=r1,r6 stack= before 3: (bf) r6 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_many_frames(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r6.id */
	"r1 = r0;"
	"r6 = r0;"
	"call precision_many_frames__foo;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
212 
/* Middle frame of the precision_many_frames call chain: copies the
 * shared-ID scalar in r1 into r{6,7} and calls one level deeper.
 */
static __naked __noinline __used
void precision_many_frames__foo(void)
{
	asm volatile (
	/* conflate one of the register numbers (r6) with outer frame,
	 * to verify that those are tracked independently
	 */
	"r6 = r1;"
	"r7 = r1;"
	"call precision_many_frames__bar;"
	"exit"
	::: __clobber_all);
}
226 
/* Innermost frame of the precision_many_frames call chain: triggers
 * range propagation on r1 and then forces r1 to be precise.
 */
static __naked __noinline __used
void precision_many_frames__bar(void)
{
	asm volatile (
	"if r1 > 7 goto +0;"
	/* force r1 to be precise, this eventually marks:
	 * - bar frame r1
	 * - foo frame r{1,6,7}
	 * - main frame r{1,6}
	 */
	"r2 = r10;"
	"r2 += r1;"
	"r0 = 0;"
	"exit;"
	::: __clobber_all);
}
243 
/* Check that scalars with the same IDs are marked precise on stack as
 * well as in registers.
 */
SEC("socket")
__success __log_level(2)
__msg("11: (0f) r2 += r1")
/* foo frame */
__msg("frame1: regs=r1 stack= before 10: (bf) r2 = r10")
__msg("frame1: regs=r1 stack= before 9: (25) if r1 > 0x7 goto pc+0")
__msg("frame1: regs=r1 stack=-8,-16 before 8: (7b) *(u64 *)(r10 -16) = r1")
__msg("frame1: regs=r1 stack=-8 before 7: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+2")
/* main frame */
__msg("frame0: regs=r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_stack(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == fp[-8].id */
	"r1 = r0;"
	"*(u64*)(r10 - 8) = r1;"
	"call precision_stack__foo;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
277 
/* Callee frame for precision_stack: spills the shared-ID scalar to two
 * stack slots and forces precision on r1.
 */
static __naked __noinline __used
void precision_stack__foo(void)
{
	asm volatile (
	/* conflate one of the stack slot offsets (fp[-8]) with outer frame,
	 * to verify that those are tracked independently
	 */
	"*(u64*)(r10 - 8) = r1;"
	"*(u64*)(r10 - 16) = r1;"
	"if r1 > 7 goto +0;"
	/* force r1 to be precise, this eventually marks:
	 * - foo frame r1,fp{-8,-16}
	 * - main frame r1,fp{-8}
	 */
	"r2 = r10;"
	"r2 += r1;"
	"exit"
	::: __clobber_all);
}
297 
/* Use two separate scalar IDs to check that these are propagated
 * independently.
 */
SEC("socket")
__success __log_level(2)
/* r{6,7} */
__msg("12: (0f) r3 += r7")
__msg("frame0: regs=r7 stack= before 11: (bf) r3 = r10")
__msg("frame0: regs=r7 stack= before 9: (25) if r7 > 0x7 goto pc+0")
/* ... skip some insns ... */
__msg("frame0: regs=r6,r7 stack= before 3: (bf) r7 = r0")
__msg("frame0: regs=r0,r6 stack= before 2: (bf) r6 = r0")
/* r{8,9} */
__msg("13: (0f) r3 += r9")
__msg("frame0: regs=r9 stack= before 12: (0f) r3 += r7")
/* ... skip some insns ... */
__msg("frame0: regs=r9 stack= before 10: (25) if r9 > 0x7 goto pc+0")
__msg("frame0: regs=r8,r9 stack= before 7: (bf) r9 = r0")
__msg("frame0: regs=r0,r8 stack= before 6: (bf) r8 = r0")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_two_ids(void)
{
	asm volatile (
	/* r6 = random number up to 0xff
	 * r6.id == r7.id
	 */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r6 = r0;"
	"r7 = r0;"
	/* same, but for r{8,9} */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r8 = r0;"
	"r9 = r0;"
	/* clear r0 id */
	"r0 = 0;"
	/* propagate equal scalars precision */
	"if r7 > 7 goto +0;"
	"if r9 > 7 goto +0;"
	"r3 = r10;"
	/* force r7 to be precise, this also marks r6 */
	"r3 += r7;"
	/* force r9 to be precise, this also marks r8 */
	"r3 += r9;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
348 
SEC("socket")
__success __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
/* check that r0 and r6 have different IDs after 'if',
 * collect_linked_regs() can't tie more than 6 registers for a single insn.
 */
__msg("8: (25) if r0 > 0x7 goto pc+0         ; R0=scalar(id=1")
__msg("9: (bf) r6 = r6                       ; R6_w=scalar(id=2")
/* check that r{0-5} are marked precise after 'if' */
__msg("frame0: regs=r0 stack= before 8: (25) if r0 > 0x7 goto pc+0")
__msg("frame0: parent state regs=r0,r1,r2,r3,r4,r5 stack=:")
__naked void linked_regs_too_many_regs(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r{0-6} IDs */
	"r1 = r0;"
	"r2 = r0;"
	"r3 = r0;"
	"r4 = r0;"
	"r5 = r0;"
	"r6 = r0;"
	/* propagate range for r{0-6} */
	"if r0 > 7 goto +0;"
	/* make r6 appear in the log */
	"r6 = r6;"
	/* force r0 to be precise,
	 * this would cause r{0-5} to be precise because of shared IDs
	 * (r6 is excluded: it exceeded the linked-registers limit above)
	 */
	"r7 = r10;"
	"r7 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
388 
/* Check that marking r7 precise propagates to r8 (shared ID) and to r0
 * (via the 'if r8 >= r0' conditional), even though the r7/r8 link is
 * broken later by 'r8 += r8'. The program deliberately reaches a
 * division by zero, hence __failure with the "div by zero" message.
 */
SEC("socket")
__failure __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
__msg("regs=r7 stack= before 5: (3d) if r8 >= r0")
__msg("parent state regs=r0,r7,r8")
__msg("regs=r0,r7,r8 stack= before 4: (25) if r0 > 0x1")
__msg("div by zero")
__naked void linked_regs_broken_link_2(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"r7 = r0;"
	"r8 = r0;"
	"call %[bpf_get_prandom_u32];"
	"if r0 > 1 goto +0;"
	/* r7.id == r8.id,
	 * thus r7 precision implies r8 precision,
	 * which implies r0 precision because of the conditional below.
	 */
	"if r8 >= r0 goto 1f;"
	/* break id relation between r7 and r8 */
	"r8 += r8;"
	/* make r7 precise */
	"if r7 == 0 goto 1f;"
	"r0 /= 0;"
"1:"
	"r0 = 42;"
	"exit;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
421 
/* Check that mark_chain_precision() for one of the conditional jump
 * operands does not trigger equal scalars precision propagation.
 */
SEC("socket")
__success __log_level(2)
__msg("3: (25) if r1 > 0x100 goto pc+0")
__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0")
__naked void cjmp_no_linked_regs_trigger(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id */
	"r1 = r0;"
	/* the jump below would be predicted, thus r1 would be marked precise,
	 * this should not imply precision mark for r0
	 */
	"if r1 > 256 goto +0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
447 
/* Verify that check_ids() is used by regsafe() for scalars.
 *
 * r9 = ... some pointer with range X ...
 * r6 = ... unbound scalar ID=a ...
 * r7 = ... unbound scalar ID=b ...
 * if (r6 > r7) goto +1
 * r7 = r6
 * if (r7 > X) goto exit
 * r9 += r6
 * ... access memory using r9 ...
 *
 * The memory access is safe only if r7 is bounded,
 * which is true for one branch and not true for another.
 */
SEC("socket")
__failure __msg("register with unbounded min value")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_in_regsafe(void)
{
	asm volatile (
	/* Bump allocated stack */
	"r1 = 0;"
	"*(u64*)(r10 - 8) = r1;"
	/* r9 = pointer to stack */
	"r9 = r10;"
	"r9 += -8;"
	/* r7 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r7 = r0;"
	/* r6 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	/* if r6 > r7 is an unpredictable jump */
	"if r6 > r7 goto l1_%=;"
	/* tie r6.id == r7.id on the fall-through path */
	"r7 = r6;"
"l1_%=:"
	/* if r7 > 4 ...; transfers range to r6 on one execution path
	 * but does not transfer on another
	 */
	"if r7 > 4 goto l2_%=;"
	/* Access memory at r9[r6], r6 is not always bounded */
	"r9 += r6;"
	"r0 = *(u8*)(r9 + 0);"
"l2_%=:"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
498 
/* Similar to check_ids_in_regsafe.
 * The l0 could be reached in two states:
 *
 *   (1) r6{.id=A}, r7{.id=A}, r8{.id=B}
 *   (2) r6{.id=B}, r7{.id=A}, r8{.id=B}
 *
 * Where (2) is not safe, as "r7 > 4" check won't propagate range for it.
 * This example would be considered safe without changes to
 * mark_chain_precision() to track scalar values with equal IDs.
 */
SEC("socket")
__failure __msg("register with unbounded min value")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_in_regsafe_2(void)
{
	asm volatile (
	/* Bump allocated stack */
	"r1 = 0;"
	"*(u64*)(r10 - 8) = r1;"
	/* r9 = pointer to stack */
	"r9 = r10;"
	"r9 += -8;"
	/* r8 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r8 = r0;"
	/* r7 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r7 = r0;"
	/* r6 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	/* scratch .id from r0 */
	"r0 = 0;"
	/* if r6 > r7 is an unpredictable jump */
	"if r6 > r7 goto l1_%=;"
	/* tie r6 and r7 .id */
	"r6 = r7;"
"l0_%=:"
	/* if r7 > 4 exit(0) */
	"if r7 > 4 goto l2_%=;"
	/* Access memory at r9[r6] */
	"r9 += r6;"
	"r0 = *(u8*)(r9 + 0);"
"l2_%=:"
	"r0 = 0;"
	"exit;"
"l1_%=:"
	/* tie r6 and r8 .id */
	"r6 = r8;"
	"goto l0_%=;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
553 
/* Check that scalar IDs *are not* generated on register to register
 * assignments if source register is a constant.
 *
 * If such IDs *are* generated the 'l1' below would be reached in
 * two states:
 *
 *   (1) r1{.id=A}, r2{.id=A}
 *   (2) r1{.id=C}, r2{.id=C}
 *
 * Thus forcing 'if r1 == r2' verification twice.
 */
SEC("socket")
__success __log_level(2)
__msg("11: (1d) if r3 == r4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	/* unpredictable jump */
	"if r0 > 7 goto l0_%=;"
	/* possibly generate same scalar ids for r3 and r4 */
	"r1 = 0;"
	"r1 = r1;"
	"r3 = r1;"
	"r4 = r1;"
	"goto l1_%=;"
"l0_%=:"
	/* possibly generate different scalar ids for r3 and r4 */
	"r1 = 0;"
	"r2 = 0;"
	"r3 = r1;"
	"r4 = r2;"
"l1_%=:"
	/* predictable jump, marks r3 and r4 precise */
	"if r3 == r4 goto +0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
599 
/* Same as no_scalar_id_for_const() but for 32-bit values */
SEC("socket")
__success __log_level(2)
__msg("11: (1e) if w3 == w4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const32(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	/* unpredictable jump */
	"if r0 > 7 goto l0_%=;"
	/* possibly generate same scalar ids for r3 and r4 */
	"w1 = 0;"
	"w1 = w1;"
	"w3 = w1;"
	"w4 = w1;"
	"goto l1_%=;"
"l0_%=:"
	/* possibly generate different scalar ids for r3 and r4 */
	"w1 = 0;"
	"w2 = 0;"
	"w3 = w1;"
	"w4 = w2;"
"l1_%=:"
	/* predictable jump, marks r3 and r4 precise */
	"if w3 == w4 goto +0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
635 
/* Check that unique scalar IDs are ignored when new verifier state is
 * compared to cached verifier state. For this test:
 * - cached state has no id on r1
 * - new state has a unique id on r1
 */
SEC("socket")
__success __log_level(2)
__msg("6: (25) if r6 > 0x7 goto pc+1")
__msg("7: (57) r1 &= 255")
__msg("8: (bf) r2 = r10")
__msg("from 6 to 8: safe")
__msg("processed 12 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void ignore_unique_scalar_ids_cur(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* r1.id == r0.id */
	"r1 = r0;"
	/* make r1.id unique */
	"r0 = 0;"
	"if r6 > 7 goto l0_%=;"
	/* clear r1 id, but keep the range compatible */
	"r1 &= 0xff;"
"l0_%=:"
	/* get here in two states:
	 * - first: r1 has no id (cached state)
	 * - second: r1 has a unique id (should be considered equivalent)
	 */
	"r2 = r10;"
	"r2 += r1;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
675 
/* Check that unique scalar IDs are ignored when new verifier state is
 * compared to cached verifier state. For this test:
 * - cached state has a unique id on r1
 * - new state has no id on r1
 */
SEC("socket")
__success __log_level(2)
__msg("6: (25) if r6 > 0x7 goto pc+1")
__msg("7: (05) goto pc+1")
__msg("9: (bf) r2 = r10")
__msg("9: safe")
__msg("processed 13 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void ignore_unique_scalar_ids_old(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* r1.id == r0.id */
	"r1 = r0;"
	/* make r1.id unique */
	"r0 = 0;"
	"if r6 > 7 goto l1_%=;"
	"goto l0_%=;"
"l1_%=:"
	/* clear r1 id, but keep the range compatible */
	"r1 &= 0xff;"
"l0_%=:"
	/* get here in two states:
	 * - first: r1 has a unique id (cached state)
	 * - second: r1 has no id (should be considered equivalent)
	 */
	"r2 = r10;"
	"r2 += r1;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
717 
/* Check that two different scalar IDs in a verified state can't be
 * mapped to the same scalar ID in current state.
 */
SEC("socket")
__success __log_level(2)
/* The exit instruction should be reachable from two states,
 * use two matches and "processed .. insns" to ensure this.
 */
__msg("13: (95) exit")
__msg("13: (95) exit")
__msg("processed 18 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void two_old_ids_one_cur_id(void)
{
	asm volatile (
	/* Give unique scalar IDs to r{6,7} */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r6 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r7 = r0;"
	"r0 = 0;"
	/* Maybe make r{6,7} IDs identical */
	"if r6 > r7 goto l0_%=;"
	"goto l1_%=;"
"l0_%=:"
	"r6 = r7;"
"l1_%=:"
	/* Mark r{6,7} precise.
	 * Get here in two states:
	 * - first:  r6{.id=A}, r7{.id=B} (cached state)
	 * - second: r6{.id=A}, r7{.id=A}
	 * Currently we don't want to consider such states equivalent.
	 * Thus "exit;" would be verified twice.
	 */
	"r2 = r10;"
	"r2 += r6;"
	"r2 += r7;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
762 
SEC("socket")
/* Note the flag, see verifier.c:opt_subreg_zext_lo32_rnd_hi32() */
__flag(BPF_F_TEST_RND_HI32)
__success
/* This test was added because of a bug in verifier.c:sync_linked_regs(),
 * upon range propagation it destroyed subreg_def marks for registers.
 * The subreg_def mark is used to decide whether zero extension instructions
 * are needed when register is read. When BPF_F_TEST_RND_HI32 is set it
 * also causes generation of statements to randomize upper halves of
 * read registers.
 *
 * The test is written in a way to return an upper half of a register
 * that is affected by range propagation and must have its subreg_def
 * preserved. This gives a return value of 0 and leads to undefined
 * return value if subreg_def mark is not preserved.
 */
__retval(0)
/* Check that verifier believes r1/r0 are zero at exit */
__log_level(2)
__msg("4: (77) r1 >>= 32                     ; R1_w=0")
__msg("5: (bf) r0 = r1                       ; R0_w=0 R1_w=0")
__msg("6: (95) exit")
__msg("from 3 to 4")
__msg("4: (77) r1 >>= 32                     ; R1_w=0")
__msg("5: (bf) r0 = r1                       ; R0_w=0 R1_w=0")
__msg("6: (95) exit")
/* Verify that statements to randomize upper half of r1 have not been
 * generated.
 */
__xlated("call unknown")
__xlated("r0 &= 2147483647")
__xlated("w1 = w0")
/* This is how disasm.c prints BPF_ZEXT_REG at the moment, x86 and arm64
 * are the only CI archs that do not need zero extension for subregs.
 */
#if !defined(__TARGET_ARCH_x86) && !defined(__TARGET_ARCH_arm64)
__xlated("w1 = w1")
#endif
__xlated("if w0 < 0xa goto pc+0")
__xlated("r1 >>= 32")
__xlated("r0 = r1")
__xlated("exit")
__naked void linked_regs_and_subreg_def(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	/* make sure r0 is in 32-bit range, otherwise w1 = w0 won't
	 * assign same IDs to registers.
	 */
	"r0 &= 0x7fffffff;"
	/* link w1 and w0 via ID */
	"w1 = w0;"
	/* 'if' statement propagates range info from w0 to w1,
	 * but should not affect w1->subreg_def property.
	 */
	"if w0 < 10 goto +0;"
	/* r1 is read here, on archs that require subreg zero
	 * extension this would cause zext patch generation.
	 */
	"r1 >>= 32;"
	"r0 = r1;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
829 
/* License declaration required for BPF programs using GPL-only helpers */
char _license[] SEC("license") = "GPL";
831