xref: /linux/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c (revision 8b7f4cd3ac300cad4446eeb4c9eb69d02ef52d6c)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/bpf.h>
4 #include <bpf/bpf_helpers.h>
5 #include "bpf_misc.h"
6 
/* Check that precision marks propagate through scalar IDs.
 * Registers r{0,1,2} have the same scalar ID.
 * Range information is propagated for scalars sharing same ID.
 * Check that precision mark for r0 causes precision marks for r{1,2}
 * when range information is propagated for 'if <reg> <op> <const>' insn.
 */
SEC("socket")
__success __log_level(2)
/* first 'if' branch */
__msg("6: (0f) r3 += r0")
__msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0")
__msg("frame0: parent state regs=r0,r1,r2 stack=:")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
/* second 'if' branch */
__msg("from 4 to 5: ")
__msg("6: (0f) r3 += r0")
__msg("frame0: regs=r0 stack= before 5: (bf) r3 = r10")
__msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0")
/* parent state already has r{0,1,2} as precise */
__msg("frame0: parent state regs= stack=:")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void linked_regs_bpf_k(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	/* 'goto +0' falls through on both branches; the insn exists only
	 * to trigger range propagation for registers linked to r1
	 */
	"if r1 > 7 goto +0;"
	/* force r0 to be precise, this eventually marks r1 and r2 as
	 * precise as well because of shared IDs
	 */
	"r3 = r10;"
	"r3 += r0;"
	/* Mark r1 and r2 as alive. */
	"r1 = r1;"
	"r2 = r2;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
52 
/* Registers r{0,1,2} share same ID when 'if r1 > ...' insn is processed,
 * check that verifier marks r{1,2} as precise while backtracking
 * 'if r1 > ...' with r0 already marked.
 */
SEC("socket")
__success __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
__msg("frame0: regs=r0 stack= before 5: (2d) if r1 > r3 goto pc+0")
__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:")
__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7")
__naked void linked_regs_bpf_x_src(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	"r3 = 7;"
	/* register-to-register variant of the check: src operand r1 is linked */
	"if r1 > r3 goto +0;"
	/* force r0 to be precise, this eventually marks r1 and r2 as
	 * precise as well because of shared IDs
	 */
	"r4 = r10;"
	"r4 += r0;"
	/* Mark r1 and r2 as alive. */
	"r1 = r1;"
	"r2 = r2;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
88 
/* Registers r{0,1,2} share same ID when 'if r1 > r3' insn is processed,
 * check that verifier marks r{0,1,2} as precise while backtracking
 * 'if r1 > r3' with r3 already marked.
 */
SEC("socket")
__success __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
__msg("frame0: regs=r3 stack= before 5: (2d) if r1 > r3 goto pc+0")
__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:")
__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7")
__naked void linked_regs_bpf_x_dst(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	"r3 = 7;"
	"if r1 > r3 goto +0;"
	/* force r3 to be precise; backtracking 'if r1 > r3' with r3 marked
	 * eventually marks r0, r1 and r2 as precise as well because of
	 * shared IDs (see header comment above)
	 */
	"r4 = r10;"
	"r4 += r3;"
	/* Mark r0, r1 and r2 as alive. */
	"r0 = r0;"
	"r1 = r1;"
	"r2 = r2;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
125 
/* Same as linked_regs_bpf_k, but break one of the
 * links, note that r1 is absent from regs=... in __msg below.
 */
SEC("socket")
__success __log_level(2)
__msg("7: (0f) r3 += r0")
__msg("frame0: regs=r0 stack= before 6: (bf) r3 = r10")
__msg("frame0: parent state regs=r0 stack=:")
__msg("frame0: regs=r0 stack= before 5: (25) if r0 > 0x7 goto pc+0")
__msg("frame0: parent state regs=r0,r2 stack=:")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void linked_regs_broken_link(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	/* break link for r1, this is the only line that differs
	 * compared to the previous test
	 */
	"r1 = 0;"
	"if r0 > 7 goto +0;"
	/* force r0 to be precise,
	 * this eventually marks r2 as precise because of shared IDs
	 * (r1 no longer shares the ID, so it stays imprecise)
	 */
	"r3 = r10;"
	"r3 += r0;"
	/* Mark r1 and r2 as alive. */
	"r1 = r1;"
	"r2 = r2;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
165 
/* Check that precision marks propagate through scalar IDs.
 * Use the same scalar ID in multiple stack frames, check that
 * precision information is propagated up the call stack.
 */
SEC("socket")
__success __log_level(2)
__msg("17: (0f) r2 += r1")
/* Current state */
__msg("frame2: last_idx 17 first_idx 14 subseq_idx -1 ")
__msg("frame2: regs=r1 stack= before 16: (bf) r2 = r10")
__msg("frame2: parent state regs=r1 stack=")
__msg("frame1: parent state regs= stack=")
__msg("frame0: parent state regs= stack=")
/* Parent state */
__msg("frame2: last_idx 13 first_idx 13 subseq_idx 14 ")
__msg("frame2: regs=r1 stack= before 13: (25) if r1 > 0x7 goto pc+0")
__msg("frame2: parent state regs=r1 stack=")
/* frame1.r{6,7} are marked because mark_precise_scalar_ids()
 * looks for all registers with frame2.r1.id in the current state
 */
__msg("frame1: parent state regs=r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame2: last_idx 9 first_idx 9 subseq_idx 13")
__msg("frame2: regs=r1 stack= before 9: (85) call pc+3")
/* frame1.r1 is marked because of backtracking of call instruction */
__msg("frame1: parent state regs=r1,r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 8 first_idx 7 subseq_idx 9")
__msg("frame1: regs=r1,r6,r7 stack= before 8: (bf) r7 = r1")
__msg("frame1: regs=r1,r6 stack= before 7: (bf) r6 = r1")
__msg("frame1: parent state regs=r1 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 4 first_idx 4 subseq_idx 7")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+2")
__msg("frame0: parent state regs=r1,r6 stack=")
/* Parent state */
__msg("frame0: last_idx 3 first_idx 1 subseq_idx 4")
__msg("frame0: regs=r1,r6 stack= before 3: (bf) r6 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_many_frames(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r6.id */
	"r1 = r0;"
	"r6 = r0;"
	/* foo -> bar; bar's 'if r1 > 7' eventually forces precision marks */
	"call precision_many_frames__foo;"
	"r6 = r6;" /* mark r6 as live */
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
226 
/* Middle frame for precision_many_frames: copies the shared-ID scalar
 * into r6/r7 before calling bar, so precision marks must reach them.
 */
static __naked __noinline __used
void precision_many_frames__foo(void)
{
	asm volatile (
	/* conflate one of the register numbers (r6) with outer frame,
	 * to verify that those are tracked independently
	 */
	"r6 = r1;"
	"r7 = r1;"
	"call precision_many_frames__bar;"
	"r6 = r6;" /* mark r6 as live */
	"r7 = r7;" /* mark r7 as live */
	"exit"
	::: __clobber_all);
}
242 
/* Innermost frame for precision_many_frames: triggers range propagation
 * on r1 and then forces r1 precise via a stack-pointer addition.
 */
static __naked __noinline __used
void precision_many_frames__bar(void)
{
	asm volatile (
	"if r1 > 7 goto +0;"
	"r6 = 0;" /* mark r6 as live */
	"r7 = 0;" /* mark r7 as live */
	/* force r1 to be precise, this eventually marks:
	 * - bar frame r1
	 * - foo frame r{1,6,7}
	 * - main frame r{1,6}
	 */
	"r2 = r10;"
	"r2 += r1;"
	"r0 = 0;"
	"exit;"
	::: __clobber_all);
}
261 
/* Check that scalars with the same IDs are marked precise on stack as
 * well as in registers.
 */
SEC("socket")
__success __log_level(2)
__msg("11: (0f) r2 += r1")
/* foo frame */
__msg("frame1: regs=r1 stack= before 10: (bf) r2 = r10")
__msg("frame1: regs=r1 stack= before 9: (25) if r1 > 0x7 goto pc+0")
__msg("frame1: regs=r1 stack=-8,-16 before 8: (7b) *(u64 *)(r10 -16) = r1")
__msg("frame1: regs=r1 stack=-8 before 7: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+2")
/* main frame */
__msg("frame0: regs=r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_stack(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == fp[-8].id */
	"r1 = r0;"
	"*(u64*)(r10 - 8) = r1;"
	"call precision_stack__foo;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
295 
/* Callee frame for precision_stack: spills the shared-ID scalar to its
 * own stack slots before triggering range propagation.
 */
static __naked __noinline __used
void precision_stack__foo(void)
{
	asm volatile (
	/* conflate one of the stack slot offsets (fp[-8]) with outer frame,
	 * to verify that those are tracked independently
	 */
	"*(u64*)(r10 - 8) = r1;"
	"*(u64*)(r10 - 16) = r1;"
	"if r1 > 7 goto +0;"
	/* force r1 to be precise, this eventually marks:
	 * - foo frame r1,fp{-8,-16}
	 * - main frame r1,fp{-8}
	 */
	"r2 = r10;"
	"r2 += r1;"
	"exit"
	::: __clobber_all);
}
315 
/* Use two separate scalar IDs to check that these are propagated
 * independently.
 */
SEC("socket")
__success __log_level(2)
/* r{6,7} */
__msg("12: (0f) r3 += r7")
__msg("frame0: regs=r7 stack= before 11: (bf) r3 = r10")
__msg("frame0: regs=r7 stack= before 9: (25) if r7 > 0x7 goto pc+0")
/* ... skip some insns ... */
__msg("frame0: regs=r6,r7 stack= before 3: (bf) r7 = r0")
__msg("frame0: regs=r0,r6 stack= before 2: (bf) r6 = r0")
/* r{8,9} */
__msg("13: (0f) r3 += r9")
__msg("frame0: regs=r9 stack= before 12: (0f) r3 += r7")
/* ... skip some insns ... */
__msg("frame0: regs=r9 stack= before 10: (25) if r9 > 0x7 goto pc+0")
__msg("frame0: regs=r8,r9 stack= before 7: (bf) r9 = r0")
__msg("frame0: regs=r0,r8 stack= before 6: (bf) r8 = r0")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_two_ids(void)
{
	asm volatile (
	/* r6 = random number up to 0xff
	 * r6.id == r7.id
	 */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r6 = r0;"
	"r7 = r0;"
	/* same, but for r{8,9} */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r8 = r0;"
	"r9 = r0;"
	/* clear r0 id */
	"r0 = 0;"
	/* propagate equal scalars precision */
	"if r7 > 7 goto +0;"
	"if r9 > 7 goto +0;"
	"r3 = r10;"
	/* force r7 to be precise, this also marks r6 */
	"r3 += r7;"
	/* force r9 to be precise, this also marks r8 */
	"r3 += r9;"
	"r6 = r6;" /* mark r6 as live */
	"r8 = r8;" /* mark r8 as live */
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
368 
SEC("socket")
__success __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
/* check that r0 and r6 have different IDs after 'if',
 * collect_linked_regs() can't tie more than 6 registers for a single insn.
 */
__msg("8: (25) if r0 > 0x7 goto pc+0         ; R0=scalar(id=1")
__msg("14: (bf) r6 = r6                      ; R6=scalar(id=2")
/* check that r{0-5} are marked precise after 'if' */
__msg("frame0: regs=r0 stack= before 8: (25) if r0 > 0x7 goto pc+0")
__msg("frame0: parent state regs=r0,r1,r2,r3,r4,r5 stack=:")
__naked void linked_regs_too_many_regs(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r{0-6} IDs */
	"r1 = r0;"
	"r2 = r0;"
	"r3 = r0;"
	"r4 = r0;"
	"r5 = r0;"
	"r6 = r0;"
	/* propagate range for r{0-6} */
	"if r0 > 7 goto +0;"
	/* keep r{1-5} live */
	"r1 = r1;"
	"r2 = r2;"
	"r3 = r3;"
	"r4 = r4;"
	"r5 = r5;"
	/* make r6 appear in the log */
	"r6 = r6;"
	/* force r0 to be precise,
	 * this would cause r{0-5} to be precise because of shared IDs
	 * (r6 is left out by the 6-register limit, see __msg above)
	 */
	"r7 = r10;"
	"r7 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
414 
/* Check precision backtracking when the r7/r8 id link is broken after the
 * conditional jump: precision for r7 must still reach r8 and r0 in the
 * parent state, as captured by the __msg patterns below.
 */
SEC("socket")
__failure __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
__msg("regs=r7 stack= before 5: (3d) if r8 >= r0")
__msg("parent state regs=r0,r7,r8")
__msg("regs=r0,r7,r8 stack= before 4: (25) if r0 > 0x1")
__msg("div by zero")
__naked void linked_regs_broken_link_2(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"r7 = r0;"
	"r8 = r0;"
	"call %[bpf_get_prandom_u32];"
	"if r0 > 1 goto +0;"
	/* r7.id == r8.id,
	 * thus r7 precision implies r8 precision,
	 * which implies r0 precision because of the conditional below.
	 */
	"if r8 >= r0 goto 1f;"
	/* break id relation between r7 and r8 */
	"r8 += r8;"
	/* make r7 precise */
	"if r7 == 0 goto 1f;"
	"r0 /= 0;"
"1:"
	"r0 = 42;"
	"exit;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
447 
/* Check that mark_chain_precision() for one of the conditional jump
 * operands does not trigger equal scalars precision propagation.
 */
SEC("socket")
__success __log_level(2)
__msg("3: (25) if r1 > 0x100 goto pc+0")
__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0")
__naked void cjmp_no_linked_regs_trigger(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id */
	"r1 = r0;"
	/* the jump below would be predicted, thus r1 would be marked precise,
	 * this should not imply precision mark for r0
	 * (note: 256 > 0xff, so r1 <= 0x100 always holds)
	 */
	"if r1 > 256 goto +0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
473 
/* Verify that check_ids() is used by regsafe() for scalars.
 *
 * r9 = ... some pointer with range X ...
 * r6 = ... unbound scalar ID=a ...
 * r7 = ... unbound scalar ID=b ...
 * if (r6 > r7) goto +1
 * r7 = r6
 * if (r7 > X) goto exit
 * r9 += r6
 * ... access memory using r9 ...
 *
 * The memory access is safe only if r7 is bounded,
 * which is true for one branch and not true for another.
 */
SEC("socket")
__failure __msg("register with unbounded min value")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_in_regsafe(void)
{
	asm volatile (
	/* Bump allocated stack */
	"r1 = 0;"
	"*(u64*)(r10 - 8) = r1;"
	/* r9 = pointer to stack */
	"r9 = r10;"
	"r9 += -8;"
	/* r7 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r7 = r0;"
	/* r6 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	/* if r6 > r7 is an unpredictable jump */
	"if r6 > r7 goto l1_%=;"
	/* fall-through path ties r6.id == r7.id */
	"r7 = r6;"
"l1_%=:"
	/* if r7 > 4 ...; transfers range to r6 on one execution path
	 * but does not transfer on another
	 */
	"if r7 > 4 goto l2_%=;"
	/* Access memory at r9[r6], r6 is not always bounded */
	"r9 += r6;"
	"r0 = *(u8*)(r9 + 0);"
"l2_%=:"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
524 
/* Similar to check_ids_in_regsafe.
 * The l0 could be reached in two states:
 *
 *   (1) r6{.id=A}, r7{.id=A}, r8{.id=B}
 *   (2) r6{.id=B}, r7{.id=A}, r8{.id=B}
 *
 * Where (2) is not safe, as "r7 > 4" check won't propagate range for it.
 * This example would be considered safe without changes to
 * mark_chain_precision() to track scalar values with equal IDs.
 */
SEC("socket")
__failure __msg("register with unbounded min value")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_in_regsafe_2(void)
{
	asm volatile (
	/* Bump allocated stack */
	"r1 = 0;"
	"*(u64*)(r10 - 8) = r1;"
	/* r9 = pointer to stack */
	"r9 = r10;"
	"r9 += -16;"
	/* r8 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r8 = r0;"
	/* r7 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r7 = r0;"
	/* r6 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	/* scratch .id from r0 */
	"r0 = 0;"
	/* if r6 > r7 is an unpredictable jump */
	"if r6 > r7 goto l1_%=;"
	/* tie r6 and r7 .id */
	"r6 = r7;"
"l0_%=:"
	/* if r7 > 4 exit(0) */
	"if r7 > 4 goto l2_%=;"
	/* Access memory at r9[r6] */
	"r9 += r6;"
	"r9 += r7;"
	"r9 += r8;"
	"r0 = *(u8*)(r9 + 0);"
"l2_%=:"
	"r0 = 0;"
	"exit;"
"l1_%=:"
	/* tie r6 and r8 .id */
	"r6 = r8;"
	"goto l0_%=;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
581 
/* Check that scalar IDs *are not* generated on register to register
 * assignments if source register is a constant.
 *
 * If such IDs *are* generated the 'l1' below would be reached in
 * two states:
 *
 *   (1) r1{.id=A}, r2{.id=A}
 *   (2) r1{.id=C}, r2{.id=C}
 *
 * Thus forcing 'if r1 == r2' verification twice.
 */
SEC("socket")
__success __log_level(2)
__msg("11: (1d) if r3 == r4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	/* unpredictable jump */
	"if r0 > 7 goto l0_%=;"
	/* possibly generate same scalar ids for r3 and r4 */
	"r1 = 0;"
	"r1 = r1;"
	"r3 = r1;"
	"r4 = r1;"
	"goto l1_%=;"
"l0_%=:"
	/* possibly generate different scalar ids for r3 and r4 */
	"r1 = 0;"
	"r2 = 0;"
	"r3 = r1;"
	"r4 = r2;"
"l1_%=:"
	/* predictable jump, marks r3 and r4 precise */
	"if r3 == r4 goto +0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
627 
/* Same as no_scalar_id_for_const() but for 32-bit values */
SEC("socket")
__success __log_level(2)
__msg("11: (1e) if w3 == w4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const32(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	/* unpredictable jump */
	"if r0 > 7 goto l0_%=;"
	/* possibly generate same scalar ids for r3 and r4 */
	"w1 = 0;"
	"w1 = w1;"
	"w3 = w1;"
	"w4 = w1;"
	"goto l1_%=;"
"l0_%=:"
	/* possibly generate different scalar ids for r3 and r4 */
	"w1 = 0;"
	"w2 = 0;"
	"w3 = w1;"
	"w4 = w2;"
"l1_%=:"
	/* predictable jump, marks r3 and r4 precise */
	"if w3 == w4 goto +0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
663 
/* Check that unique scalar IDs are ignored when new verifier state is
 * compared to cached verifier state. For this test:
 * - cached state has no id on r1
 * - new state has a unique id on r1
 */
SEC("socket")
__success __log_level(2)
__msg("6: (25) if r6 > 0x7 goto pc+1")
__msg("7: (57) r1 &= 255")
__msg("8: (bf) r2 = r10")
__msg("from 6 to 8: safe")
__msg("processed 12 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void ignore_unique_scalar_ids_cur(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* r1.id == r0.id */
	"r1 = r0;"
	/* make r1.id unique: overwriting r0 leaves r1 as the sole holder */
	"r0 = 0;"
	"if r6 > 7 goto l0_%=;"
	/* clear r1 id, but keep the range compatible */
	"r1 &= 0xff;"
"l0_%=:"
	/* get here in two states:
	 * - first: r1 has no id (cached state)
	 * - second: r1 has a unique id (should be considered equivalent)
	 */
	"r2 = r10;"
	"r2 += r1;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
703 
/* Check that unique scalar IDs are ignored when new verifier state is
 * compared to cached verifier state. For this test:
 * - cached state has a unique id on r1
 * - new state has no id on r1
 */
SEC("socket")
__success __log_level(2)
__msg("6: (25) if r6 > 0x7 goto pc+1")
__msg("7: (05) goto pc+1")
__msg("9: (bf) r2 = r10")
__msg("9: safe")
__msg("processed 13 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void ignore_unique_scalar_ids_old(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* r1.id == r0.id */
	"r1 = r0;"
	/* make r1.id unique: overwriting r0 leaves r1 as the sole holder */
	"r0 = 0;"
	"if r6 > 7 goto l1_%=;"
	"goto l0_%=;"
"l1_%=:"
	/* clear r1 id, but keep the range compatible */
	"r1 &= 0xff;"
"l0_%=:"
	/* get here in two states:
	 * - first: r1 has a unique id (cached state)
	 * - second: r1 has no id (should be considered equivalent)
	 */
	"r2 = r10;"
	"r2 += r1;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
745 
/* Check that two registers with 0 scalar IDs in a verified state can be mapped
 * to the same scalar ID in current state.
 */
SEC("socket")
__success __log_level(2)
/* The states should be equivalent on reaching insn 12.
 */
__msg("12: safe")
__msg("processed 17 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void two_nil_old_ids_one_cur_id(void)
{
	asm volatile (
	/* Give unique scalar IDs to r{6,7} */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r6 = r0;"
	/* '*= 1' keeps the value; per the state comment below the cached
	 * state ends up with r6{.id=0}, i.e. the shared ID is dropped
	 */
	"r6 *= 1;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r7 = r0;"
	"r7 *= 1;"
	"r0 = 0;"
	/* Maybe make r{6,7} IDs identical */
	"if r6 > r7 goto l0_%=;"
	"goto l1_%=;"
"l0_%=:"
	"r6 = r7;"
"l1_%=:"
	/* Mark r{6,7} precise.
	 * Get here in two states:
	 * - first:  r6{.id=0}, r7{.id=0} (cached state)
	 * - second: r6{.id=A}, r7{.id=A}
	 * Verifier considers such states equivalent.
	 * Thus "exit;" would be verified only once.
	 */
	"r2 = r10;"
	"r2 += r6;"
	"r2 += r7;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
790 
/* Check that two different scalar IDs in a verified state can't be
 * mapped to the same scalar ID in current state.
 */
SEC("socket")
__success __log_level(2)
/* The exit instruction should be reachable from two states,
 * use two matches and "processed .. insns" to ensure this.
 */
__msg("15: (95) exit")
__msg("15: (95) exit")
__msg("processed 20 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void two_old_ids_one_cur_id(void)
{
	asm volatile (
	/* Give distinct scalar IDs to r{6,7}; r8 shares r6's ID and
	 * r9 shares r7's ID, keeping both IDs non-unique
	 */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r6 = r0;"
	"r8 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r7 = r0;"
	"r9 = r0;"
	"r0 = 0;"
	/* Maybe make r{6,7} IDs identical */
	"if r6 > r7 goto l0_%=;"
	"goto l1_%=;"
"l0_%=:"
	"r6 = r7;"
"l1_%=:"
	/* Mark r{6,7} precise.
	 * Get here in two states:
	 * - first:  r6{.id=A}, r7{.id=B} (cached state)
	 * - second: r6{.id=A}, r7{.id=A}
	 * Currently we don't want to consider such states equivalent.
	 * Thus "exit;" would be verified twice.
	 */
	"r2 = r10;"
	"r2 += r6;"
	"r2 += r7;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
837 
SEC("socket")
/* Note the flag, see verifier.c:opt_subreg_zext_lo32_rnd_hi32() */
__flag(BPF_F_TEST_RND_HI32)
__success
/* This test was added because of a bug in verifier.c:sync_linked_regs(),
 * upon range propagation it destroyed subreg_def marks for registers.
 * The subreg_def mark is used to decide whether zero extension instructions
 * are needed when register is read. When BPF_F_TEST_RND_HI32 is set it
 * also causes generation of statements to randomize upper halves of
 * read registers.
 *
 * The test is written in a way to return an upper half of a register
 * that is affected by range propagation and must have its subreg_def
 * preserved. This gives a return value of 0 and leads to undefined
 * return value if subreg_def mark is not preserved.
 */
__retval(0)
/* Check that verifier believes r1/r0 are zero at exit */
__log_level(2)
__msg("4: (77) r1 >>= 32                     ; R1=0")
__msg("5: (bf) r0 = r1                       ; R0=0 R1=0")
__msg("6: (95) exit")
__msg("from 3 to 4")
__msg("4: (77) r1 >>= 32                     ; R1=0")
__msg("5: (bf) r0 = r1                       ; R0=0 R1=0")
__msg("6: (95) exit")
/* Verify that statements to randomize upper half of r1 had not been
 * generated.
 */
__xlated("call unknown")
__xlated("r0 &= 2147483647")
__xlated("w1 = w0")
/* This is how disasm.c prints BPF_ZEXT_REG at the moment, x86 and arm64
 * are the only CI archs that do not need zero extension for subregs.
 */
#if !defined(__TARGET_ARCH_x86) && !defined(__TARGET_ARCH_arm64)
__xlated("w1 = w1")
#endif
__xlated("if w0 < 0xa goto pc+0")
__xlated("r1 >>= 32")
__xlated("r0 = r1")
__xlated("exit")
__naked void linked_regs_and_subreg_def(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	/* make sure r0 is in 32-bit range, otherwise w1 = w0 won't
	 * assign same IDs to registers.
	 */
	"r0 &= 0x7fffffff;"
	/* link w1 and w0 via ID */
	"w1 = w0;"
	/* 'if' statement propagates range info from w0 to w1,
	 * but should not affect w1->subreg_def property.
	 */
	"if w0 < 10 goto +0;"
	/* r1 is read here, on archs that require subreg zero
	 * extension this would cause zext patch generation.
	 */
	"r1 >>= 32;"
	"r0 = r1;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
904 
/* License declaration read by the BPF loader via the "license" section. */
char _license[] SEC("license") = "GPL";
906