// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#define _GNU_SOURCE
#include <limits.h>
#include <test_progs.h>
#include <linux/filter.h>
#include <linux/bpf.h>

/* =================================
 * SHORT AND CONSISTENT NUMBER TYPES
 * =================================
 */
#define U64_MAX ((u64)UINT64_MAX)
#define U32_MAX ((u32)UINT_MAX)
#define U16_MAX ((u32)0xffff)
#define S64_MIN ((s64)INT64_MIN)
#define S64_MAX ((s64)INT64_MAX)
#define S32_MIN ((s32)INT_MIN)
#define S32_MAX ((s32)INT_MAX)
#define S16_MIN ((s32)-0x8000)
#define S16_MAX ((s32)0x7fff)

typedef unsigned long long ___u64;
typedef unsigned int ___u32;
typedef long long ___s64;
typedef int ___s32;

/* avoid conflicts with already defined types in kernel headers */
#define u64 ___u64
#define u32 ___u32
#define s64 ___s64
#define s32 ___s32

/* ==================================
 * STRING BUF ABSTRACTION AND HELPERS
 * ==================================
 */
struct strbuf {
	size_t buf_sz;
	int pos;
	char buf[0];
};

#define DEFINE_STRBUF(name, N)						\
	struct { struct strbuf buf; char data[(N)]; } ___##name;	\
	struct strbuf *name = (___##name.buf.buf_sz = (N), ___##name.buf.pos = 0, &___##name.buf)

__printf(2, 3)
static inline void snappendf(struct strbuf *s, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	s->pos += vsnprintf(s->buf + s->pos,
			    s->pos < s->buf_sz ? s->buf_sz - s->pos : 0,
			    fmt, args);
	va_end(args);
}
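
/* A minimal usage sketch of the strbuf helpers above (illustrative only,
 * not used by the tests below):
 *
 *	DEFINE_STRBUF(sb, 64);
 *
 *	snappendf(sb, "[%d", 1);
 *	snappendf(sb, "; %d]", 2);
 *	printf("%s\n", sb->buf);     // prints "[1; 2]"
 *
 * Note that snappendf() keeps advancing sb->pos even once pos exceeds
 * buf_sz, so overlong output is truncated instead of overflowing the buffer.
 */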

/* ==================================
 * GENERIC NUMBER TYPE AND OPERATIONS
 * ==================================
 */
enum num_t { U64, first_t = U64, U32, S64, S32, last_t = S32 };

static __always_inline u64 min_t(enum num_t t, u64 x, u64 y)
{
	switch (t) {
	case U64: return (u64)x < (u64)y ? (u64)x : (u64)y;
	case U32: return (u32)x < (u32)y ? (u32)x : (u32)y;
	case S64: return (s64)x < (s64)y ? (s64)x : (s64)y;
	case S32: return (s32)x < (s32)y ? (u32)(s32)x : (u32)(s32)y;
	default: printf("min_t!\n"); exit(1);
	}
}

static __always_inline u64 max_t(enum num_t t, u64 x, u64 y)
{
	switch (t) {
	case U64: return (u64)x > (u64)y ? (u64)x : (u64)y;
	case U32: return (u32)x > (u32)y ? (u32)x : (u32)y;
	case S64: return (s64)x > (s64)y ? (s64)x : (s64)y;
	case S32: return (s32)x > (s32)y ? (u32)(s32)x : (u32)(s32)y;
	default: printf("max_t!\n"); exit(1);
	}
}

static __always_inline u64 cast_t(enum num_t t, u64 x)
{
	switch (t) {
	case U64: return (u64)x;
	case U32: return (u32)x;
	case S64: return (s64)x;
	case S32: return (u32)(s32)x;
	default: printf("cast_t!\n"); exit(1);
	}
}

static const char *t_str(enum num_t t)
{
	switch (t) {
	case U64: return "u64";
	case U32: return "u32";
	case S64: return "s64";
	case S32: return "s32";
	default: printf("t_str!\n"); exit(1);
	}
}

static bool t_is_32(enum num_t t)
{
	switch (t) {
	case U64: return false;
	case U32: return true;
	case S64: return false;
	case S32: return true;
	default: printf("t_is_32!\n"); exit(1);
	}
}

static enum num_t t_signed(enum num_t t)
{
	switch (t) {
	case U64: return S64;
	case U32: return S32;
	case S64: return S64;
	case S32: return S32;
	default: printf("t_signed!\n"); exit(1);
	}
}

static enum num_t t_unsigned(enum num_t t)
{
	switch (t) {
	case U64: return U64;
	case U32: return U32;
	case S64: return U64;
	case S32: return U32;
	default: printf("t_unsigned!\n"); exit(1);
	}
}

#define UNUM_MAX_DECIMAL U16_MAX
#define SNUM_MAX_DECIMAL S16_MAX
#define SNUM_MIN_DECIMAL S16_MIN

static bool num_is_small(enum num_t t, u64 x)
{
	switch (t) {
	case U64: return (u64)x <= UNUM_MAX_DECIMAL;
	case U32: return (u32)x <= UNUM_MAX_DECIMAL;
	case S64: return (s64)x >= SNUM_MIN_DECIMAL && (s64)x <= SNUM_MAX_DECIMAL;
	case S32: return (s32)x >= SNUM_MIN_DECIMAL && (s32)x <= SNUM_MAX_DECIMAL;
	default: printf("num_is_small!\n"); exit(1);
	}
}

static void snprintf_num(enum num_t t, struct strbuf *sb, u64 x)
{
	bool is_small = num_is_small(t, x);

	if (is_small) {
		switch (t) {
		case U64: return snappendf(sb, "%llu", (u64)x);
		case U32: return snappendf(sb, "%u", (u32)x);
		case S64: return snappendf(sb, "%lld", (s64)x);
		case S32: return snappendf(sb, "%d", (s32)x);
		default: printf("snprintf_num!\n"); exit(1);
		}
	} else {
		switch (t) {
		case U64:
			if (x == U64_MAX)
				return snappendf(sb, "U64_MAX");
			else if (x >= U64_MAX - 256)
				return snappendf(sb, "U64_MAX-%llu", U64_MAX - x);
			else
				return snappendf(sb, "%#llx", (u64)x);
		case U32:
			if ((u32)x == U32_MAX)
				return snappendf(sb, "U32_MAX");
			else if ((u32)x >= U32_MAX - 256)
				return snappendf(sb, "U32_MAX-%u", U32_MAX - (u32)x);
			else
				return snappendf(sb, "%#x", (u32)x);
		case S64:
			if ((s64)x == S64_MAX)
				return snappendf(sb, "S64_MAX");
			else if ((s64)x >= S64_MAX - 256)
				return snappendf(sb, "S64_MAX-%lld", S64_MAX - (s64)x);
			else if ((s64)x == S64_MIN)
				return snappendf(sb, "S64_MIN");
			else if ((s64)x <= S64_MIN + 256)
				return snappendf(sb, "S64_MIN+%lld", (s64)x - S64_MIN);
			else
				return snappendf(sb, "%#llx", (s64)x);
		case S32:
			if ((s32)x == S32_MAX)
				return snappendf(sb, "S32_MAX");
			else if ((s32)x >= S32_MAX - 256)
				return snappendf(sb, "S32_MAX-%d", S32_MAX - (s32)x);
			else if ((s32)x == S32_MIN)
				return snappendf(sb, "S32_MIN");
			else if ((s32)x <= S32_MIN + 256)
				return snappendf(sb, "S32_MIN+%d", (s32)x - S32_MIN);
			else
				return snappendf(sb, "%#x", (s32)x);
		default: printf("snprintf_num!\n"); exit(1);
		}
	}
}
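
/* Examples of the compact formatting implemented above (illustrative):
 *
 *	snprintf_num(U64, sb, 100);		// appends "100"
 *	snprintf_num(U64, sb, U64_MAX - 5);	// appends "U64_MAX-5"
 *	snprintf_num(S64, sb, (u64)S64_MIN);	// appends "S64_MIN"
 *	snprintf_num(U32, sb, 0x12345678);	// appends "0x12345678"
 */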

/* ===================================
 * GENERIC RANGE STRUCT AND OPERATIONS
 * ===================================
 */
struct range {
	u64 a, b;
};

static void snprintf_range(enum num_t t, struct strbuf *sb, struct range x)
{
	if (x.a == x.b)
		return snprintf_num(t, sb, x.a);

	snappendf(sb, "[");
	snprintf_num(t, sb, x.a);
	snappendf(sb, "; ");
	snprintf_num(t, sb, x.b);
	snappendf(sb, "]");
}

static void print_range(enum num_t t, struct range x, const char *sfx)
{
	DEFINE_STRBUF(sb, 128);

	snprintf_range(t, sb, x);
	printf("%s%s", sb->buf, sfx);
}

static const struct range unkn[] = {
	[U64] = { 0, U64_MAX },
	[U32] = { 0, U32_MAX },
	[S64] = { (u64)S64_MIN, (u64)S64_MAX },
	[S32] = { (u64)(u32)S32_MIN, (u64)(u32)S32_MAX },
};

static struct range unkn_subreg(enum num_t t)
{
	switch (t) {
	case U64: return unkn[U32];
	case U32: return unkn[U32];
	case S64: return unkn[U32];
	case S32: return unkn[S32];
	default: printf("unkn_subreg!\n"); exit(1);
	}
}

static struct range range(enum num_t t, u64 a, u64 b)
{
	switch (t) {
	case U64: return (struct range){ (u64)a, (u64)b };
	case U32: return (struct range){ (u32)a, (u32)b };
	case S64: return (struct range){ (s64)a, (s64)b };
	case S32: return (struct range){ (u32)(s32)a, (u32)(s32)b };
	default: printf("range!\n"); exit(1);
	}
}

static __always_inline u32 sign64(u64 x) { return (x >> 63) & 1; }
static __always_inline u32 sign32(u64 x) { return ((u32)x >> 31) & 1; }
static __always_inline u32 upper32(u64 x) { return (u32)(x >> 32); }
static __always_inline u64 swap_low32(u64 x, u32 y) { return (x & 0xffffffff00000000ULL) | y; }

static bool range_eq(struct range x, struct range y)
{
	return x.a == y.a && x.b == y.b;
}

static struct range range_cast_to_s32(struct range x)
{
	u64 a = x.a, b = x.b;

	/* if upper 32 bits are constant, lower 32 bits should form a proper
	 * s32 range to be correct
	 */
	if (upper32(a) == upper32(b) && (s32)a <= (s32)b)
		return range(S32, a, b);

	/* Special case where upper bits form a small sequence of two
	 * sequential numbers (in 32-bit unsigned space, so 0xffffffff to
	 * 0x00000000 is also valid), while lower bits form a proper s32 range
	 * going from negative numbers to positive numbers.
	 *
	 * E.g.: [0xfffffff0ffffff00; 0xfffffff100000010]. Iterating over the
	 * full 64-bit number range will form a proper [-16, 16]
	 * ([0xffffff00; 0x00000010]) range in its lower 32 bits.
	 */
	if (upper32(a) + 1 == upper32(b) && (s32)a < 0 && (s32)b >= 0)
		return range(S32, a, b);

	/* otherwise we can't derive much meaningful information */
	return unkn[S32];
}

static struct range range_cast_u64(enum num_t to_t, struct range x)
{
	u64 a = (u64)x.a, b = (u64)x.b;

	switch (to_t) {
	case U64:
		return x;
	case U32:
		if (upper32(a) != upper32(b))
			return unkn[U32];
		return range(U32, a, b);
	case S64:
		if (sign64(a) != sign64(b))
			return unkn[S64];
		return range(S64, a, b);
	case S32:
		return range_cast_to_s32(x);
	default: printf("range_cast_u64!\n"); exit(1);
	}
}

static struct range range_cast_s64(enum num_t to_t, struct range x)
{
	s64 a = (s64)x.a, b = (s64)x.b;

	switch (to_t) {
	case U64:
		/* equivalent to (s64)a <= (s64)b check */
		if (sign64(a) != sign64(b))
			return unkn[U64];
		return range(U64, a, b);
	case U32:
		if (upper32(a) != upper32(b) || sign32(a) != sign32(b))
			return unkn[U32];
		return range(U32, a, b);
	case S64:
		return x;
	case S32:
		return range_cast_to_s32(x);
	default: printf("range_cast_s64!\n"); exit(1);
	}
}

static struct range range_cast_u32(enum num_t to_t, struct range x)
{
	u32 a = (u32)x.a, b = (u32)x.b;

	switch (to_t) {
	case U64:
	case S64:
		/* u32 is always a valid zero-extended u64/s64 */
		return range(to_t, a, b);
	case U32:
		return x;
	case S32:
		return range_cast_to_s32(range(U32, a, b));
	default: printf("range_cast_u32!\n"); exit(1);
	}
}

static struct range range_cast_s32(enum num_t to_t, struct range x)
{
	s32 a = (s32)x.a, b = (s32)x.b;

	switch (to_t) {
	case U64:
	case U32:
	case S64:
		if (sign32(a) != sign32(b))
			return unkn[to_t];
		return range(to_t, a, b);
	case S32:
		return x;
	default: printf("range_cast_s32!\n"); exit(1);
	}
}

/* Reinterpret range in *from_t* domain as a range in *to_t* domain, preserving
 * all possible information. Worst case, it will be an unknown range within the
 * *to_t* domain, if nothing more specific can be guaranteed during the
 * conversion.
 */
static struct range range_cast(enum num_t from_t, enum num_t to_t, struct range from)
{
	switch (from_t) {
	case U64: return range_cast_u64(to_t, from);
	case U32: return range_cast_u32(to_t, from);
	case S64: return range_cast_s64(to_t, from);
	case S32: return range_cast_s32(to_t, from);
	default: printf("range_cast!\n"); exit(1);
	}
}
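
/* E.g. (illustrative): range_cast(U64, U32, ...) of [0xffffffff; 0x100000001]
 * yields the unknown u32 range, because upper 32 bits differ across the
 * range, while [0x100000000; 0x100000010] yields u32 [0; 0x10], because
 * upper 32 bits are constant.
 */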

static bool is_valid_num(enum num_t t, u64 x)
{
	switch (t) {
	case U64: return true;
	case U32: return upper32(x) == 0;
	case S64: return true;
	case S32: return upper32(x) == 0;
	default: printf("is_valid_num!\n"); exit(1);
	}
}

static bool is_valid_range(enum num_t t, struct range x)
{
	if (!is_valid_num(t, x.a) || !is_valid_num(t, x.b))
		return false;

	switch (t) {
	case U64: return (u64)x.a <= (u64)x.b;
	case U32: return (u32)x.a <= (u32)x.b;
	case S64: return (s64)x.a <= (s64)x.b;
	case S32: return (s32)x.a <= (s32)x.b;
	default: printf("is_valid_range!\n"); exit(1);
	}
}

static struct range range_improve(enum num_t t, struct range old, struct range new)
{
	return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b));
}

static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y)
{
	struct range y_cast;

	y_cast = range_cast(y_t, x_t, y);

	/* If we know that
	 * - *x* is in the range of signed 32-bit values, and
	 * - *y_cast* range is 32-bit signed non-negative
	 * then *x* range can be improved with *y_cast* such that *x* range
	 * is 32-bit signed non-negative. Otherwise, if the new range for *x*
	 * allows upper 32 bits of 0xffffffff, then the eventual new range for
	 * *x* will be out of signed 32-bit range, which violates the original
	 * *x* range.
	 */
	if (x_t == S64 && y_t == S32 && y_cast.a <= S32_MAX && y_cast.b <= S32_MAX &&
	    (s64)x.a >= S32_MIN && (s64)x.b <= S32_MAX)
		return range_improve(x_t, x, y_cast);

	/* the case when new range knowledge, *y*, is a 32-bit subregister
	 * range, while previous range knowledge, *x*, is a full register
	 * 64-bit range, needs special treatment to take into account upper 32
	 * bits of full register range
	 */
	if (t_is_32(y_t) && !t_is_32(x_t)) {
		struct range x_swap;

		/* some combinations of upper 32 bits and sign bit can lead to
		 * invalid ranges, in such cases it's easier to detect them
		 * after cast/swap than try to enumerate all the conditions
		 * under which transformation and knowledge transfer is valid
		 */
		x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b));
		if (!is_valid_range(x_t, x_swap))
			return x;
		return range_improve(x_t, x, x_swap);
	}

	if (!t_is_32(x_t) && !t_is_32(y_t) && x_t != y_t) {
		if (x_t == S64 && x.a > x.b) {
			if (x.b < y.a && x.a <= y.b)
				return range(x_t, x.a, y.b);
			if (x.a > y.b && x.b >= y.a)
				return range(x_t, y.a, x.b);
		} else if (x_t == U64 && y.a > y.b) {
			if (y.b < x.a && y.a <= x.b)
				return range(x_t, y.a, x.b);
			if (y.a > x.b && y.b >= x.a)
				return range(x_t, x.a, y.b);
		}
	}

	/* otherwise, plain range cast and intersection works */
	return range_improve(x_t, x, y_cast);
}
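
/* E.g. (illustrative): for x = u64 [0; 0x1ffffffff] and newly learned u32
 * subrange y = [0x10; 0x20], the swap_low32() path above produces
 * x_swap = [0x10; 0x100000020], which is a valid u64 range, so x is
 * improved to [0x10; 0x100000020].
 */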

/* =======================
 * GENERIC CONDITIONAL OPS
 * =======================
 */
enum op { OP_LT, OP_LE, OP_GT, OP_GE, OP_EQ, OP_NE, first_op = OP_LT, last_op = OP_NE };

static enum op complement_op(enum op op)
{
	switch (op) {
	case OP_LT: return OP_GE;
	case OP_LE: return OP_GT;
	case OP_GT: return OP_LE;
	case OP_GE: return OP_LT;
	case OP_EQ: return OP_NE;
	case OP_NE: return OP_EQ;
	default: printf("complement_op!\n"); exit(1);
	}
}

static const char *op_str(enum op op)
{
	switch (op) {
	case OP_LT: return "<";
	case OP_LE: return "<=";
	case OP_GT: return ">";
	case OP_GE: return ">=";
	case OP_EQ: return "==";
	case OP_NE: return "!=";
	default: printf("op_str!\n"); exit(1);
	}
}

/* Can a register with range [x.a, x.b] *EVER* satisfy
 * OP (<, <=, >, >=, ==, !=) relation to
 * a register with range [y.a, y.b]
 * _in *num_t* domain_?
 */
static bool range_canbe_op(enum num_t t, struct range x, struct range y, enum op op)
{
#define range_canbe(T) do {									\
	switch (op) {										\
	case OP_LT: return (T)x.a < (T)y.b;							\
	case OP_LE: return (T)x.a <= (T)y.b;							\
	case OP_GT: return (T)x.b > (T)y.a;							\
	case OP_GE: return (T)x.b >= (T)y.a;							\
	case OP_EQ: return (T)max_t(t, x.a, y.a) <= (T)min_t(t, x.b, y.b);			\
	case OP_NE: return !((T)x.a == (T)x.b && (T)y.a == (T)y.b && (T)x.a == (T)y.a);	\
	default: printf("range_canbe op %d\n", op); exit(1);					\
	}											\
} while (0)

	switch (t) {
	case U64: { range_canbe(u64); }
	case U32: { range_canbe(u32); }
	case S64: { range_canbe(s64); }
	case S32: { range_canbe(s32); }
	default: printf("range_canbe!\n"); exit(1);
	}
#undef range_canbe
}
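
/* E.g. (illustrative): for x = [10; 20] and y = [15; 25] in the U64 domain:
 *   x <  y is possible: x.a (10) <  y.b (25);
 *   x == y is possible: max(10, 15) = 15 <= min(20, 25) = 20;
 *   x >  y is possible: x.b (20) >  y.a (15).
 */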

/* Does a register with range [x.a, x.b] *ALWAYS* satisfy
 * OP (<, <=, >, >=, ==, !=) relation to
 * a register with range [y.a, y.b]
 * _in *num_t* domain_?
 */
static bool range_always_op(enum num_t t, struct range x, struct range y, enum op op)
{
	/* always op <=> ! canbe complement(op) */
	return !range_canbe_op(t, x, y, complement_op(op));
}

/* Does a register with range [x.a, x.b] *NEVER* satisfy
 * OP (<, <=, >, >=, ==, !=) relation to
 * a register with range [y.a, y.b]
 * _in *num_t* domain_?
 */
static bool range_never_op(enum num_t t, struct range x, struct range y, enum op op)
{
	return !range_canbe_op(t, x, y, op);
}

/* similar to verifier's is_branch_taken():
 *    1 - always taken;
 *    0 - never taken;
 *   -1 - unsure.
 */
static int range_branch_taken_op(enum num_t t, struct range x, struct range y, enum op op)
{
	if (range_always_op(t, x, y, op))
		return 1;
	if (range_never_op(t, x, y, op))
		return 0;
	return -1;
}

/* What would be the new estimates for register x and y ranges assuming truthful
 * OP comparison between them. I.e., (x OP y == true) => x <- newx, y <- newy.
 *
 * We assume "interesting" cases where ranges overlap. Cases where it's
 * obvious that (x OP y) is either always true or false should be filtered with
 * range_never and range_always checks.
 */
static void range_cond(enum num_t t, struct range x, struct range y,
		       enum op op, struct range *newx, struct range *newy)
{
	if (!range_canbe_op(t, x, y, op)) {
		/* nothing to adjust, can't happen, return original values */
		*newx = x;
		*newy = y;
		return;
	}
	switch (op) {
	case OP_LT:
		*newx = range(t, x.a, min_t(t, x.b, y.b - 1));
		*newy = range(t, max_t(t, x.a + 1, y.a), y.b);
		break;
	case OP_LE:
		*newx = range(t, x.a, min_t(t, x.b, y.b));
		*newy = range(t, max_t(t, x.a, y.a), y.b);
		break;
	case OP_GT:
		*newx = range(t, max_t(t, x.a, y.a + 1), x.b);
		*newy = range(t, y.a, min_t(t, x.b - 1, y.b));
		break;
	case OP_GE:
		*newx = range(t, max_t(t, x.a, y.a), x.b);
		*newy = range(t, y.a, min_t(t, x.b, y.b));
		break;
	case OP_EQ:
		*newx = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
		*newy = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
		break;
	case OP_NE:
		/* below logic is supported by the verifier now */
		if (x.a == x.b && x.a == y.a) {
			/* X is a constant matching left side of Y */
			*newx = range(t, x.a, x.b);
			*newy = range(t, y.a + 1, y.b);
		} else if (x.a == x.b && x.b == y.b) {
			/* X is a constant matching right side of Y */
			*newx = range(t, x.a, x.b);
			*newy = range(t, y.a, y.b - 1);
		} else if (y.a == y.b && x.a == y.a) {
			/* Y is a constant matching left side of X */
			*newx = range(t, x.a + 1, x.b);
			*newy = range(t, y.a, y.b);
		} else if (y.a == y.b && x.b == y.b) {
			/* Y is a constant matching right side of X */
			*newx = range(t, x.a, x.b - 1);
			*newy = range(t, y.a, y.b);
		} else {
			/* generic case, can't derive more information */
			*newx = range(t, x.a, x.b);
			*newy = range(t, y.a, y.b);
		}

		break;
	default:
		break;
	}
}
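
/* E.g. (illustrative): for x = [0; 10], y = [5; 8], and OP_LT in the U64
 * domain, the logic above computes:
 *   newx = [x.a; min(x.b, y.b - 1)] = [0; 7]
 *   newy = [max(x.a + 1, y.a); y.b] = [5; 8]
 */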

/* =======================
 * REGISTER STATE HANDLING
 * =======================
 */
struct reg_state {
	struct range r[4]; /* indexed by enum num_t: U64, U32, S64, S32 */
	bool valid;
};

static void print_reg_state(struct reg_state *r, const char *sfx)
{
	DEFINE_STRBUF(sb, 512);
	enum num_t t;
	int cnt = 0;

	if (!r->valid) {
		printf("<not found>%s", sfx);
		return;
	}

	snappendf(sb, "scalar(");
	for (t = first_t; t <= last_t; t++) {
		snappendf(sb, "%s%s=", cnt++ ? "," : "", t_str(t));
		snprintf_range(t, sb, r->r[t]);
	}
	snappendf(sb, ")");

	printf("%s%s", sb->buf, sfx);
}

static void print_refinement(enum num_t s_t, struct range src,
			     enum num_t d_t, struct range old, struct range new,
			     const char *ctx)
{
	printf("REFINING (%s) (%s)SRC=", ctx, t_str(s_t));
	print_range(s_t, src, "");
	printf(" (%s)DST_OLD=", t_str(d_t));
	print_range(d_t, old, "");
	printf(" (%s)DST_NEW=", t_str(d_t));
	print_range(d_t, new, "\n");
}

static void reg_state_refine(struct reg_state *r, enum num_t t, struct range x, const char *ctx)
{
	enum num_t d_t, s_t;
	struct range old;
	bool keep_going = false;

again:
	/* try to derive new knowledge from just learned range x of type t */
	for (d_t = first_t; d_t <= last_t; d_t++) {
		old = r->r[d_t];
		r->r[d_t] = range_refine(d_t, r->r[d_t], t, x);
		if (!range_eq(r->r[d_t], old)) {
			keep_going = true;
			if (env.verbosity >= VERBOSE_VERY)
				print_refinement(t, x, d_t, old, r->r[d_t], ctx);
		}
	}

	/* now see if we can derive anything new from updated reg_state's ranges */
	for (s_t = first_t; s_t <= last_t; s_t++) {
		for (d_t = first_t; d_t <= last_t; d_t++) {
			old = r->r[d_t];
			r->r[d_t] = range_refine(d_t, r->r[d_t], s_t, r->r[s_t]);
			if (!range_eq(r->r[d_t], old)) {
				keep_going = true;
				if (env.verbosity >= VERBOSE_VERY)
					print_refinement(s_t, r->r[s_t], d_t, old, r->r[d_t], ctx);
			}
		}
	}

	/* keep refining until we converge */
	if (keep_going) {
		keep_going = false;
		goto again;
	}
}
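
/* E.g. (illustrative): learning a U32 range [0; 127] for an otherwise unknown
 * register pins its S32 range to [0; 127] and tightens the low 32 bits of its
 * U64/S64 bounds through the swap_low32() logic in range_refine(); the
 * again-loop keeps cross-propagating such knowledge between all four domains
 * until a fixed point is reached.
 */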

static void reg_state_set_const(struct reg_state *rs, enum num_t t, u64 val)
{
	enum num_t tt;

	rs->valid = true;
	for (tt = first_t; tt <= last_t; tt++)
		rs->r[tt] = tt == t ? range(t, val, val) : unkn[tt];

	reg_state_refine(rs, t, rs->r[t], "CONST");
}

static void reg_state_cond(enum num_t t, struct reg_state *x, struct reg_state *y, enum op op,
			   struct reg_state *newx, struct reg_state *newy, const char *ctx)
{
	char buf[32];
	enum num_t ts[2];
	struct reg_state xx = *x, yy = *y;
	int i, t_cnt;
	struct range z1, z2;

	if (op == OP_EQ || op == OP_NE) {
		/* OP_EQ and OP_NE are sign-agnostic, so we need to process
		 * both signed and unsigned domains at the same time
		 */
		ts[0] = t_unsigned(t);
		ts[1] = t_signed(t);
		t_cnt = 2;
	} else {
		ts[0] = t;
		t_cnt = 1;
	}

	for (i = 0; i < t_cnt; i++) {
		t = ts[i];
		z1 = x->r[t];
		z2 = y->r[t];

		range_cond(t, z1, z2, op, &z1, &z2);

		if (newx) {
			snprintf(buf, sizeof(buf), "%s R1", ctx);
			reg_state_refine(&xx, t, z1, buf);
		}
		if (newy) {
			snprintf(buf, sizeof(buf), "%s R2", ctx);
			reg_state_refine(&yy, t, z2, buf);
		}
	}

	if (newx)
		*newx = xx;
	if (newy)
		*newy = yy;
}

static int reg_state_branch_taken_op(enum num_t t, struct reg_state *x, struct reg_state *y,
				     enum op op)
{
	if (op == OP_EQ || op == OP_NE) {
		/* OP_EQ and OP_NE are sign-agnostic */
		enum num_t tu = t_unsigned(t);
		enum num_t ts = t_signed(t);
		int br_u, br_s, br;

		br_u = range_branch_taken_op(tu, x->r[tu], y->r[tu], op);
		br_s = range_branch_taken_op(ts, x->r[ts], y->r[ts], op);

		if (br_u >= 0 && br_s >= 0 && br_u != br_s)
			ASSERT_FALSE(true, "branch taken inconsistency!\n");

		/* if 64-bit ranges are indecisive, use 32-bit subranges to
		 * eliminate always/never taken branches, if possible
		 */
		if (br_u == -1 && (t == U64 || t == S64)) {
			br = range_branch_taken_op(U32, x->r[U32], y->r[U32], op);
			/* we can only reject for OP_EQ, never take branch
			 * based on lower 32 bits
			 */
			if (op == OP_EQ && br == 0)
				return 0;
			/* for OP_NE we can be conclusive only if lower 32 bits
			 * differ and thus inequality branch is always taken
			 */
			if (op == OP_NE && br == 1)
				return 1;

			br = range_branch_taken_op(S32, x->r[S32], y->r[S32], op);
			if (op == OP_EQ && br == 0)
				return 0;
			if (op == OP_NE && br == 1)
				return 1;
		}

		return br_u >= 0 ? br_u : br_s;
	}
	return range_branch_taken_op(t, x->r[t], y->r[t], op);
}
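
/* E.g. (illustrative): for a 64-bit x == y check where the u64/s64 ranges
 * overlap but the known u32 subranges are disjoint (say, x.u32 = [0; 10] and
 * y.u32 = [20; 30]), the lower 32 bits can never match, so the helper above
 * concludes the equality branch is never taken and returns 0.
 */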

/* =====================================
 * BPF PROGS GENERATION AND VERIFICATION
 * =====================================
 */
struct case_spec {
	/* whether to init full register (r1) or sub-register (w1) */
	bool init_subregs;
	/* whether to establish initial value range on full register (r1) or
	 * sub-register (w1)
	 */
	bool setup_subregs;
	/* whether to establish initial value range using signed or unsigned
	 * comparisons (i.e., initialize umin/umax or smin/smax directly)
	 */
	bool setup_signed;
	/* whether to perform comparison on full registers or sub-registers */
	bool compare_subregs;
	/* whether to perform comparison using signed or unsigned operations */
	bool compare_signed;
};
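
/* E.g. (illustrative): spec = { .init_subregs = true, .setup_subregs = true,
 * .setup_signed = true } makes the generated program initialize w6/w7, bound
 * them with signed 32-bit conditional jumps, and then perform the tested
 * comparison on full 64-bit registers using an unsigned operation.
 */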

/* Generate test BPF program based on provided test ranges, operation, and
 * specifications about register bitness and signedness.
 */
static int load_range_cmp_prog(struct range x, struct range y, enum op op,
			       int branch_taken, struct case_spec spec,
			       char *log_buf, size_t log_sz,
			       int *false_pos, int *true_pos)
{
#define emit(insn) ({						\
	struct bpf_insn __insns[] = { insn };			\
	int __i;						\
	for (__i = 0; __i < ARRAY_SIZE(__insns); __i++)		\
		insns[cur_pos + __i] = __insns[__i];		\
	cur_pos += __i;						\
})
#define JMP_TO(target) (target - cur_pos - 1)
	int cur_pos = 0, exit_pos, fd, op_code;
	struct bpf_insn insns[64];
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_level = 2,
		.log_buf = log_buf,
		.log_size = log_sz,
		.prog_flags = testing_prog_flags(),
	);

	/* ; skip exit block below
	 * goto +2;
	 */
	emit(BPF_JMP_A(2));
	exit_pos = cur_pos;
	/* ; exit block for all the preparatory conditionals
	 * out:
	 * r0 = 0;
	 * exit;
	 */
	emit(BPF_MOV64_IMM(BPF_REG_0, 0));
	emit(BPF_EXIT_INSN());
	/*
	 * ; assign r6/w6 and r7/w7 unpredictable u64/u32 value
	 * call bpf_get_current_pid_tgid;
	 * r6 = r0; | w6 = w0;
	 * call bpf_get_current_pid_tgid;
	 * r7 = r0; | w7 = w0;
	 */
	emit(BPF_EMIT_CALL(BPF_FUNC_get_current_pid_tgid));
	if (spec.init_subregs)
		emit(BPF_MOV32_REG(BPF_REG_6, BPF_REG_0));
	else
		emit(BPF_MOV64_REG(BPF_REG_6, BPF_REG_0));
	emit(BPF_EMIT_CALL(BPF_FUNC_get_current_pid_tgid));
	if (spec.init_subregs)
		emit(BPF_MOV32_REG(BPF_REG_7, BPF_REG_0));
	else
		emit(BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	/* ; setup initial r6/w6 possible value range ([x.a, x.b])
	 * r1 = %[x.a] ll; | w1 = %[x.a];
	 * r2 = %[x.b] ll; | w2 = %[x.b];
	 * if r6 < r1 goto out; | if w6 < w1 goto out;
	 * if r6 > r2 goto out; | if w6 > w2 goto out;
	 */
	if (spec.setup_subregs) {
		emit(BPF_MOV32_IMM(BPF_REG_1, (s32)x.a));
		emit(BPF_MOV32_IMM(BPF_REG_2, (s32)x.b));
		emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
				   BPF_REG_6, BPF_REG_1, JMP_TO(exit_pos)));
		emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
				   BPF_REG_6, BPF_REG_2, JMP_TO(exit_pos)));
	} else {
		emit(BPF_LD_IMM64(BPF_REG_1, x.a));
		emit(BPF_LD_IMM64(BPF_REG_2, x.b));
		emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
				 BPF_REG_6, BPF_REG_1, JMP_TO(exit_pos)));
		emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
				 BPF_REG_6, BPF_REG_2, JMP_TO(exit_pos)));
	}
	/* ; setup initial r7/w7 possible value range ([y.a, y.b])
	 * r1 = %[y.a] ll; | w1 = %[y.a];
	 * r2 = %[y.b] ll; | w2 = %[y.b];
	 * if r7 < r1 goto out; | if w7 < w1 goto out;
	 * if r7 > r2 goto out; | if w7 > w2 goto out;
	 */
	if (spec.setup_subregs) {
		emit(BPF_MOV32_IMM(BPF_REG_1, (s32)y.a));
		emit(BPF_MOV32_IMM(BPF_REG_2, (s32)y.b));
		emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
				   BPF_REG_7, BPF_REG_1, JMP_TO(exit_pos)));
		emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
				   BPF_REG_7, BPF_REG_2, JMP_TO(exit_pos)));
	} else {
		emit(BPF_LD_IMM64(BPF_REG_1, y.a));
		emit(BPF_LD_IMM64(BPF_REG_2, y.b));
		emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
				 BPF_REG_7, BPF_REG_1, JMP_TO(exit_pos)));
		emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
				 BPF_REG_7, BPF_REG_2, JMP_TO(exit_pos)));
	}
	/* ; range test instruction
	 * if r6 <op> r7 goto +3; | if w6 <op> w7 goto +3;
	 */
	switch (op) {
	case OP_LT: op_code = spec.compare_signed ? BPF_JSLT : BPF_JLT; break;
	case OP_LE: op_code = spec.compare_signed ? BPF_JSLE : BPF_JLE; break;
	case OP_GT: op_code = spec.compare_signed ? BPF_JSGT : BPF_JGT; break;
	case OP_GE: op_code = spec.compare_signed ? BPF_JSGE : BPF_JGE; break;
	case OP_EQ: op_code = BPF_JEQ; break;
	case OP_NE: op_code = BPF_JNE; break;
	default:
		printf("unrecognized op %d\n", op);
		return -ENOTSUP;
	}
	/* ; BEFORE conditional, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably
	 * ; this is used for debugging, as verifier doesn't always print
	 * ; registers states as of condition jump instruction (e.g., when
	 * ; precision marking happens)
	 * r0 = r6; | w0 = w6;
	 * r0 = r7; | w0 = w7;
	 */
	if (spec.compare_subregs) {
		emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
		emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
	} else {
		emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
		emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	}
	if (spec.compare_subregs)
		emit(BPF_JMP32_REG(op_code, BPF_REG_6, BPF_REG_7, 3));
	else
		emit(BPF_JMP_REG(op_code, BPF_REG_6, BPF_REG_7, 3));
	/* ; FALSE branch, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably
	 * r0 = r6; | w0 = w6;
	 * r0 = r7; | w0 = w7;
	 * exit;
	 */
	*false_pos = cur_pos;
	if (spec.compare_subregs) {
		emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
		emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
	} else {
		emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
		emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	}
	if (branch_taken == 1) /* false branch is never taken */
		emit(BPF_EMIT_CALL(0xDEAD)); /* poison this branch */
	else
		emit(BPF_EXIT_INSN());
	/* ; TRUE branch, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably
	 * r0 = r6; | w0 = w6;
	 * r0 = r7; | w0 = w7;
	 * exit;
	 */
	*true_pos = cur_pos;
	if (spec.compare_subregs) {
		emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
		emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
	} else {
		emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
		emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	}
	if (branch_taken == 0) /* true branch is never taken */
		emit(BPF_EMIT_CALL(0xDEAD)); /* poison this branch */
	emit(BPF_EXIT_INSN()); /* last instruction has to be exit */

	fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "reg_bounds_test",
			   "GPL", insns, cur_pos, &opts);
	if (fd < 0)
		return fd;

	close(fd);
	return 0;
#undef emit
#undef JMP_TO
}

#define str_has_pfx(str, pfx) (strncmp(str, pfx, strlen(pfx)) == 0)

/* Parse register state from verifier log.
 * `s` should point to the start of "Rx = ..." substring in the verifier log.
 */
static int parse_reg_state(const char *s, struct reg_state *reg)
{
	/* There are two generic forms for SCALAR register:
	 * - known constant: R6_rwD=P%lld
	 * - range: R6_rwD=scalar(id=1,...), where "..." is a comma-separated
	 *   list of optional range specifiers:
	 *     - umin=%llu, if missing, assumed 0;
	 *     - umax=%llu, if missing, assumed U64_MAX;
	 *     - smin=%lld, if missing, assumed S64_MIN;
	 *     - smax=%lld, if missing, assumed S64_MAX;
	 *     - umin32=%d, if missing, assumed 0;
	 *     - umax32=%d, if missing, assumed U32_MAX;
	 *     - smin32=%d, if missing, assumed S32_MIN;
	 *     - smax32=%d, if missing, assumed S32_MAX;
	 *     - var_off=(%#llx; %#llx), tnum part, we don't care about it.
	 *
	 * If some of the values are equal, they will be grouped (but min/max
	 * are not mixed together, and similarly negative values are not
	 * grouped with non-negative ones). E.g.:
	 *
	 *   R6_w=Pscalar(smin=smin32=0, smax=umax=umax32=1000)
	 *
	 * _rwD part is optional (and any of the letters can be missing).
	 * P (precision mark) is optional as well.
	 *
	 * Anything inside scalar() is optional, including id, of course.
	 */
	struct {
		const char *pfx;
		u64 *dst, def;
		bool is_32, is_set;
	} *f, fields[8] = {
		{"smin=", &reg->r[S64].a, S64_MIN},
		{"smax=", &reg->r[S64].b, S64_MAX},
		{"umin=", &reg->r[U64].a, 0},
		{"umax=", &reg->r[U64].b, U64_MAX},
		{"smin32=", &reg->r[S32].a, (u32)S32_MIN, true},
		{"smax32=", &reg->r[S32].b, (u32)S32_MAX, true},
		{"umin32=", &reg->r[U32].a, 0, true},
		{"umax32=", &reg->r[U32].b, U32_MAX, true},
	};
	const char *p;
	int i;

	p = strchr(s, '=');
	if (!p)
		return -EINVAL;
	p++;
	if (*p == 'P')
		p++;

	if (!str_has_pfx(p, "scalar(")) {
		long long sval;
		enum num_t t;

		if (p[0] == '0' && p[1] == 'x') {
			if (sscanf(p, "%llx", &sval) != 1)
				return -EINVAL;
		} else {
			if (sscanf(p, "%lld", &sval) != 1)
				return -EINVAL;
		}

		reg->valid = true;
		for (t = first_t; t <= last_t; t++) {
			reg->r[t] = range(t, sval, sval);
		}
		return 0;
	}

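	/* sizeof("scalar") is strlen("scalar") + 1, so the increment below
	 * skips the whole "scalar(" prefix, including the opening '('
	 */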
	p += sizeof("scalar");
	while (p) {
		int midxs[ARRAY_SIZE(fields)], mcnt = 0;
		u64 val;

		for (i = 0; i < ARRAY_SIZE(fields); i++) {
			f = &fields[i];
			if (!str_has_pfx(p, f->pfx))
				continue;
			midxs[mcnt++] = i;
			p += strlen(f->pfx);
		}

		if (mcnt) {
			/* populate all matched fields */
			if (p[0] == '0' && p[1] == 'x') {
				if (sscanf(p, "%llx", &val) != 1)
					return -EINVAL;
			} else {
				if (sscanf(p, "%lld", &val) != 1)
					return -EINVAL;
			}

			for (i = 0; i < mcnt; i++) {
				f = &fields[midxs[i]];
				f->is_set = true;
				*f->dst = f->is_32 ? (u64)(u32)val : val;
			}
		} else if (str_has_pfx(p, "var_off")) {
			/* skip "var_off=(0x0; 0x3f)" part completely */
			p = strchr(p, ')');
			if (!p)
				return -EINVAL;
			p++;
		}

		p = strpbrk(p, ",)");
		if (!p)
			return -EINVAL;
		if (*p == ')')
			break;
		p++;
	}

	reg->valid = true;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		if (!f->is_set)
			*f->dst = f->def;
	}

	return 0;
}
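
/* E.g. (illustrative): parsing the log fragment
 *   "R6_w=scalar(smin=0,smax=umax=255,var_off=(0x0; 0xff))"
 * sets S64 = [0; 255] and U64 = [0; 255] (umin defaults to 0), while the
 * 32-bit subranges keep their full default ranges.
 */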

/* Parse all register states (TRUE/FALSE branches and DST/SRC registers)
 * out of the verifier log for a corresponding test case BPF program.
 */
static int parse_range_cmp_log(const char *log_buf, struct case_spec spec,
			       int false_pos, int true_pos,
			       struct reg_state *false1_reg, struct reg_state *false2_reg,
			       struct reg_state *true1_reg, struct reg_state *true2_reg)
{
	struct {
		int insn_idx;
		int reg_idx;
		const char *reg_upper;
		struct reg_state *state;
	} specs[] = {
		{false_pos, 6, "R6=", false1_reg},
		{false_pos + 1, 7, "R7=", false2_reg},
		{true_pos, 6, "R6=", true1_reg},
		{true_pos + 1, 7, "R7=", true2_reg},
	};
	char buf[32];
	const char *p = log_buf, *q;
	int i, err;

	for (i = 0; i < 4; i++) {
		sprintf(buf, "%d: (%s) %s = %s%d", specs[i].insn_idx,
			spec.compare_subregs ? "bc" : "bf",
			spec.compare_subregs ? "w0" : "r0",
			spec.compare_subregs ? "w" : "r", specs[i].reg_idx);

		q = strstr(p, buf);
		if (!q) {
			*specs[i].state = (struct reg_state){.valid = false};
			continue;
		}
		p = strstr(q, specs[i].reg_upper);
		if (!p)
			return -EINVAL;
		err = parse_reg_state(p, specs[i].state);
		if (err)
			return -EINVAL;
	}
	return 0;
}

/* Validate ranges match, and print details if they don't */
static bool assert_range_eq(enum num_t t, struct range x, struct range y,
			    const char *ctx1, const char *ctx2)
{
	DEFINE_STRBUF(sb, 512);

	if (range_eq(x, y))
		return true;

	snappendf(sb, "MISMATCH %s.%s: ", ctx1, ctx2);
	snprintf_range(t, sb, x);
	snappendf(sb, " != ");
	snprintf_range(t, sb, y);

	printf("%s\n", sb->buf);

	return false;
}

/* Validate that register states match, and print details if they don't */
static bool assert_reg_state_eq(struct reg_state *r, struct reg_state *e, const char *ctx)
{
	bool ok = true;
	enum num_t t;

	if (r->valid != e->valid) {
		printf("MISMATCH %s: actual %s != expected %s\n", ctx,
		       r->valid ? "<valid>" : "<invalid>",
		       e->valid ? "<valid>" : "<invalid>");
		return false;
	}

	if (!r->valid)
		return true;

	for (t = first_t; t <= last_t; t++) {
		if (!assert_range_eq(t, r->r[t], e->r[t], ctx, t_str(t)))
			ok = false;
	}

	return ok;
}

/* Print verifier log, filtering out irrelevant noise */
static void print_verifier_log(const char *buf)
{
	const char *p;

	while (buf[0]) {
		p = strchrnul(buf, '\n');

		/* filter out irrelevant precision backtracking logs */
		if (str_has_pfx(buf, "mark_precise: "))
			goto skip_line;

		printf("%.*s\n", (int)(p - buf), buf);

skip_line:
		buf = *p == '\0' ? p : p + 1;
	}
}

/* Simulate provided test case purely with our own range-based logic.
 * This is done to set up expectations for verifier's branch_taken logic and
 * verifier's register states in the verifier log.
 */
static void sim_case(enum num_t init_t, enum num_t cond_t,
		     struct range x, struct range y, enum op op,
		     struct reg_state *fr1, struct reg_state *fr2,
		     struct reg_state *tr1, struct reg_state *tr2,
		     int *branch_taken)
{
	const u64 A = x.a;
	const u64 B = x.b;
	const u64 C = y.a;
	const u64 D = y.b;
	struct reg_state rc;
	enum op rev_op = complement_op(op);
	enum num_t t;

	fr1->valid = fr2->valid = true;
	tr1->valid = tr2->valid = true;
	for (t = first_t; t <= last_t; t++) {
		/* if we are initializing using 32-bit subregisters,
		 * full registers get upper 32 bits zeroed automatically
		 */
		struct range z = t_is_32(init_t) ? unkn_subreg(t) : unkn[t];

		fr1->r[t] = fr2->r[t] = tr1->r[t] = tr2->r[t] = z;
	}

	/* step 1: r1 >= A, r2 >= C */
	reg_state_set_const(&rc, init_t, A);
	reg_state_cond(init_t, fr1, &rc, OP_GE, fr1, NULL, "r1>=A");
	reg_state_set_const(&rc, init_t, C);
	reg_state_cond(init_t, fr2, &rc, OP_GE, fr2, NULL, "r2>=C");
	*tr1 = *fr1;
	*tr2 = *fr2;
	if (env.verbosity >= VERBOSE_VERY) {
		printf("STEP1 (%s) R1: ", t_str(init_t)); print_reg_state(fr1, "\n");
		printf("STEP1 (%s) R2: ", t_str(init_t)); print_reg_state(fr2, "\n");
	}

	/* step 2: r1 <= B, r2 <= D */
	reg_state_set_const(&rc, init_t, B);
	reg_state_cond(init_t, fr1, &rc, OP_LE, fr1, NULL, "r1<=B");
	reg_state_set_const(&rc, init_t, D);
	reg_state_cond(init_t, fr2, &rc, OP_LE, fr2, NULL, "r2<=D");
	*tr1 = *fr1;
	*tr2 = *fr2;
	if (env.verbosity >= VERBOSE_VERY) {
		printf("STEP2 (%s) R1: ", t_str(init_t)); print_reg_state(fr1, "\n");
		printf("STEP2 (%s) R2: ", t_str(init_t)); print_reg_state(fr2, "\n");
	}

	/* step 3: r1 <op> r2 */
	*branch_taken = reg_state_branch_taken_op(cond_t, fr1, fr2, op);
	fr1->valid = fr2->valid = false;
	tr1->valid = tr2->valid = false;
	if (*branch_taken != 1) { /* FALSE is possible */
		fr1->valid = fr2->valid = true;
		reg_state_cond(cond_t, fr1, fr2, rev_op, fr1, fr2, "FALSE");
	}
	if (*branch_taken != 0) { /* TRUE is possible */
		tr1->valid = tr2->valid = true;
		reg_state_cond(cond_t, tr1, tr2, op, tr1, tr2, "TRUE");
	}
	if (env.verbosity >= VERBOSE_VERY) {
		printf("STEP3 (%s) FALSE R1:", t_str(cond_t)); print_reg_state(fr1, "\n");
		printf("STEP3 (%s) FALSE R2:", t_str(cond_t)); print_reg_state(fr2, "\n");
		printf("STEP3 (%s) TRUE R1:", t_str(cond_t)); print_reg_state(tr1, "\n");
		printf("STEP3 (%s) TRUE R2:", t_str(cond_t)); print_reg_state(tr2, "\n");
	}
}

/* ===============================
 * HIGH-LEVEL TEST CASE VALIDATION
 * ===============================
 */
static u32 upper_seeds[] = {
	0,
	1,
	U32_MAX,
	U32_MAX - 1,
	S32_MAX,
	(u32)S32_MIN,
};

static u32 lower_seeds[] = {
	0,
	1,
	2, (u32)-2,
	255, (u32)-255,
	UINT_MAX,
	UINT_MAX - 1,
	INT_MAX,
	(u32)INT_MIN,
};

struct ctx {
	int val_cnt, subval_cnt, range_cnt, subrange_cnt;
	u64 uvals[ARRAY_SIZE(upper_seeds) * ARRAY_SIZE(lower_seeds)];
	s64 svals[ARRAY_SIZE(upper_seeds) * ARRAY_SIZE(lower_seeds)];
	u32 usubvals[ARRAY_SIZE(lower_seeds)];
	s32 ssubvals[ARRAY_SIZE(lower_seeds)];
	struct range *uranges, *sranges;
	struct range *usubranges, *ssubranges;
	int max_failure_cnt, cur_failure_cnt;
	int total_case_cnt, case_cnt;
	int rand_case_cnt;
	unsigned rand_seed;
	__u64 start_ns;
	char progress_ctx[64];
};

static void cleanup_ctx(struct ctx *ctx)
{
	free(ctx->uranges);
	free(ctx->sranges);
	free(ctx->usubranges);
	free(ctx->ssubranges);
}

struct subtest_case {
	enum num_t init_t;
	enum num_t cond_t;
	struct range x;
	struct range y;
	enum op op;
};

static void subtest_case_str(struct strbuf *sb, struct subtest_case *t, bool use_op)
{
	snappendf(sb, "(%s)", t_str(t->init_t));
	snprintf_range(t->init_t, sb, t->x);
	snappendf(sb, " (%s)%s ", t_str(t->cond_t), use_op ? op_str(t->op) : "<op>");
	snprintf_range(t->init_t, sb, t->y);
}
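
/* E.g. (illustrative): for init_t = U64, cond_t = S64, x = [0; 100], y = 5,
 * and op = OP_LT, this produces the string "(u64)[0; 100] (s64)< 5".
 */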

/* Generate and validate test case based on specific combination of setup
 * register ranges (including their expected num_t domain), and conditional
 * operation to perform (including num_t domain in which it has to be
 * performed).
 */
static int verify_case_op(enum num_t init_t, enum num_t cond_t,
			  struct range x, struct range y, enum op op)
{
	char log_buf[256 * 1024];
	size_t log_sz = sizeof(log_buf);
	int err, false_pos = 0, true_pos = 0, branch_taken;
	struct reg_state fr1, fr2, tr1, tr2;
	struct reg_state fe1, fe2, te1, te2;
	bool failed = false;
	struct case_spec spec = {
		.init_subregs = (init_t == U32 || init_t == S32),
		.setup_subregs = (init_t == U32 || init_t == S32),
		.setup_signed = (init_t == S64 || init_t == S32),
		.compare_subregs = (cond_t == U32 || cond_t == S32),
		.compare_signed = (cond_t == S64 || cond_t == S32),
	};

	log_buf[0] = '\0';

	sim_case(init_t, cond_t, x, y, op, &fe1, &fe2, &te1, &te2, &branch_taken);

	err = load_range_cmp_prog(x, y, op, branch_taken, spec,
				  log_buf, log_sz, &false_pos, &true_pos);
	if (err) {
		ASSERT_OK(err, "load_range_cmp_prog");
		failed = true;
	}

	err = parse_range_cmp_log(log_buf, spec, false_pos, true_pos,
				  &fr1, &fr2, &tr1, &tr2);
	if (err) {
		ASSERT_OK(err, "parse_range_cmp_log");
		failed = true;
	}

	if (!assert_reg_state_eq(&fr1, &fe1, "false_reg1") ||
	    !assert_reg_state_eq(&fr2, &fe2, "false_reg2") ||
	    !assert_reg_state_eq(&tr1, &te1, "true_reg1") ||
	    !assert_reg_state_eq(&tr2, &te2, "true_reg2")) {
		failed = true;
	}

	if (failed || env.verbosity >= VERBOSE_NORMAL) {
		if (failed || env.verbosity >= VERBOSE_VERY) {
			printf("VERIFIER LOG:\n========================\n");
			print_verifier_log(log_buf);
			printf("=====================\n");
		}
		printf("ACTUAL FALSE1: "); print_reg_state(&fr1, "\n");
		printf("EXPECTED FALSE1: "); print_reg_state(&fe1, "\n");
		printf("ACTUAL FALSE2: "); print_reg_state(&fr2, "\n");
		printf("EXPECTED FALSE2: "); print_reg_state(&fe2, "\n");
		printf("ACTUAL TRUE1: "); print_reg_state(&tr1, "\n");
		printf("EXPECTED TRUE1: "); print_reg_state(&te1, "\n");
		printf("ACTUAL TRUE2: "); print_reg_state(&tr2, "\n");
		printf("EXPECTED TRUE2: "); print_reg_state(&te2, "\n");

		return failed ? -EINVAL : 0;
	}

	return 0;
}

/* Given setup ranges and number types, go over all supported operations,
 * generating individual subtest for each allowed combination
 */
static int verify_case_opt(struct ctx *ctx, enum num_t init_t, enum num_t cond_t,
			   struct range x, struct range y, bool is_subtest)
{
	DEFINE_STRBUF(sb, 256);
	int err;
	struct subtest_case sub = {
		.init_t = init_t,
		.cond_t = cond_t,
		.x = x,
		.y = y,
	};

	sb->pos = 0; /* reset position in strbuf */
	subtest_case_str(sb, &sub, false /* ignore op */);
	if (is_subtest && !test__start_subtest(sb->buf))
		return 0;

	for (sub.op = first_op; sub.op <= last_op; sub.op++) {
		sb->pos = 0; /* reset position in strbuf */
		subtest_case_str(sb, &sub, true /* print op */);

		if (env.verbosity >= VERBOSE_NORMAL) /* this speeds up debugging */
			printf("TEST CASE: %s\n", sb->buf);

		err = verify_case_op(init_t, cond_t, x, y, sub.op);
		if (err || env.verbosity >= VERBOSE_NORMAL)
			ASSERT_OK(err, sb->buf);
		if (err) {
			ctx->cur_failure_cnt++;
			if (ctx->cur_failure_cnt > ctx->max_failure_cnt)
				return err;
			return 0; /* keep testing other cases */
		}
		ctx->case_cnt++;
		if ((ctx->case_cnt % 10000) == 0) {
			double progress = (ctx->case_cnt + 0.0) / ctx->total_case_cnt;
			u64 elapsed_ns = get_time_ns() - ctx->start_ns;
			double remain_ns = elapsed_ns / progress * (1 - progress);

			fprintf(env.stderr_saved, "PROGRESS (%s): %d/%d (%.2lf%%), "
				"elapsed %llu mins (%.2lf hrs), "
				"ETA %.0lf mins (%.2lf hrs)\n",
				ctx->progress_ctx,
				ctx->case_cnt, ctx->total_case_cnt, 100.0 * progress,
				elapsed_ns / 1000000000 / 60,
				elapsed_ns / 1000000000.0 / 3600,
				remain_ns / 1000000000.0 / 60,
				remain_ns / 1000000000.0 / 3600);
		}
	}

	return 0;
}

static int verify_case(struct ctx *ctx, enum num_t init_t, enum num_t cond_t,
		       struct range x, struct range y)
{
	return verify_case_opt(ctx, init_t, cond_t, x, y, true /* is_subtest */);
}

/* ================================
 * GENERATED CASES FROM SEED VALUES
 * ================================
 */
static int u64_cmp(const void *p1, const void *p2)
{
	u64 x1 = *(const u64 *)p1, x2 = *(const u64 *)p2;

	return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
}

static int u32_cmp(const void *p1, const void *p2)
{
	u32 x1 = *(const u32 *)p1, x2 = *(const u32 *)p2;

	return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
}

static int s64_cmp(const void *p1, const void *p2)
{
	s64 x1 = *(const s64 *)p1, x2 = *(const s64 *)p2;

	return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
}

static int s32_cmp(const void *p1, const void *p2)
{
	s32 x1 = *(const s32 *)p1, x2 = *(const s32 *)p2;

	return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
}

/* Generate valid unique constants from seeds, both signed and unsigned */
static void gen_vals(struct ctx *ctx)
{
	int i, j, cnt = 0;

	for (i = 0; i < ARRAY_SIZE(upper_seeds); i++) {
		for (j = 0; j < ARRAY_SIZE(lower_seeds); j++) {
			ctx->uvals[cnt++] = (((u64)upper_seeds[i]) << 32) | lower_seeds[j];
		}
	}

	/* sort and compact uvals (i.e., it's `sort | uniq`) */
	qsort(ctx->uvals, cnt, sizeof(*ctx->uvals), u64_cmp);
	for (i = 1, j = 0; i < cnt; i++) {
		if (ctx->uvals[j] == ctx->uvals[i])
			continue;
		j++;
		ctx->uvals[j] = ctx->uvals[i];
	}
	ctx->val_cnt = j + 1;

	/* we have exactly the same number of s64 values, they are just in
	 * a different order than u64s, so just sort them differently
	 */
	for (i = 0; i < ctx->val_cnt; i++)
		ctx->svals[i] = ctx->uvals[i];
	qsort(ctx->svals, ctx->val_cnt, sizeof(*ctx->svals), s64_cmp);

	if (env.verbosity >= VERBOSE_SUPER) {
		DEFINE_STRBUF(sb1, 256);
		DEFINE_STRBUF(sb2, 256);

		for (i = 0; i < ctx->val_cnt; i++) {
			sb1->pos = sb2->pos = 0;
			snprintf_num(U64, sb1, ctx->uvals[i]);
			snprintf_num(S64, sb2, ctx->svals[i]);
			printf("SEED #%d: u64=%-20s s64=%-20s\n", i, sb1->buf, sb2->buf);
		}
	}

	/* 32-bit values are generated separately */
	cnt = 0;
	for (i = 0; i < ARRAY_SIZE(lower_seeds); i++) {
		ctx->usubvals[cnt++] = lower_seeds[i];
	}

	/* sort and compact usubvals (i.e., it's `sort | uniq`) */
	qsort(ctx->usubvals, cnt, sizeof(*ctx->usubvals), u32_cmp);
	for (i = 1, j = 0; i < cnt; i++) {
		if (ctx->usubvals[j] == ctx->usubvals[i])
			continue;
		j++;
		ctx->usubvals[j] = ctx->usubvals[i];
	}
	ctx->subval_cnt = j + 1;

	for (i = 0; i < ctx->subval_cnt; i++)
		ctx->ssubvals[i] = ctx->usubvals[i];
	qsort(ctx->ssubvals, ctx->subval_cnt, sizeof(*ctx->ssubvals), s32_cmp);

	if (env.verbosity >= VERBOSE_SUPER) {
		DEFINE_STRBUF(sb1, 256);
		DEFINE_STRBUF(sb2, 256);

		for (i = 0; i < ctx->subval_cnt; i++) {
			sb1->pos = sb2->pos = 0;
			snprintf_num(U32, sb1, ctx->usubvals[i]);
			snprintf_num(S32, sb2, ctx->ssubvals[i]);
			printf("SUBSEED #%d: u32=%-10s s32=%-10s\n", i, sb1->buf, sb2->buf);
		}
	}
}

/* Generate valid ranges from upper/lower seeds */
static int gen_ranges(struct ctx *ctx)
{
	int i, j, cnt = 0;

	for (i = 0; i < ctx->val_cnt; i++) {
		for (j = i; j < ctx->val_cnt; j++) {
			if (env.verbosity >= VERBOSE_SUPER) {
				DEFINE_STRBUF(sb1, 256);
				DEFINE_STRBUF(sb2, 256);

				sb1->pos = sb2->pos = 0;
				snprintf_range(U64, sb1, range(U64, ctx->uvals[i], ctx->uvals[j]));
				snprintf_range(S64, sb2, range(S64, ctx->svals[i], ctx->svals[j]));
				printf("RANGE #%d: u64=%-40s s64=%-40s\n", cnt, sb1->buf, sb2->buf);
			}
			cnt++;
		}
	}
	ctx->range_cnt = cnt;

	ctx->uranges = calloc(ctx->range_cnt, sizeof(*ctx->uranges));
	if (!ASSERT_OK_PTR(ctx->uranges, "uranges_calloc"))
		return -EINVAL;
	ctx->sranges = calloc(ctx->range_cnt, sizeof(*ctx->sranges));
	if (!ASSERT_OK_PTR(ctx->sranges, "sranges_calloc"))
		return -EINVAL;

	cnt = 0;
	for (i = 0; i < ctx->val_cnt; i++) {
		for (j = i; j < ctx->val_cnt; j++) {
			ctx->uranges[cnt] = range(U64, ctx->uvals[i], ctx->uvals[j]);
			ctx->sranges[cnt] = range(S64, ctx->svals[i], ctx->svals[j]);
			cnt++;
		}
	}

	cnt = 0;
	for (i = 0; i < ctx->subval_cnt; i++) {
		for (j = i; j < ctx->subval_cnt; j++) {
			if (env.verbosity >= VERBOSE_SUPER) {
				DEFINE_STRBUF(sb1, 256);
				DEFINE_STRBUF(sb2, 256);

				sb1->pos = sb2->pos = 0;
				snprintf_range(U32, sb1, range(U32, ctx->usubvals[i], ctx->usubvals[j]));
				snprintf_range(S32, sb2, range(S32, ctx->ssubvals[i], ctx->ssubvals[j]));
				printf("SUBRANGE #%d: u32=%-20s s32=%-20s\n", cnt, sb1->buf, sb2->buf);
			}
			cnt++;
		}
	}
	ctx->subrange_cnt = cnt;

	ctx->usubranges = calloc(ctx->subrange_cnt, sizeof(*ctx->usubranges));
	if (!ASSERT_OK_PTR(ctx->usubranges, "usubranges_calloc"))
		return -EINVAL;
	ctx->ssubranges = calloc(ctx->subrange_cnt, sizeof(*ctx->ssubranges));
	if (!ASSERT_OK_PTR(ctx->ssubranges, "ssubranges_calloc"))
		return -EINVAL;

	cnt = 0;
	for (i = 0; i < ctx->subval_cnt; i++) {
		for (j = i; j < ctx->subval_cnt; j++) {
			ctx->usubranges[cnt] = range(U32, ctx->usubvals[i], ctx->usubvals[j]);
			ctx->ssubranges[cnt] = range(S32, ctx->ssubvals[i], ctx->ssubvals[j]);
			cnt++;
		}
	}

	return 0;
}

static int parse_env_vars(struct ctx *ctx)
{
	const char *s;

	if ((s = getenv("REG_BOUNDS_MAX_FAILURE_CNT"))) {
		errno = 0;
		ctx->max_failure_cnt = strtol(s, NULL, 10);
		if (errno || ctx->max_failure_cnt < 0) {
			ASSERT_OK(-errno, "REG_BOUNDS_MAX_FAILURE_CNT");
			return -EINVAL;
		}
	}

	if ((s = getenv("REG_BOUNDS_RAND_CASE_CNT"))) {
		errno = 0;
		ctx->rand_case_cnt = strtol(s, NULL, 10);
		if (errno || ctx->rand_case_cnt < 0) {
			ASSERT_OK(-errno, "REG_BOUNDS_RAND_CASE_CNT");
			return -EINVAL;
		}
	}

	if ((s = getenv("REG_BOUNDS_RAND_SEED"))) {
		errno = 0;
		ctx->rand_seed = strtoul(s, NULL, 10);
		if (errno) {
			ASSERT_OK(-errno, "REG_BOUNDS_RAND_SEED");
			return -EINVAL;
		}
	}

	return 0;
}

static int prepare_gen_tests(struct ctx *ctx)
{
	const char *s;
	int err;

	if (!(s = getenv("SLOW_TESTS")) || strcmp(s, "1") != 0) {
		test__skip();
		return -ENOTSUP;
	}

	err = parse_env_vars(ctx);
	if (err)
		return err;

	gen_vals(ctx);
	err = gen_ranges(ctx);
	if (err) {
		ASSERT_OK(err, "gen_ranges");
		return err;
	}

	return 0;
}
1760
1761 /* Go over generated constants and ranges and validate various supported
1762 * combinations of them
1763 */
validate_gen_range_vs_const_64(enum num_t init_t,enum num_t cond_t)1764 static void validate_gen_range_vs_const_64(enum num_t init_t, enum num_t cond_t)
1765 {
1766 struct ctx ctx;
1767 struct range rconst;
1768 const struct range *ranges;
1769 const u64 *vals;
1770 int i, j;
1771
1772 memset(&ctx, 0, sizeof(ctx));
1773
1774 if (prepare_gen_tests(&ctx))
1775 goto cleanup;
1776
1777 ranges = init_t == U64 ? ctx.uranges : ctx.sranges;
1778 vals = init_t == U64 ? ctx.uvals : (const u64 *)ctx.svals;
1779
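	/* each of val_cnt constants is paired with each of range_cnt ranges,
	 * in both argument orders, for every supported conditional op
	 */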
	ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.range_cnt * ctx.val_cnt);
	ctx.start_ns = get_time_ns();
	snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
		 "RANGE x CONST, %s -> %s",
		 t_str(init_t), t_str(cond_t));

	for (i = 0; i < ctx.val_cnt; i++) {
		for (j = 0; j < ctx.range_cnt; j++) {
			rconst = range(init_t, vals[i], vals[i]);

			/* (u64|s64)(<range> x <const>) */
			if (verify_case(&ctx, init_t, cond_t, ranges[j], rconst))
				goto cleanup;
			/* (u64|s64)(<const> x <range>) */
			if (verify_case(&ctx, init_t, cond_t, rconst, ranges[j]))
				goto cleanup;
		}
	}

cleanup:
	cleanup_ctx(&ctx);
}

static void validate_gen_range_vs_const_32(enum num_t init_t, enum num_t cond_t)
{
	struct ctx ctx;
	struct range rconst;
	const struct range *ranges;
	const u32 *vals;
	int i, j;

	memset(&ctx, 0, sizeof(ctx));

	if (prepare_gen_tests(&ctx))
		goto cleanup;

	ranges = init_t == U32 ? ctx.usubranges : ctx.ssubranges;
	vals = init_t == U32 ? ctx.usubvals : (const u32 *)ctx.ssubvals;

	ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.subrange_cnt * ctx.subval_cnt);
	ctx.start_ns = get_time_ns();
	snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
		 "RANGE x CONST, %s -> %s",
		 t_str(init_t), t_str(cond_t));

	for (i = 0; i < ctx.subval_cnt; i++) {
		for (j = 0; j < ctx.subrange_cnt; j++) {
			rconst = range(init_t, vals[i], vals[i]);

			/* (u32|s32)(<range> x <const>) */
			if (verify_case(&ctx, init_t, cond_t, ranges[j], rconst))
				goto cleanup;
			/* (u32|s32)(<const> x <range>) */
			if (verify_case(&ctx, init_t, cond_t, rconst, ranges[j]))
				goto cleanup;
		}
	}

cleanup:
	cleanup_ctx(&ctx);
}

static void validate_gen_range_vs_range(enum num_t init_t, enum num_t cond_t)
{
	struct ctx ctx;
	const struct range *ranges;
	int i, j, rcnt;

	memset(&ctx, 0, sizeof(ctx));

	if (prepare_gen_tests(&ctx))
		goto cleanup;

	switch (init_t) {
	case U64:
		ranges = ctx.uranges;
		rcnt = ctx.range_cnt;
		break;
	case U32:
		ranges = ctx.usubranges;
		rcnt = ctx.subrange_cnt;
		break;
	case S64:
		ranges = ctx.sranges;
		rcnt = ctx.range_cnt;
		break;
	case S32:
		ranges = ctx.ssubranges;
		rcnt = ctx.subrange_cnt;
		break;
	default:
		printf("validate_gen_range_vs_range!\n");
		exit(1);
	}

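	/* rcnt * (rcnt + 1) / 2 distinct range pairs, each verified in both
	 * argument orders, for every supported conditional op
	 */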
	ctx.total_case_cnt = (last_op - first_op + 1) * (2 * rcnt * (rcnt + 1) / 2);
	ctx.start_ns = get_time_ns();
	snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
		 "RANGE x RANGE, %s -> %s",
		 t_str(init_t), t_str(cond_t));

	for (i = 0; i < rcnt; i++) {
		for (j = i; j < rcnt; j++) {
			/* (<range> x <range>) */
			if (verify_case(&ctx, init_t, cond_t, ranges[i], ranges[j]))
				goto cleanup;
			if (verify_case(&ctx, init_t, cond_t, ranges[j], ranges[i]))
				goto cleanup;
		}
	}

cleanup:
	cleanup_ctx(&ctx);
}

/* Go over thousands of test cases generated from initial seed values.
 * Given these take a long time, guard them behind the SLOW_TESTS=1 envvar.
 * If the envvar is not set, these tests are skipped during normal test_progs
 * runs.
 *
 * We split this up into smaller subsets based on initialization and
 * conditional numeric domains to get easy parallelization with test_progs'
 * -j argument.
 */
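/* E.g., an illustrative parallel run of just these generated subtests:
 *   SLOW_TESTS=1 ./test_progs -j -t reg_bounds_gen
 */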

/* RANGE x CONST, U64 initial range */
void test_reg_bounds_gen_consts_u64_u64(void) { validate_gen_range_vs_const_64(U64, U64); }
void test_reg_bounds_gen_consts_u64_s64(void) { validate_gen_range_vs_const_64(U64, S64); }
void test_reg_bounds_gen_consts_u64_u32(void) { validate_gen_range_vs_const_64(U64, U32); }
void test_reg_bounds_gen_consts_u64_s32(void) { validate_gen_range_vs_const_64(U64, S32); }
/* RANGE x CONST, S64 initial range */
void test_reg_bounds_gen_consts_s64_u64(void) { validate_gen_range_vs_const_64(S64, U64); }
void test_reg_bounds_gen_consts_s64_s64(void) { validate_gen_range_vs_const_64(S64, S64); }
void test_reg_bounds_gen_consts_s64_u32(void) { validate_gen_range_vs_const_64(S64, U32); }
void test_reg_bounds_gen_consts_s64_s32(void) { validate_gen_range_vs_const_64(S64, S32); }
/* RANGE x CONST, U32 initial range */
void test_reg_bounds_gen_consts_u32_u64(void) { validate_gen_range_vs_const_32(U32, U64); }
void test_reg_bounds_gen_consts_u32_s64(void) { validate_gen_range_vs_const_32(U32, S64); }
void test_reg_bounds_gen_consts_u32_u32(void) { validate_gen_range_vs_const_32(U32, U32); }
void test_reg_bounds_gen_consts_u32_s32(void) { validate_gen_range_vs_const_32(U32, S32); }
/* RANGE x CONST, S32 initial range */
void test_reg_bounds_gen_consts_s32_u64(void) { validate_gen_range_vs_const_32(S32, U64); }
void test_reg_bounds_gen_consts_s32_s64(void) { validate_gen_range_vs_const_32(S32, S64); }
void test_reg_bounds_gen_consts_s32_u32(void) { validate_gen_range_vs_const_32(S32, U32); }
void test_reg_bounds_gen_consts_s32_s32(void) { validate_gen_range_vs_const_32(S32, S32); }

/* RANGE x RANGE, U64 initial range */
void test_reg_bounds_gen_ranges_u64_u64(void) { validate_gen_range_vs_range(U64, U64); }
void test_reg_bounds_gen_ranges_u64_s64(void) { validate_gen_range_vs_range(U64, S64); }
void test_reg_bounds_gen_ranges_u64_u32(void) { validate_gen_range_vs_range(U64, U32); }
void test_reg_bounds_gen_ranges_u64_s32(void) { validate_gen_range_vs_range(U64, S32); }
/* RANGE x RANGE, S64 initial range */
void test_reg_bounds_gen_ranges_s64_u64(void) { validate_gen_range_vs_range(S64, U64); }
void test_reg_bounds_gen_ranges_s64_s64(void) { validate_gen_range_vs_range(S64, S64); }
void test_reg_bounds_gen_ranges_s64_u32(void) { validate_gen_range_vs_range(S64, U32); }
void test_reg_bounds_gen_ranges_s64_s32(void) { validate_gen_range_vs_range(S64, S32); }
/* RANGE x RANGE, U32 initial range */
void test_reg_bounds_gen_ranges_u32_u64(void) { validate_gen_range_vs_range(U32, U64); }
void test_reg_bounds_gen_ranges_u32_s64(void) { validate_gen_range_vs_range(U32, S64); }
void test_reg_bounds_gen_ranges_u32_u32(void) { validate_gen_range_vs_range(U32, U32); }
void test_reg_bounds_gen_ranges_u32_s32(void) { validate_gen_range_vs_range(U32, S32); }
/* RANGE x RANGE, S32 initial range */
void test_reg_bounds_gen_ranges_s32_u64(void) { validate_gen_range_vs_range(S32, U64); }
void test_reg_bounds_gen_ranges_s32_s64(void) { validate_gen_range_vs_range(S32, S64); }
void test_reg_bounds_gen_ranges_s32_u32(void) { validate_gen_range_vs_range(S32, U32); }
void test_reg_bounds_gen_ranges_s32_s32(void) { validate_gen_range_vs_range(S32, S32); }

#define DEFAULT_RAND_CASE_CNT 100

#define RAND_21BIT_MASK ((1 << 21) - 1)
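/* low 21 bits set (0x1fffff), matching the 22 + 21 + 21 bit split used below */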

static u64 rand_u64(void)
{
	/* RAND_MAX is guaranteed to be at least 1<<15, but in practice it
	 * seems to be 1<<31, so we need to call it thrice to get full u64;
	 * we'll use roughly equal split: 22 + 21 + 21 bits
	 */
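	/* resulting bit layout, high to low:
	 *   bits [63..42]: 22 bits from the 1st random() call
	 *   bits [41..21]: 21 bits from the 2nd
	 *   bits [20..0]:  21 bits from the 3rd
	 */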
	return ((u64)random() << 42) |
	       (((u64)random() & RAND_21BIT_MASK) << 21) |
	       (random() & RAND_21BIT_MASK);
}

static u64 rand_const(enum num_t t)
{
	return cast_t(t, rand_u64());
}

static struct range rand_range(enum num_t t)
{
	u64 x = rand_const(t), y = rand_const(t);

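	/* order the two endpoints within t's own numeric domain, so that,
	 * e.g., S32 endpoints are compared as signed 32-bit values
	 */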
	return range(t, min_t(t, x, y), max_t(t, x, y));
}

static void validate_rand_ranges(enum num_t init_t, enum num_t cond_t, bool const_range)
{
	struct ctx ctx;
	struct range range1, range2;
	int err, i;
	u64 t;

	memset(&ctx, 0, sizeof(ctx));

	err = parse_env_vars(&ctx);
	if (err) {
		ASSERT_OK(err, "parse_env_vars");
		return;
	}

	if (ctx.rand_case_cnt == 0)
		ctx.rand_case_cnt = DEFAULT_RAND_CASE_CNT;
	if (ctx.rand_seed == 0)
		ctx.rand_seed = (unsigned)get_time_ns();

	srandom(ctx.rand_seed);

	ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.rand_case_cnt);
	ctx.start_ns = get_time_ns();
	snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
		 "[RANDOM SEED %u] RANGE x %s, %s -> %s",
		 ctx.rand_seed, const_range ? "CONST" : "RANGE",
		 t_str(init_t), t_str(cond_t));

	for (i = 0; i < ctx.rand_case_cnt; i++) {
		range1 = rand_range(init_t);
		if (const_range) {
			t = rand_const(init_t);
			range2 = range(init_t, t, t);
		} else {
			range2 = rand_range(init_t);
		}

		/* <range1> x <range2> */
		if (verify_case_opt(&ctx, init_t, cond_t, range1, range2, false /* !is_subtest */))
			goto cleanup;
		/* <range2> x <range1> */
		if (verify_case_opt(&ctx, init_t, cond_t, range2, range1, false /* !is_subtest */))
			goto cleanup;
	}

cleanup:
	/* make sure we report random seed for reproducing */
	ASSERT_TRUE(true, ctx.progress_ctx);
	cleanup_ctx(&ctx);
}
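
/* On failure, the "[RANDOM SEED %u]" prefix reported above can be fed back
 * via the REG_BOUNDS_RAND_SEED envvar to reproduce the same case sequence.
 */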

/* [RANDOM] RANGE x CONST, U64 initial range */
void test_reg_bounds_rand_consts_u64_u64(void) { validate_rand_ranges(U64, U64, true /* const */); }
void test_reg_bounds_rand_consts_u64_s64(void) { validate_rand_ranges(U64, S64, true /* const */); }
void test_reg_bounds_rand_consts_u64_u32(void) { validate_rand_ranges(U64, U32, true /* const */); }
void test_reg_bounds_rand_consts_u64_s32(void) { validate_rand_ranges(U64, S32, true /* const */); }
/* [RANDOM] RANGE x CONST, S64 initial range */
void test_reg_bounds_rand_consts_s64_u64(void) { validate_rand_ranges(S64, U64, true /* const */); }
void test_reg_bounds_rand_consts_s64_s64(void) { validate_rand_ranges(S64, S64, true /* const */); }
void test_reg_bounds_rand_consts_s64_u32(void) { validate_rand_ranges(S64, U32, true /* const */); }
void test_reg_bounds_rand_consts_s64_s32(void) { validate_rand_ranges(S64, S32, true /* const */); }
/* [RANDOM] RANGE x CONST, U32 initial range */
void test_reg_bounds_rand_consts_u32_u64(void) { validate_rand_ranges(U32, U64, true /* const */); }
void test_reg_bounds_rand_consts_u32_s64(void) { validate_rand_ranges(U32, S64, true /* const */); }
void test_reg_bounds_rand_consts_u32_u32(void) { validate_rand_ranges(U32, U32, true /* const */); }
void test_reg_bounds_rand_consts_u32_s32(void) { validate_rand_ranges(U32, S32, true /* const */); }
/* [RANDOM] RANGE x CONST, S32 initial range */
void test_reg_bounds_rand_consts_s32_u64(void) { validate_rand_ranges(S32, U64, true /* const */); }
void test_reg_bounds_rand_consts_s32_s64(void) { validate_rand_ranges(S32, S64, true /* const */); }
void test_reg_bounds_rand_consts_s32_u32(void) { validate_rand_ranges(S32, U32, true /* const */); }
void test_reg_bounds_rand_consts_s32_s32(void) { validate_rand_ranges(S32, S32, true /* const */); }

/* [RANDOM] RANGE x RANGE, U64 initial range */
void test_reg_bounds_rand_ranges_u64_u64(void) { validate_rand_ranges(U64, U64, false /* range */); }
void test_reg_bounds_rand_ranges_u64_s64(void) { validate_rand_ranges(U64, S64, false /* range */); }
void test_reg_bounds_rand_ranges_u64_u32(void) { validate_rand_ranges(U64, U32, false /* range */); }
void test_reg_bounds_rand_ranges_u64_s32(void) { validate_rand_ranges(U64, S32, false /* range */); }
/* [RANDOM] RANGE x RANGE, S64 initial range */
void test_reg_bounds_rand_ranges_s64_u64(void) { validate_rand_ranges(S64, U64, false /* range */); }
void test_reg_bounds_rand_ranges_s64_s64(void) { validate_rand_ranges(S64, S64, false /* range */); }
void test_reg_bounds_rand_ranges_s64_u32(void) { validate_rand_ranges(S64, U32, false /* range */); }
void test_reg_bounds_rand_ranges_s64_s32(void) { validate_rand_ranges(S64, S32, false /* range */); }
/* [RANDOM] RANGE x RANGE, U32 initial range */
void test_reg_bounds_rand_ranges_u32_u64(void) { validate_rand_ranges(U32, U64, false /* range */); }
void test_reg_bounds_rand_ranges_u32_s64(void) { validate_rand_ranges(U32, S64, false /* range */); }
void test_reg_bounds_rand_ranges_u32_u32(void) { validate_rand_ranges(U32, U32, false /* range */); }
void test_reg_bounds_rand_ranges_u32_s32(void) { validate_rand_ranges(U32, S32, false /* range */); }
/* [RANDOM] RANGE x RANGE, S32 initial range */
void test_reg_bounds_rand_ranges_s32_u64(void) { validate_rand_ranges(S32, U64, false /* range */); }
void test_reg_bounds_rand_ranges_s32_s64(void) { validate_rand_ranges(S32, S64, false /* range */); }
void test_reg_bounds_rand_ranges_s32_u32(void) { validate_rand_ranges(S32, U32, false /* range */); }
void test_reg_bounds_rand_ranges_s32_s32(void) { validate_rand_ranges(S32, S32, false /* range */); }

/* A set of hard-coded "interesting" cases to validate as part of normal
 * test_progs test runs
 */
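/* Each entry is {init_t, cond_t, x, y}; range endpoints are stored as u64
 * values and reinterpreted according to init_t, hence casts like (u32)S32_MIN.
 */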
static struct subtest_case crafted_cases[] = {
	{U64, U64, {0, 0xffffffff}, {0, 0}},
	{U64, U64, {0, 0x80000000}, {0, 0}},
	{U64, U64, {0x100000000ULL, 0x100000100ULL}, {0, 0}},
	{U64, U64, {0x100000000ULL, 0x180000000ULL}, {0, 0}},
	{U64, U64, {0x100000000ULL, 0x1ffffff00ULL}, {0, 0}},
	{U64, U64, {0x100000000ULL, 0x1ffffff01ULL}, {0, 0}},
	{U64, U64, {0x100000000ULL, 0x1fffffffeULL}, {0, 0}},
	{U64, U64, {0x100000001ULL, 0x1000000ffULL}, {0, 0}},

	/* single point overlap, interesting BPF_EQ and BPF_NE interactions */
	{U64, U64, {0, 1}, {1, 0x80000000}},
	{U64, S64, {0, 1}, {1, 0x80000000}},
	{U64, U32, {0, 1}, {1, 0x80000000}},
	{U64, S32, {0, 1}, {1, 0x80000000}},

	{U64, S64, {0, 0xffffffff00000000ULL}, {0, 0}},
	{U64, S64, {0x7fffffffffffffffULL, 0xffffffff00000000ULL}, {0, 0}},
	{U64, S64, {0x7fffffff00000001ULL, 0xffffffff00000000ULL}, {0, 0}},
	{U64, S64, {0, 0xffffffffULL}, {1, 1}},
	{U64, S64, {0, 0xffffffffULL}, {0x7fffffff, 0x7fffffff}},

	{U64, U32, {0, 0x100000000}, {0, 0}},
	{U64, U32, {0xfffffffe, 0x100000000}, {0x80000000, 0x80000000}},

	{U64, S32, {0, 0xffffffff00000000ULL}, {0, 0}},
	/* these are tricky cases where the lower 32 bits allow tightening the
	 * 64-bit boundaries based on tightened lower 32-bit boundaries
	 */
	{U64, S32, {0, 0x0ffffffffULL}, {0, 0}},
	{U64, S32, {0, 0x100000000ULL}, {0, 0}},
	{U64, S32, {0, 0x100000001ULL}, {0, 0}},
	{U64, S32, {0, 0x180000000ULL}, {0, 0}},
	{U64, S32, {0, 0x17fffffffULL}, {0, 0}},
	{U64, S32, {0, 0x180000001ULL}, {0, 0}},

	/* verifier knows about [-1, 0] range for s32 for this case already */
	{S64, S64, {0xffffffffffffffffULL, 0}, {0xffffffff00000000ULL, 0xffffffff00000000ULL}},
	/* but didn't know about these cases initially */
	{U64, U64, {0xffffffff, 0x100000000ULL}, {0, 0}}, /* s32: [-1, 0] */
	{U64, U64, {0xffffffff, 0x100000001ULL}, {0, 0}}, /* s32: [-1, 1] */

	/* longer convergence case: learning from u64 -> s64 -> u64 -> u32,
	 * arriving at u32: [1, U32_MAX] (instead of more pessimistic [0, U32_MAX])
	 */
	{S64, U64, {0xffffffff00000001ULL, 0}, {0xffffffff00000000ULL, 0xffffffff00000000ULL}},

	{U32, U32, {1, U32_MAX}, {0, 0}},

	{U32, S32, {0, U32_MAX}, {U32_MAX, U32_MAX}},

	{S32, U64, {(u32)S32_MIN, (u32)S32_MIN}, {(u32)(s32)-255, 0}},
	{S32, S64, {(u32)S32_MIN, (u32)(s32)-255}, {(u32)(s32)-2, 0}},
	{S32, S64, {0, 1}, {(u32)S32_MIN, (u32)S32_MIN}},
	{S32, U32, {(u32)S32_MIN, (u32)S32_MIN}, {(u32)S32_MIN, (u32)S32_MIN}},

	/* edge overlap tests for BPF_NE */
	{U64, U64, {0, U64_MAX}, {U64_MAX, U64_MAX}},
	{U64, U64, {0, U64_MAX}, {0, 0}},
	{S64, U64, {S64_MIN, 0}, {S64_MIN, S64_MIN}},
	{S64, U64, {S64_MIN, 0}, {0, 0}},
	{S64, U64, {S64_MIN, S64_MAX}, {S64_MAX, S64_MAX}},
	{U32, U32, {0, U32_MAX}, {0, 0}},
	{U32, U32, {0, U32_MAX}, {U32_MAX, U32_MAX}},
	{S32, U32, {(u32)S32_MIN, 0}, {0, 0}},
	{S32, U32, {(u32)S32_MIN, 0}, {(u32)S32_MIN, (u32)S32_MIN}},
	{S32, U32, {(u32)S32_MIN, S32_MAX}, {S32_MAX, S32_MAX}},
	{S64, U32, {0x0, 0x1f}, {0xffffffff80000000ULL, 0x000000007fffffffULL}},
	{S64, U32, {0x0, 0x1f}, {0xffffffffffff8000ULL, 0x0000000000007fffULL}},
	{S64, U32, {0x0, 0x1f}, {0xffffffffffffff80ULL, 0x000000000000007fULL}},
};

/* Go over crafted hard-coded cases. This is fast, so we do it as part of a
 * normal test_progs run.
 */
void test_reg_bounds_crafted(void)
{
	struct ctx ctx;
	int i;

	memset(&ctx, 0, sizeof(ctx));

	for (i = 0; i < ARRAY_SIZE(crafted_cases); i++) {
		struct subtest_case *c = &crafted_cases[i];

		verify_case(&ctx, c->init_t, c->cond_t, c->x, c->y);
		verify_case(&ctx, c->init_t, c->cond_t, c->y, c->x);
	}

	cleanup_ctx(&ctx);
}