1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
3
4 #define _GNU_SOURCE
5 #include <limits.h>
6 #include <test_progs.h>
7 #include <linux/filter.h>
8 #include <linux/bpf.h>
9
10 /* =================================
11 * SHORT AND CONSISTENT NUMBER TYPES
12 * =================================
13 */
14 #define U64_MAX ((u64)UINT64_MAX)
15 #define U32_MAX ((u32)UINT_MAX)
16 #define U16_MAX ((u32)0xffff)
17 #define S64_MIN ((s64)INT64_MIN)
18 #define S64_MAX ((s64)INT64_MAX)
19 #define S32_MIN ((s32)INT_MIN)
20 #define S32_MAX ((s32)INT_MAX)
21 #define S16_MIN ((s16)0x8000)
22 #define S16_MAX ((s16)0x7fff)
23
24 typedef unsigned long long ___u64;
25 typedef unsigned int ___u32;
26 typedef long long ___s64;
27 typedef int ___s32;
28
29 /* avoid conflicts with already defined types in kernel headers */
30 #define u64 ___u64
31 #define u32 ___u32
32 #define s64 ___s64
33 #define s32 ___s32
34
35 /* ==================================
36 * STRING BUF ABSTRACTION AND HELPERS
37 * ==================================
38 */
39 struct strbuf {
40 size_t buf_sz;
41 int pos;
42 char buf[0];
43 };
44
45 #define DEFINE_STRBUF(name, N) \
46 struct { struct strbuf buf; char data[(N)]; } ___##name; \
47 struct strbuf *name = (___##name.buf.buf_sz = (N), ___##name.buf.pos = 0, &___##name.buf)
48
49 __printf(2, 3)
50 static inline void snappendf(struct strbuf *s, const char *fmt, ...)
51 {
52 va_list args;
53
54 va_start(args, fmt);
55 s->pos += vsnprintf(s->buf + s->pos,
56 s->pos < s->buf_sz ? s->buf_sz - s->pos : 0,
57 fmt, args);
58 va_end(args);
59 }
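/* Illustrative usage of the strbuf helpers (example only, not used by the
 * tests directly):
 *
 *   DEFINE_STRBUF(sb, 64);
 *
 *   snappendf(sb, "x=%d", 10);
 *   snappendf(sb, ",y=%s", "abc");
 *   // sb->buf now holds "x=10,y=abc"
 *
 * Appends past the buffer capacity are truncated by vsnprintf(), while
 * sb->pos keeps growing, mirroring snprintf() semantics.
 */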
60
61 /* ==================================
62 * GENERIC NUMBER TYPE AND OPERATIONS
63 * ==================================
64 */
65 enum num_t { U64, first_t = U64, U32, S64, S32, last_t = S32 };
66
67 static __always_inline u64 min_t(enum num_t t, u64 x, u64 y)
68 {
69 switch (t) {
70 case U64: return (u64)x < (u64)y ? (u64)x : (u64)y;
71 case U32: return (u32)x < (u32)y ? (u32)x : (u32)y;
72 case S64: return (s64)x < (s64)y ? (s64)x : (s64)y;
73 case S32: return (s32)x < (s32)y ? (u32)(s32)x : (u32)(s32)y;
74 default: printf("min_t!\n"); exit(1);
75 }
76 }
77
78 static __always_inline u64 max_t(enum num_t t, u64 x, u64 y)
79 {
80 switch (t) {
81 case U64: return (u64)x > (u64)y ? (u64)x : (u64)y;
82 case U32: return (u32)x > (u32)y ? (u32)x : (u32)y;
83 case S64: return (s64)x > (s64)y ? (s64)x : (s64)y;
84 case S32: return (s32)x > (s32)y ? (u32)(s32)x : (u32)(s32)y;
85 default: printf("max_t!\n"); exit(1);
86 }
87 }
88
89 static __always_inline u64 cast_t(enum num_t t, u64 x)
90 {
91 switch (t) {
92 case U64: return (u64)x;
93 case U32: return (u32)x;
94 case S64: return (s64)x;
95 case S32: return (u32)(s32)x;
96 default: printf("cast_t!\n"); exit(1);
97 }
98 }
99
100 static const char *t_str(enum num_t t)
101 {
102 switch (t) {
103 case U64: return "u64";
104 case U32: return "u32";
105 case S64: return "s64";
106 case S32: return "s32";
107 default: printf("t_str!\n"); exit(1);
108 }
109 }
110
111 static bool t_is_32(enum num_t t)
112 {
113 switch (t) {
114 case U64: return false;
115 case U32: return true;
116 case S64: return false;
117 case S32: return true;
118 default: printf("t_is_32!\n"); exit(1);
119 }
120 }
121
122 static enum num_t t_signed(enum num_t t)
123 {
124 switch (t) {
125 case U64: return S64;
126 case U32: return S32;
127 case S64: return S64;
128 case S32: return S32;
129 default: printf("t_signed!\n"); exit(1);
130 }
131 }
132
133 static enum num_t t_unsigned(enum num_t t)
134 {
135 switch (t) {
136 case U64: return U64;
137 case U32: return U32;
138 case S64: return U64;
139 case S32: return U32;
140 default: printf("t_unsigned!\n"); exit(1);
141 }
142 }
143
144 #define UNUM_MAX_DECIMAL U16_MAX
145 #define SNUM_MAX_DECIMAL S16_MAX
146 #define SNUM_MIN_DECIMAL S16_MIN
147
148 static bool num_is_small(enum num_t t, u64 x)
149 {
150 switch (t) {
151 case U64: return (u64)x <= UNUM_MAX_DECIMAL;
152 case U32: return (u32)x <= UNUM_MAX_DECIMAL;
153 case S64: return (s64)x >= SNUM_MIN_DECIMAL && (s64)x <= SNUM_MAX_DECIMAL;
154 case S32: return (s32)x >= SNUM_MIN_DECIMAL && (s32)x <= SNUM_MAX_DECIMAL;
155 default: printf("num_is_small!\n"); exit(1);
156 }
157 }
158
159 static void snprintf_num(enum num_t t, struct strbuf *sb, u64 x)
160 {
161 bool is_small = num_is_small(t, x);
162
163 if (is_small) {
164 switch (t) {
165 case U64: return snappendf(sb, "%llu", (u64)x);
166 case U32: return snappendf(sb, "%u", (u32)x);
167 case S64: return snappendf(sb, "%lld", (s64)x);
168 case S32: return snappendf(sb, "%d", (s32)x);
169 default: printf("snprintf_num!\n"); exit(1);
170 }
171 } else {
172 switch (t) {
173 case U64:
174 if (x == U64_MAX)
175 return snappendf(sb, "U64_MAX");
176 else if (x >= U64_MAX - 256)
177 return snappendf(sb, "U64_MAX-%llu", U64_MAX - x);
178 else
179 return snappendf(sb, "%#llx", (u64)x);
180 case U32:
181 if ((u32)x == U32_MAX)
182 return snappendf(sb, "U32_MAX");
183 else if ((u32)x >= U32_MAX - 256)
184 return snappendf(sb, "U32_MAX-%u", U32_MAX - (u32)x);
185 else
186 return snappendf(sb, "%#x", (u32)x);
187 case S64:
188 if ((s64)x == S64_MAX)
189 return snappendf(sb, "S64_MAX");
190 else if ((s64)x >= S64_MAX - 256)
191 return snappendf(sb, "S64_MAX-%lld", S64_MAX - (s64)x);
192 else if ((s64)x == S64_MIN)
193 return snappendf(sb, "S64_MIN");
194 else if ((s64)x <= S64_MIN + 256)
195 return snappendf(sb, "S64_MIN+%lld", (s64)x - S64_MIN);
196 else
197 return snappendf(sb, "%#llx", (s64)x);
198 case S32:
199 if ((s32)x == S32_MAX)
200 return snappendf(sb, "S32_MAX");
201 else if ((s32)x >= S32_MAX - 256)
202 return snappendf(sb, "S32_MAX-%d", S32_MAX - (s32)x);
203 else if ((s32)x == S32_MIN)
204 return snappendf(sb, "S32_MIN");
205 else if ((s32)x <= S32_MIN + 256)
206 return snappendf(sb, "S32_MIN+%d", (s32)x - S32_MIN);
207 else
208 return snappendf(sb, "%#x", (s32)x);
209 default: printf("snprintf_num!\n"); exit(1);
210 }
211 }
212 }
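/* A few example renderings (illustrative only), assuming the decimal
 * thresholds defined above:
 *
 *   snprintf_num(U64, sb, 1000)             -> "1000"
 *   snprintf_num(U64, sb, U64_MAX - 5)      -> "U64_MAX-5"
 *   snprintf_num(S32, sb, (u32)-2)          -> "-2"
 *   snprintf_num(S64, sb, (u64)S64_MIN + 1) -> "S64_MIN+1"
 */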
213
214 /* ===================================
215 * GENERIC RANGE STRUCT AND OPERATIONS
216 * ===================================
217 */
218 struct range {
219 u64 a, b;
220 };
221
222 static void snprintf_range(enum num_t t, struct strbuf *sb, struct range x)
223 {
224 if (x.a == x.b)
225 return snprintf_num(t, sb, x.a);
226
227 snappendf(sb, "[");
228 snprintf_num(t, sb, x.a);
229 snappendf(sb, "; ");
230 snprintf_num(t, sb, x.b);
231 snappendf(sb, "]");
232 }
233
234 static void print_range(enum num_t t, struct range x, const char *sfx)
235 {
236 DEFINE_STRBUF(sb, 128);
237
238 snprintf_range(t, sb, x);
239 printf("%s%s", sb->buf, sfx);
240 }
241
242 static const struct range unkn[] = {
243 [U64] = { 0, U64_MAX },
244 [U32] = { 0, U32_MAX },
245 [S64] = { (u64)S64_MIN, (u64)S64_MAX },
246 [S32] = { (u64)(u32)S32_MIN, (u64)(u32)S32_MAX },
247 };
248
249 static struct range unkn_subreg(enum num_t t)
250 {
251 switch (t) {
252 case U64: return unkn[U32];
253 case U32: return unkn[U32];
254 case S64: return unkn[U32];
255 case S32: return unkn[S32];
256 default: printf("unkn_subreg!\n"); exit(1);
257 }
258 }
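/* Example: after a 32-bit write like w6 = w0 the upper 32 bits of r6 are
 * zero, so even the 64-bit views collapse to [0; U32_MAX]
 * (unkn_subreg(U64) == unkn_subreg(S64) == unkn[U32]), while the s32 view
 * of the lower half stays fully unknown.
 */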
259
260 static struct range range(enum num_t t, u64 a, u64 b)
261 {
262 switch (t) {
263 case U64: return (struct range){ (u64)a, (u64)b };
264 case U32: return (struct range){ (u32)a, (u32)b };
265 case S64: return (struct range){ (s64)a, (s64)b };
266 case S32: return (struct range){ (u32)(s32)a, (u32)(s32)b };
267 default: printf("range!\n"); exit(1);
268 }
269 }
270
271 static __always_inline u32 sign64(u64 x) { return (x >> 63) & 1; }
272 static __always_inline u32 sign32(u64 x) { return ((u32)x >> 31) & 1; }
273 static __always_inline u32 upper32(u64 x) { return (u32)(x >> 32); }
274 static __always_inline u64 swap_low32(u64 x, u32 y) { return (x & 0xffffffff00000000ULL) | y; }
275
276 static bool range_eq(struct range x, struct range y)
277 {
278 return x.a == y.a && x.b == y.b;
279 }
280
281 static struct range range_cast_to_s32(struct range x)
282 {
283 u64 a = x.a, b = x.b;
284
285 /* if upper 32 bits are constant, lower 32 bits should form a proper
286 * s32 range to be correct
287 */
288 if (upper32(a) == upper32(b) && (s32)a <= (s32)b)
289 return range(S32, a, b);
290
291 /* Special case where upper bits form a small sequence of two
292 * sequential numbers (in 32-bit unsigned space, so 0xffffffff to
293 * 0x00000000 is also valid), while lower bits form a proper s32 range
294 * going from negative numbers to positive numbers.
295 *
296 * E.g.: [0xfffffff0ffffff00; 0xfffffff100000010]. Iterating
297 * over full 64-bit numbers range will form a proper [-16, 16]
298 * ([0xffffff00; 0x00000010]) range in its lower 32 bits.
299 */
300 if (upper32(a) + 1 == upper32(b) && (s32)a < 0 && (s32)b >= 0)
301 return range(S32, a, b);
302
303 /* otherwise we can't derive much meaningful information */
304 return unkn[S32];
305 }
306
307 static struct range range_cast_u64(enum num_t to_t, struct range x)
308 {
309 u64 a = (u64)x.a, b = (u64)x.b;
310
311 switch (to_t) {
312 case U64:
313 return x;
314 case U32:
315 if (upper32(a) != upper32(b))
316 return unkn[U32];
317 return range(U32, a, b);
318 case S64:
319 if (sign64(a) != sign64(b))
320 return unkn[S64];
321 return range(S64, a, b);
322 case S32:
323 return range_cast_to_s32(x);
324 default: printf("range_cast_u64!\n"); exit(1);
325 }
326 }
327
328 static struct range range_cast_s64(enum num_t to_t, struct range x)
329 {
330 s64 a = (s64)x.a, b = (s64)x.b;
331
332 switch (to_t) {
333 case U64:
334 /* equivalent to checking that (u64)a <= (u64)b */
335 if (sign64(a) != sign64(b))
336 return unkn[U64];
337 return range(U64, a, b);
338 case U32:
339 if (upper32(a) != upper32(b) || sign32(a) != sign32(b))
340 return unkn[U32];
341 return range(U32, a, b);
342 case S64:
343 return x;
344 case S32:
345 return range_cast_to_s32(x);
346 default: printf("range_cast_s64!\n"); exit(1);
347 }
348 }
349
350 static struct range range_cast_u32(enum num_t to_t, struct range x)
351 {
352 u32 a = (u32)x.a, b = (u32)x.b;
353
354 switch (to_t) {
355 case U64:
356 case S64:
357 /* u32 is always a valid zero-extended u64/s64 */
358 return range(to_t, a, b);
359 case U32:
360 return x;
361 case S32:
362 return range_cast_to_s32(range(U32, a, b));
363 default: printf("range_cast_u32!\n"); exit(1);
364 }
365 }
366
367 static struct range range_cast_s32(enum num_t to_t, struct range x)
368 {
369 s32 a = (s32)x.a, b = (s32)x.b;
370
371 switch (to_t) {
372 case U64:
373 case U32:
374 case S64:
375 if (sign32(a) != sign32(b))
376 return unkn[to_t];
377 return range(to_t, a, b);
378 case S32:
379 return x;
380 default: printf("range_cast_s32!\n"); exit(1);
381 }
382 }
383
384 /* Reinterpret range in *from_t* domain as a range in *to_t* domain preserving
385 * all possible information. Worst case, it will be unknown range within
386 * *to_t* domain, if nothing more specific can be guaranteed during the
387 * conversion
388 */
389 static struct range range_cast(enum num_t from_t, enum num_t to_t, struct range from)
390 {
391 switch (from_t) {
392 case U64: return range_cast_u64(to_t, from);
393 case U32: return range_cast_u32(to_t, from);
394 case S64: return range_cast_s64(to_t, from);
395 case S32: return range_cast_s32(to_t, from);
396 default: printf("range_cast!\n"); exit(1);
397 }
398 }
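/* A couple of worked examples (illustrative only):
 *
 *   range_cast(U64, U32, [0xffffffff00000010; 0xffffffff000000ff])
 *     -> [0x10; 0xff], since the constant upper 32 bits can be dropped;
 *   range_cast(U64, S64, [0xffffffff00000010; 0xffffffff000000ff])
 *     -> same bounds reinterpreted as negative s64 values (signs match);
 *   range_cast(S32, U32, [-1; 1])
 *     -> unkn[U32], because [-1; 1] wraps around zero in unsigned terms.
 */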
399
400 static bool is_valid_num(enum num_t t, u64 x)
401 {
402 switch (t) {
403 case U64: return true;
404 case U32: return upper32(x) == 0;
405 case S64: return true;
406 case S32: return upper32(x) == 0;
407 default: printf("is_valid_num!\n"); exit(1);
408 }
409 }
410
411 static bool is_valid_range(enum num_t t, struct range x)
412 {
413 if (!is_valid_num(t, x.a) || !is_valid_num(t, x.b))
414 return false;
415
416 switch (t) {
417 case U64: return (u64)x.a <= (u64)x.b;
418 case U32: return (u32)x.a <= (u32)x.b;
419 case S64: return (s64)x.a <= (s64)x.b;
420 case S32: return (s32)x.a <= (s32)x.b;
421 default: printf("is_valid_range!\n"); exit(1);
422 }
423 }
424
425 static struct range range_intersection(enum num_t t, struct range old, struct range new)
426 {
427 return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b));
428 }
429
430 /*
431 * Result is precise when 'x' and 'y' overlap or form a continuous range,
432 * result is an over-approximation if 'x' and 'y' do not overlap.
433 */
434 static struct range range_union(enum num_t t, struct range x, struct range y)
435 {
436 if (!is_valid_range(t, x))
437 return y;
438 if (!is_valid_range(t, y))
439 return x;
440 return range(t, min_t(t, x.a, y.a), max_t(t, x.b, y.b));
441 }
442
443 /*
444 * This function attempts to improve the x range by intersecting it with y.
445 * range_cast(... to_t ...) loses precision for ranges that cross to_t
446 * min/max boundaries. To avoid such precision losses this function
447 * splits both x and y into halves corresponding to non-overflowing
448 * sub-ranges: [0, smax] and [smin, -1].
449 * Final result is computed as follows:
450 *
451 * ((x ∩ [0, smax]) ∩ (y ∩ [0, smax])) ∪
452 * ((x ∩ [smin,-1]) ∩ (y ∩ [smin,-1]))
453 *
454 * Precision might still be lost if final union is not a continuous range.
455 */
456 static struct range range_refine_in_halves(enum num_t x_t, struct range x,
457 enum num_t y_t, struct range y)
458 {
459 struct range x_pos, x_neg, y_pos, y_neg, r_pos, r_neg;
460 u64 smax, smin, neg_one;
461
462 if (t_is_32(x_t)) {
463 smax = (u64)(u32)S32_MAX;
464 smin = (u64)(u32)S32_MIN;
465 neg_one = (u64)(u32)(s32)(-1);
466 } else {
467 smax = (u64)S64_MAX;
468 smin = (u64)S64_MIN;
469 neg_one = U64_MAX;
470 }
471 x_pos = range_intersection(x_t, x, range(x_t, 0, smax));
472 x_neg = range_intersection(x_t, x, range(x_t, smin, neg_one));
473 y_pos = range_intersection(y_t, y, range(y_t, 0, smax));
474 y_neg = range_intersection(y_t, y, range(y_t, smin, neg_one));
475 r_pos = range_intersection(x_t, x_pos, range_cast(y_t, x_t, y_pos));
476 r_neg = range_intersection(x_t, x_neg, range_cast(y_t, x_t, y_neg));
477 return range_union(x_t, r_pos, r_neg);
478
479 }
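/* Worked example (illustrative): refining x = [0; 10] in U64 domain with
 * y = [-5; 5] in S64 domain. A plain range_cast(S64, U64, [-5; 5]) degrades
 * to the unknown u64 range (the bounds have different signs), but splitting
 * into halves keeps the non-negative half [0; 5] and discards the negative
 * half [-5; -1] (x has no values there), so the refined x is [0; 5].
 */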
480
481 static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y)
482 {
483 struct range y_cast;
484
485 if (t_is_32(x_t) == t_is_32(y_t))
486 x = range_refine_in_halves(x_t, x, y_t, y);
487
488 y_cast = range_cast(y_t, x_t, y);
489
490 /* If we know that
491 * - *x* is in the range of signed 32bit value, and
492 * - *y_cast* range is 32-bit signed non-negative
493 * then *x* range can be improved with *y_cast* such that *x* range
494 * is 32-bit signed non-negative. Otherwise, if the new range for *x*
495 * allowed the upper 32 bits to be 0xffffffff, the eventual new range for
496 * *x* would be out of the signed 32-bit range, which violates the original
497 * *x* range.
498 */
499 if (x_t == S64 && y_t == S32 && y_cast.a <= S32_MAX && y_cast.b <= S32_MAX &&
500 (s64)x.a >= S32_MIN && (s64)x.b <= S32_MAX)
501 return range_intersection(x_t, x, y_cast);
502
503 /* the case when new range knowledge, *y*, is a 32-bit subregister
504 * range, while previous range knowledge, *x*, is a full register
505 * 64-bit range, needs special treatment to take into account upper 32
506 * bits of full register range
507 */
508 if (t_is_32(y_t) && !t_is_32(x_t)) {
509 struct range x_swap;
510
511 /* some combinations of upper 32 bits and sign bit can lead to
512 * invalid ranges, in such cases it's easier to detect them
513 * after cast/swap than try to enumerate all the conditions
514 * under which transformation and knowledge transfer is valid
515 */
516 x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b));
517 if (!is_valid_range(x_t, x_swap))
518 return x;
519 return range_intersection(x_t, x, x_swap);
520 }
521
522 /* otherwise, plain range cast and intersection works */
523 return range_intersection(x_t, x, y_cast);
524 }
525
526 /* =======================
527 * GENERIC CONDITIONAL OPS
528 * =======================
529 */
530 enum op { OP_LT, OP_LE, OP_GT, OP_GE, OP_EQ, OP_NE, first_op = OP_LT, last_op = OP_NE };
531
532 static enum op complement_op(enum op op)
533 {
534 switch (op) {
535 case OP_LT: return OP_GE;
536 case OP_LE: return OP_GT;
537 case OP_GT: return OP_LE;
538 case OP_GE: return OP_LT;
539 case OP_EQ: return OP_NE;
540 case OP_NE: return OP_EQ;
541 default: printf("complement_op!\n"); exit(1);
542 }
543 }
544
545 static const char *op_str(enum op op)
546 {
547 switch (op) {
548 case OP_LT: return "<";
549 case OP_LE: return "<=";
550 case OP_GT: return ">";
551 case OP_GE: return ">=";
552 case OP_EQ: return "==";
553 case OP_NE: return "!=";
554 default: printf("op_str!\n"); exit(1);
555 }
556 }
557
558 /* Can register with range [x.a, x.b] *EVER* satisfy
559 * OP (<, <=, >, >=, ==, !=) relation to
560 * a register with range [y.a, y.b]
561 * _in *num_t* domain_
562 */
563 static bool range_canbe_op(enum num_t t, struct range x, struct range y, enum op op)
564 {
565 #define range_canbe(T) do { \
566 switch (op) { \
567 case OP_LT: return (T)x.a < (T)y.b; \
568 case OP_LE: return (T)x.a <= (T)y.b; \
569 case OP_GT: return (T)x.b > (T)y.a; \
570 case OP_GE: return (T)x.b >= (T)y.a; \
571 case OP_EQ: return (T)max_t(t, x.a, y.a) <= (T)min_t(t, x.b, y.b); \
572 case OP_NE: return !((T)x.a == (T)x.b && (T)y.a == (T)y.b && (T)x.a == (T)y.a); \
573 default: printf("range_canbe op %d\n", op); exit(1); \
574 } \
575 } while (0)
576
577 switch (t) {
578 case U64: { range_canbe(u64); }
579 case U32: { range_canbe(u32); }
580 case S64: { range_canbe(s64); }
581 case S32: { range_canbe(s32); }
582 default: printf("range_canbe!\n"); exit(1);
583 }
584 #undef range_canbe
585 }
586
587 /* Does register with range [x.a, x.b] *ALWAYS* satisfy
588 * OP (<, <=, >, >=, ==, !=) relation to
589 * a register with range [y.a, y.b]
590 * _in *num_t* domain_
591 */
592 static bool range_always_op(enum num_t t, struct range x, struct range y, enum op op)
593 {
594 /* always op <=> ! canbe complement(op) */
595 return !range_canbe_op(t, x, y, complement_op(op));
596 }
597
598 /* Does register with range [x.a, x.b] *NEVER* satisfy
599 * OP (<, <=, >, >=, ==, !=) relation to
600 * a register with range [y.a, y.b]
601 * _in *num_t* domain_
602 */
603 static bool range_never_op(enum num_t t, struct range x, struct range y, enum op op)
604 {
605 return !range_canbe_op(t, x, y, op);
606 }
607
608 /* similar to verifier's is_branch_taken():
609 * 1 - always taken;
610 * 0 - never taken,
611 * -1 - unsure.
612 */
613 static int range_branch_taken_op(enum num_t t, struct range x, struct range y, enum op op)
614 {
615 if (range_always_op(t, x, y, op))
616 return 1;
617 if (range_never_op(t, x, y, op))
618 return 0;
619 return -1;
620 }
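/* Examples in U64 domain (illustrative only):
 *   x = [1; 4],  y = [5; 10], op = OP_LT -> 1 (always taken: x.b < y.a)
 *   x = [7; 9],  y = [1; 3],  op = OP_LE -> 0 (never taken: x.a > y.b)
 *   x = [1; 10], y = [5; 5],  op = OP_EQ -> -1 (5 is possible, but not certain)
 */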
621
622 /* What would be the new estimates for register x and y ranges assuming truthful
623 * OP comparison between them. I.e., (x OP y == true) => x <- newx, y <- newy.
624 *
625 * We assume "interesting" cases where ranges overlap. Cases where it's
626 * obvious that (x OP y) is either always true or false should be filtered with
627 * range_never and range_always checks.
628 */
629 static void range_cond(enum num_t t, struct range x, struct range y,
630 enum op op, struct range *newx, struct range *newy)
631 {
632 if (!range_canbe_op(t, x, y, op)) {
633 /* nothing to adjust, can't happen, return original values */
634 *newx = x;
635 *newy = y;
636 return;
637 }
638 switch (op) {
639 case OP_LT:
640 *newx = range(t, x.a, min_t(t, x.b, y.b - 1));
641 *newy = range(t, max_t(t, x.a + 1, y.a), y.b);
642 break;
643 case OP_LE:
644 *newx = range(t, x.a, min_t(t, x.b, y.b));
645 *newy = range(t, max_t(t, x.a, y.a), y.b);
646 break;
647 case OP_GT:
648 *newx = range(t, max_t(t, x.a, y.a + 1), x.b);
649 *newy = range(t, y.a, min_t(t, x.b - 1, y.b));
650 break;
651 case OP_GE:
652 *newx = range(t, max_t(t, x.a, y.a), x.b);
653 *newy = range(t, y.a, min_t(t, x.b, y.b));
654 break;
655 case OP_EQ:
656 *newx = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
657 *newy = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
658 break;
659 case OP_NE:
660 /* below logic is supported by the verifier now */
661 if (x.a == x.b && x.a == y.a) {
662 /* X is a constant matching left side of Y */
663 *newx = range(t, x.a, x.b);
664 *newy = range(t, y.a + 1, y.b);
665 } else if (x.a == x.b && x.b == y.b) {
666 /* X is a constant matching right side of Y */
667 *newx = range(t, x.a, x.b);
668 *newy = range(t, y.a, y.b - 1);
669 } else if (y.a == y.b && x.a == y.a) {
670 /* Y is a constant matching left side of X */
671 *newx = range(t, x.a + 1, x.b);
672 *newy = range(t, y.a, y.b);
673 } else if (y.a == y.b && x.b == y.b) {
674 /* Y is a constant matching right side of X */
675 *newx = range(t, x.a, x.b - 1);
676 *newy = range(t, y.a, y.b);
677 } else {
678 /* generic case, can't derive more information */
679 *newx = range(t, x.a, x.b);
680 *newy = range(t, y.a, y.b);
681 }
682
683 break;
684 default:
685 break;
686 }
687 }
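/* Worked example (U64 domain, illustrative): for x = [3; 10], y = [5; 8] and
 * a taken "x < y" branch, x can be at most y.b - 1 = 7 and y must be at least
 * x.a + 1 = 4 (already implied by y.a = 5), so:
 *
 *   range_cond(U64, [3; 10], [5; 8], OP_LT, &nx, &ny) -> nx = [3; 7], ny = [5; 8]
 */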
688
689 /* =======================
690 * REGISTER STATE HANDLING
691 * =======================
692 */
693 struct reg_state {
694 struct range r[4]; /* indexed by enum num_t: U64, U32, S64, S32 */
695 bool valid;
696 };
697
698 static void print_reg_state(struct reg_state *r, const char *sfx)
699 {
700 DEFINE_STRBUF(sb, 512);
701 enum num_t t;
702 int cnt = 0;
703
704 if (!r->valid) {
705 printf("<not found>%s", sfx);
706 return;
707 }
708
709 snappendf(sb, "scalar(");
710 for (t = first_t; t <= last_t; t++) {
711 snappendf(sb, "%s%s=", cnt++ ? "," : "", t_str(t));
712 snprintf_range(t, sb, r->r[t]);
713 }
714 snappendf(sb, ")");
715
716 printf("%s%s", sb->buf, sfx);
717 }
718
719 static void print_refinement(enum num_t s_t, struct range src,
720 enum num_t d_t, struct range old, struct range new,
721 const char *ctx)
722 {
723 printf("REFINING (%s) (%s)SRC=", ctx, t_str(s_t));
724 print_range(s_t, src, "");
725 printf(" (%s)DST_OLD=", t_str(d_t));
726 print_range(d_t, old, "");
727 printf(" (%s)DST_NEW=", t_str(d_t));
728 print_range(d_t, new, "\n");
729 }
730
731 static void reg_state_refine(struct reg_state *r, enum num_t t, struct range x, const char *ctx)
732 {
733 enum num_t d_t, s_t;
734 struct range old;
735 bool keep_going = false;
736
737 again:
738 /* try to derive new knowledge from just learned range x of type t */
739 for (d_t = first_t; d_t <= last_t; d_t++) {
740 old = r->r[d_t];
741 r->r[d_t] = range_refine(d_t, r->r[d_t], t, x);
742 if (!range_eq(r->r[d_t], old)) {
743 keep_going = true;
744 if (env.verbosity >= VERBOSE_VERY)
745 print_refinement(t, x, d_t, old, r->r[d_t], ctx);
746 }
747 }
748
749 /* now see if we can derive anything new from updated reg_state's ranges */
750 for (s_t = first_t; s_t <= last_t; s_t++) {
751 for (d_t = first_t; d_t <= last_t; d_t++) {
752 old = r->r[d_t];
753 r->r[d_t] = range_refine(d_t, r->r[d_t], s_t, r->r[s_t]);
754 if (!range_eq(r->r[d_t], old)) {
755 keep_going = true;
756 if (env.verbosity >= VERBOSE_VERY)
757 print_refinement(s_t, r->r[s_t], d_t, old, r->r[d_t], ctx);
758 }
759 }
760 }
761
762 /* keep refining until we converge */
763 if (keep_going) {
764 keep_going = false;
765 goto again;
766 }
767 }
768
769 static void reg_state_set_const(struct reg_state *rs, enum num_t t, u64 val)
770 {
771 enum num_t tt;
772
773 rs->valid = true;
774 for (tt = first_t; tt <= last_t; tt++)
775 rs->r[tt] = tt == t ? range(t, val, val) : unkn[tt];
776
777 reg_state_refine(rs, t, rs->r[t], "CONST");
778 }
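/* Example of cross-domain propagation (illustrative): setting the U64
 * constant 0xffffffff00000000 refines all four views of the register to
 * u64=0xffffffff00000000, s64=-4294967296 (same bits), and u32=s32=0
 * (the lower 32 bits are a known constant).
 */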
779
780 static void reg_state_cond(enum num_t t, struct reg_state *x, struct reg_state *y, enum op op,
781 struct reg_state *newx, struct reg_state *newy, const char *ctx)
782 {
783 char buf[32];
784 enum num_t ts[2];
785 struct reg_state xx = *x, yy = *y;
786 int i, t_cnt;
787 struct range z1, z2;
788
789 if (op == OP_EQ || op == OP_NE) {
790 /* OP_EQ and OP_NE are sign-agnostic, so we need to process
791 * both signed and unsigned domains at the same time
792 */
793 ts[0] = t_unsigned(t);
794 ts[1] = t_signed(t);
795 t_cnt = 2;
796 } else {
797 ts[0] = t;
798 t_cnt = 1;
799 }
800
801 for (i = 0; i < t_cnt; i++) {
802 t = ts[i];
803 z1 = x->r[t];
804 z2 = y->r[t];
805
806 range_cond(t, z1, z2, op, &z1, &z2);
807
808 if (newx) {
809 snprintf(buf, sizeof(buf), "%s R1", ctx);
810 reg_state_refine(&xx, t, z1, buf);
811 }
812 if (newy) {
813 snprintf(buf, sizeof(buf), "%s R2", ctx);
814 reg_state_refine(&yy, t, z2, buf);
815 }
816 }
817
818 if (newx)
819 *newx = xx;
820 if (newy)
821 *newy = yy;
822 }
823
824 static int reg_state_branch_taken_op(enum num_t t, struct reg_state *x, struct reg_state *y,
825 enum op op)
826 {
827 if (op == OP_EQ || op == OP_NE) {
828 /* OP_EQ and OP_NE are sign-agnostic */
829 enum num_t tu = t_unsigned(t);
830 enum num_t ts = t_signed(t);
831 int br_u, br_s, br;
832
833 br_u = range_branch_taken_op(tu, x->r[tu], y->r[tu], op);
834 br_s = range_branch_taken_op(ts, x->r[ts], y->r[ts], op);
835
836 if (br_u >= 0 && br_s >= 0 && br_u != br_s)
837 ASSERT_FALSE(true, "branch taken inconsistency!\n");
838
839 /* if 64-bit ranges are indecisive, use 32-bit subranges to
840 * eliminate always/never taken branches, if possible
841 */
842 if (br_u == -1 && (t == U64 || t == S64)) {
843 br = range_branch_taken_op(U32, x->r[U32], y->r[U32], op);
844 /* we can only reject for OP_EQ, never take branch
845 * based on lower 32 bits
846 */
847 if (op == OP_EQ && br == 0)
848 return 0;
849 /* for OP_NE we can be conclusive only if lower 32 bits
850 * differ and thus inequality branch is always taken
851 */
852 if (op == OP_NE && br == 1)
853 return 1;
854
855 br = range_branch_taken_op(S32, x->r[S32], y->r[S32], op);
856 if (op == OP_EQ && br == 0)
857 return 0;
858 if (op == OP_NE && br == 1)
859 return 1;
860 }
861
862 return br_u >= 0 ? br_u : br_s;
863 }
864 return range_branch_taken_op(t, x->r[t], y->r[t], op);
865 }
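/* Example (illustrative): if the 64-bit ranges of two registers are too wide
 * to decide "r6 == r7", but their lower 32 bits are known distinct constants
 * (say, u32=5 vs u32=7), the 32-bit fallback above proves that the equality
 * branch can never be taken and returns 0.
 */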
866
867 /* =====================================
868 * BPF PROGS GENERATION AND VERIFICATION
869 * =====================================
870 */
871 struct case_spec {
872 /* whether to init full register (r1) or sub-register (w1) */
873 bool init_subregs;
874 /* whether to establish initial value range on full register (r1) or
875 * sub-register (w1)
876 */
877 bool setup_subregs;
878 /* whether to establish initial value range using signed or unsigned
879 * comparisons (i.e., initialize umin/umax or smin/smax directly)
880 */
881 bool setup_signed;
882 /* whether to perform comparison on full registers or sub-registers */
883 bool compare_subregs;
884 /* whether to perform comparison using signed or unsigned operations */
885 bool compare_signed;
886 };
887
888 /* Generate test BPF program based on provided test ranges, operation, and
889 * specifications about register bitness and signedness.
890 */
891 static int load_range_cmp_prog(struct range x, struct range y, enum op op,
892 int branch_taken, struct case_spec spec,
893 char *log_buf, size_t log_sz,
894 int *false_pos, int *true_pos)
895 {
896 #define emit(insn) ({ \
897 struct bpf_insn __insns[] = { insn }; \
898 int __i; \
899 for (__i = 0; __i < ARRAY_SIZE(__insns); __i++) \
900 insns[cur_pos + __i] = __insns[__i]; \
901 cur_pos += __i; \
902 })
903 #define JMP_TO(target) (target - cur_pos - 1)
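/* Note (clarification): emit() copies one or more instructions into insns[]
 * at cur_pos and advances cur_pos; JMP_TO(target) converts an absolute
 * instruction position recorded earlier (e.g., exit_pos) into a BPF jump
 * offset, which is relative to the instruction following the jump, hence
 * the "- cur_pos - 1".
 */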
904 int cur_pos = 0, exit_pos, fd, op_code;
905 struct bpf_insn insns[64];
906 LIBBPF_OPTS(bpf_prog_load_opts, opts,
907 .log_level = 2,
908 .log_buf = log_buf,
909 .log_size = log_sz,
910 .prog_flags = testing_prog_flags(),
911 );
912
913 /* ; skip exit block below
914 * goto +2;
915 */
916 emit(BPF_JMP_A(2));
917 exit_pos = cur_pos;
918 /* ; exit block for all the preparatory conditionals
919 * out:
920 * r0 = 0;
921 * exit;
922 */
923 emit(BPF_MOV64_IMM(BPF_REG_0, 0));
924 emit(BPF_EXIT_INSN());
925 /*
926 * ; assign r6/w6 and r7/w7 unpredictable u64/u32 value
927 * call bpf_get_current_pid_tgid;
928 * r6 = r0; | w6 = w0;
929 * call bpf_get_current_pid_tgid;
930 * r7 = r0; | w7 = w0;
931 */
932 emit(BPF_EMIT_CALL(BPF_FUNC_get_current_pid_tgid));
933 if (spec.init_subregs)
934 emit(BPF_MOV32_REG(BPF_REG_6, BPF_REG_0));
935 else
936 emit(BPF_MOV64_REG(BPF_REG_6, BPF_REG_0));
937 emit(BPF_EMIT_CALL(BPF_FUNC_get_current_pid_tgid));
938 if (spec.init_subregs)
939 emit(BPF_MOV32_REG(BPF_REG_7, BPF_REG_0));
940 else
941 emit(BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
942 /* ; setup initial r6/w6 possible value range ([x.a, x.b])
943 * r1 = %[x.a] ll; | w1 = %[x.a];
944 * r2 = %[x.b] ll; | w2 = %[x.b];
945 * if r6 < r1 goto out; | if w6 < w1 goto out;
946 * if r6 > r2 goto out; | if w6 > w2 goto out;
947 */
948 if (spec.setup_subregs) {
949 emit(BPF_MOV32_IMM(BPF_REG_1, (s32)x.a));
950 emit(BPF_MOV32_IMM(BPF_REG_2, (s32)x.b));
951 emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
952 BPF_REG_6, BPF_REG_1, JMP_TO(exit_pos)));
953 emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
954 BPF_REG_6, BPF_REG_2, JMP_TO(exit_pos)));
955 } else {
956 emit(BPF_LD_IMM64(BPF_REG_1, x.a));
957 emit(BPF_LD_IMM64(BPF_REG_2, x.b));
958 emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
959 BPF_REG_6, BPF_REG_1, JMP_TO(exit_pos)));
960 emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
961 BPF_REG_6, BPF_REG_2, JMP_TO(exit_pos)));
962 }
963 /* ; setup initial r7/w7 possible value range ([y.a, y.b])
964 * r1 = %[y.a] ll; | w1 = %[y.a];
965 * r2 = %[y.b] ll; | w2 = %[y.b];
966 * if r7 < r1 goto out; | if w7 < w1 goto out;
967 * if r7 > r2 goto out; | if w7 > w2 goto out;
968 */
969 if (spec.setup_subregs) {
970 emit(BPF_MOV32_IMM(BPF_REG_1, (s32)y.a));
971 emit(BPF_MOV32_IMM(BPF_REG_2, (s32)y.b));
972 emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
973 BPF_REG_7, BPF_REG_1, JMP_TO(exit_pos)));
974 emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
975 BPF_REG_7, BPF_REG_2, JMP_TO(exit_pos)));
976 } else {
977 emit(BPF_LD_IMM64(BPF_REG_1, y.a));
978 emit(BPF_LD_IMM64(BPF_REG_2, y.b));
979 emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
980 BPF_REG_7, BPF_REG_1, JMP_TO(exit_pos)));
981 emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
982 BPF_REG_7, BPF_REG_2, JMP_TO(exit_pos)));
983 }
984 /* ; range test instruction
985 * if r6 <op> r7 goto +3; | if w6 <op> w7 goto +3;
986 */
987 switch (op) {
988 case OP_LT: op_code = spec.compare_signed ? BPF_JSLT : BPF_JLT; break;
989 case OP_LE: op_code = spec.compare_signed ? BPF_JSLE : BPF_JLE; break;
990 case OP_GT: op_code = spec.compare_signed ? BPF_JSGT : BPF_JGT; break;
991 case OP_GE: op_code = spec.compare_signed ? BPF_JSGE : BPF_JGE; break;
992 case OP_EQ: op_code = BPF_JEQ; break;
993 case OP_NE: op_code = BPF_JNE; break;
994 default:
995 printf("unrecognized op %d\n", op);
996 return -ENOTSUP;
997 }
998 /* ; BEFORE conditional, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably
999 * ; this is used for debugging, as verifier doesn't always print
1000 * ; registers states as of condition jump instruction (e.g., when
1001 * ; precision marking happens)
1002 * r0 = r6; | w0 = w6;
1003 * r0 = r7; | w0 = w7;
1004 */
1005 if (spec.compare_subregs) {
1006 emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
1007 emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
1008 } else {
1009 emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
1010 emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
1011 }
1012 if (spec.compare_subregs)
1013 emit(BPF_JMP32_REG(op_code, BPF_REG_6, BPF_REG_7, 3));
1014 else
1015 emit(BPF_JMP_REG(op_code, BPF_REG_6, BPF_REG_7, 3));
1016 /* ; FALSE branch, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably
1017 * r0 = r6; | w0 = w6;
1018 * r0 = r7; | w0 = w7;
1019 * exit;
1020 */
1021 *false_pos = cur_pos;
1022 if (spec.compare_subregs) {
1023 emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
1024 emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
1025 } else {
1026 emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
1027 emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
1028 }
1029 if (branch_taken == 1) /* false branch is never taken */
1030 emit(BPF_EMIT_CALL(0xDEAD)); /* poison this branch */
1031 else
1032 emit(BPF_EXIT_INSN());
1033 /* ; TRUE branch, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably
1034 * r0 = r6; | w0 = w6;
1035 * r0 = r7; | w0 = w7;
1036 * exit;
1037 */
1038 *true_pos = cur_pos;
1039 if (spec.compare_subregs) {
1040 emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
1041 emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
1042 } else {
1043 emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
1044 emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
1045 }
1046 if (branch_taken == 0) /* true branch is never taken */
1047 emit(BPF_EMIT_CALL(0xDEAD)); /* poison this branch */
1048 emit(BPF_EXIT_INSN()); /* last instruction has to be exit */
1049
1050 fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "reg_bounds_test",
1051 "GPL", insns, cur_pos, &opts);
1052 if (fd < 0)
1053 return fd;
1054
1055 close(fd);
1056 return 0;
1057 #undef emit
1058 #undef JMP_TO
1059 }
1060
1061 #define str_has_pfx(str, pfx) (strncmp(str, pfx, strlen(pfx)) == 0)
1062
1063 /* Parse register state from verifier log.
1064 * `s` should point to the start of "Rx = ..." substring in the verifier log.
1065 */
1066 static int parse_reg_state(const char *s, struct reg_state *reg)
1067 {
1068 /* There are two generic forms for SCALAR register:
1069 * - known constant: R6_rwD=P%lld
1070 * - range: R6_rwD=scalar(id=1,...), where "..." is a comma-separated
1071 * list of optional range specifiers:
1072 * - umin=%llu, if missing, assumed 0;
1073 * - umax=%llu, if missing, assumed U64_MAX;
1074 * - smin=%lld, if missing, assumed S64_MIN;
1075 * - smax=%lld, if missing, assumed S64_MAX;
1076 * - umin32=%d, if missing, assumed 0;
1077 * - umax32=%d, if missing, assumed U32_MAX;
1078 * - smin32=%d, if missing, assumed S32_MIN;
1079 * - smax32=%d, if missing, assumed S32_MAX;
1080 * - var_off=(%#llx; %#llx), tnum part, we don't care about it.
1081 *
1082 * If some of the values are equal, they will be grouped (but min/max
1083 * are not mixed together, and similarly negative values are not
1084 * grouped with non-negative ones). E.g.:
1085 *
1086 * R6_w=Pscalar(smin=smin32=0, smax=umax=umax32=1000)
1087 *
1088 * _rwD part is optional (and any of the letters can be missing).
1089 * P (precision mark) is optional as well.
1090 *
1091 * Anything inside scalar() is optional, including id, of course.
1092 */
1093 struct {
1094 const char *pfx;
1095 u64 *dst, def;
1096 bool is_32, is_set;
1097 } *f, fields[8] = {
1098 {"smin=", ®->r[S64].a, S64_MIN},
1099 {"smax=", ®->r[S64].b, S64_MAX},
1100 {"umin=", ®->r[U64].a, 0},
1101 {"umax=", ®->r[U64].b, U64_MAX},
1102 {"smin32=", ®->r[S32].a, (u32)S32_MIN, true},
1103 {"smax32=", ®->r[S32].b, (u32)S32_MAX, true},
1104 {"umin32=", ®->r[U32].a, 0, true},
1105 {"umax32=", ®->r[U32].b, U32_MAX, true},
1106 };
1107 const char *p;
1108 int i;
1109
1110 p = strchr(s, '=');
1111 if (!p)
1112 return -EINVAL;
1113 p++;
1114 if (*p == 'P')
1115 p++;
1116
1117 if (!str_has_pfx(p, "scalar(")) {
1118 long long sval;
1119 enum num_t t;
1120
1121 if (p[0] == '0' && p[1] == 'x') {
1122 if (sscanf(p, "%llx", &sval) != 1)
1123 return -EINVAL;
1124 } else {
1125 if (sscanf(p, "%lld", &sval) != 1)
1126 return -EINVAL;
1127 }
1128
1129 reg->valid = true;
1130 for (t = first_t; t <= last_t; t++) {
1131 reg->r[t] = range(t, sval, sval);
1132 }
1133 return 0;
1134 }
1135
1136 p += sizeof("scalar");
1137 while (p) {
1138 int midxs[ARRAY_SIZE(fields)], mcnt = 0;
1139 u64 val;
1140
1141 for (i = 0; i < ARRAY_SIZE(fields); i++) {
1142 f = &fields[i];
1143 if (!str_has_pfx(p, f->pfx))
1144 continue;
1145 midxs[mcnt++] = i;
1146 p += strlen(f->pfx);
1147 }
1148
1149 if (mcnt) {
1150 /* populate all matched fields */
1151 if (p[0] == '0' && p[1] == 'x') {
1152 if (sscanf(p, "%llx", &val) != 1)
1153 return -EINVAL;
1154 } else {
1155 if (sscanf(p, "%lld", &val) != 1)
1156 return -EINVAL;
1157 }
1158
1159 for (i = 0; i < mcnt; i++) {
1160 f = &fields[midxs[i]];
1161 f->is_set = true;
1162 *f->dst = f->is_32 ? (u64)(u32)val : val;
1163 }
1164 } else if (str_has_pfx(p, "var_off")) {
1165 /* skip "var_off=(0x0; 0x3f)" part completely */
1166 p = strchr(p, ')');
1167 if (!p)
1168 return -EINVAL;
1169 p++;
1170 }
1171
1172 p = strpbrk(p, ",)");
1173 if (!p || *p == ')')
1174 break;
1175 if (p)
1176 p++;
1177 }
1178
1179 reg->valid = true;
1180
1181 for (i = 0; i < ARRAY_SIZE(fields); i++) {
1182 f = &fields[i];
1183 if (!f->is_set)
1184 *f->dst = f->def;
1185 }
1186
1187 return 0;
1188 }
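/* For example (hypothetical log fragment): given
 *   "R6_w=scalar(smin=smin32=-16,smax=smax32=16)"
 * parse_reg_state() fills r[S64] = r[S32] = [-16; 16] and leaves the
 * unsigned views at their defaults ([0; U64_MAX] and [0; U32_MAX]).
 */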
1189
1190
1191 /* Parse all register states (TRUE/FALSE branches and DST/SRC registers)
1192 * out of the verifier log for a corresponding test case BPF program.
1193 */
1194 static int parse_range_cmp_log(const char *log_buf, struct case_spec spec,
1195 int false_pos, int true_pos,
1196 struct reg_state *false1_reg, struct reg_state *false2_reg,
1197 struct reg_state *true1_reg, struct reg_state *true2_reg)
1198 {
1199 struct {
1200 int insn_idx;
1201 int reg_idx;
1202 const char *reg_upper;
1203 struct reg_state *state;
1204 } specs[] = {
1205 {false_pos, 6, "R6=", false1_reg},
1206 {false_pos + 1, 7, "R7=", false2_reg},
1207 {true_pos, 6, "R6=", true1_reg},
1208 {true_pos + 1, 7, "R7=", true2_reg},
1209 };
1210 char buf[32];
1211 const char *p = log_buf, *q;
1212 int i, err;
1213
1214 for (i = 0; i < 4; i++) {
1215 sprintf(buf, "%d: (%s) %s = %s%d", specs[i].insn_idx,
1216 spec.compare_subregs ? "bc" : "bf",
1217 spec.compare_subregs ? "w0" : "r0",
1218 spec.compare_subregs ? "w" : "r", specs[i].reg_idx);
1219
1220 q = strstr(p, buf);
1221 if (!q) {
1222 *specs[i].state = (struct reg_state){.valid = false};
1223 continue;
1224 }
1225 p = strstr(q, specs[i].reg_upper);
1226 if (!p)
1227 return -EINVAL;
1228 err = parse_reg_state(p, specs[i].state);
1229 if (err)
1230 return -EINVAL;
1231 }
1232 return 0;
1233 }
1234
1235 /* Validate ranges match, and print details if they don't */
1236 static bool assert_range_eq(enum num_t t, struct range x, struct range y,
1237 const char *ctx1, const char *ctx2)
1238 {
1239 DEFINE_STRBUF(sb, 512);
1240
1241 if (range_eq(x, y))
1242 return true;
1243
1244 snappendf(sb, "MISMATCH %s.%s: ", ctx1, ctx2);
1245 snprintf_range(t, sb, x);
1246 snappendf(sb, " != ");
1247 snprintf_range(t, sb, y);
1248
1249 printf("%s\n", sb->buf);
1250
1251 return false;
1252 }
1253
1254 /* Validate that register states match, and print details if they don't */
1255 static bool assert_reg_state_eq(struct reg_state *r, struct reg_state *e, const char *ctx)
1256 {
1257 bool ok = true;
1258 enum num_t t;
1259
1260 if (r->valid != e->valid) {
1261 printf("MISMATCH %s: actual %s != expected %s\n", ctx,
1262 r->valid ? "<valid>" : "<invalid>",
1263 e->valid ? "<valid>" : "<invalid>");
1264 return false;
1265 }
1266
1267 if (!r->valid)
1268 return true;
1269
1270 for (t = first_t; t <= last_t; t++) {
1271 if (!assert_range_eq(t, r->r[t], e->r[t], ctx, t_str(t)))
1272 ok = false;
1273 }
1274
1275 return ok;
1276 }
1277
1278 /* Print verifier log, filtering out irrelevant noise */
1279 static void print_verifier_log(const char *buf)
1280 {
1281 const char *p;
1282
1283 while (buf[0]) {
1284 p = strchrnul(buf, '\n');
1285
1286 /* filter out irrelevant precision backtracking logs */
1287 if (str_has_pfx(buf, "mark_precise: "))
1288 goto skip_line;
1289
1290 printf("%.*s\n", (int)(p - buf), buf);
1291
1292 skip_line:
1293 buf = *p == '\0' ? p : p + 1;
1294 }
1295 }
1296
1297 /* Simulate provided test case purely with our own range-based logic.
1298 * This is done to set up expectations for verifier's branch_taken logic and
1299 * verifier's register states in the verifier log.
1300 */
1301 static void sim_case(enum num_t init_t, enum num_t cond_t,
1302 struct range x, struct range y, enum op op,
1303 struct reg_state *fr1, struct reg_state *fr2,
1304 struct reg_state *tr1, struct reg_state *tr2,
1305 int *branch_taken)
1306 {
1307 const u64 A = x.a;
1308 const u64 B = x.b;
1309 const u64 C = y.a;
1310 const u64 D = y.b;
1311 struct reg_state rc;
1312 enum op rev_op = complement_op(op);
1313 enum num_t t;
1314
1315 fr1->valid = fr2->valid = true;
1316 tr1->valid = tr2->valid = true;
1317 for (t = first_t; t <= last_t; t++) {
1318 /* if we are initializing using 32-bit subregisters,
1319 * full registers get upper 32 bits zeroed automatically
1320 */
1321 struct range z = t_is_32(init_t) ? unkn_subreg(t) : unkn[t];
1322
1323 fr1->r[t] = fr2->r[t] = tr1->r[t] = tr2->r[t] = z;
1324 }
1325
1326 /* step 1: r1 >= A, r2 >= C */
1327 reg_state_set_const(&rc, init_t, A);
1328 reg_state_cond(init_t, fr1, &rc, OP_GE, fr1, NULL, "r1>=A");
1329 reg_state_set_const(&rc, init_t, C);
1330 reg_state_cond(init_t, fr2, &rc, OP_GE, fr2, NULL, "r2>=C");
1331 *tr1 = *fr1;
1332 *tr2 = *fr2;
1333 if (env.verbosity >= VERBOSE_VERY) {
1334 printf("STEP1 (%s) R1: ", t_str(init_t)); print_reg_state(fr1, "\n");
1335 printf("STEP1 (%s) R2: ", t_str(init_t)); print_reg_state(fr2, "\n");
1336 }
1337
1338 /* step 2: r1 <= B, r2 <= D */
1339 reg_state_set_const(&rc, init_t, B);
1340 reg_state_cond(init_t, fr1, &rc, OP_LE, fr1, NULL, "r1<=B");
1341 reg_state_set_const(&rc, init_t, D);
1342 reg_state_cond(init_t, fr2, &rc, OP_LE, fr2, NULL, "r2<=D");
1343 *tr1 = *fr1;
1344 *tr2 = *fr2;
1345 if (env.verbosity >= VERBOSE_VERY) {
1346 printf("STEP2 (%s) R1: ", t_str(init_t)); print_reg_state(fr1, "\n");
1347 printf("STEP2 (%s) R2: ", t_str(init_t)); print_reg_state(fr2, "\n");
1348 }
1349
1350 /* step 3: r1 <op> r2 */
1351 *branch_taken = reg_state_branch_taken_op(cond_t, fr1, fr2, op);
1352 fr1->valid = fr2->valid = false;
1353 tr1->valid = tr2->valid = false;
1354 if (*branch_taken != 1) { /* FALSE is possible */
1355 fr1->valid = fr2->valid = true;
1356 reg_state_cond(cond_t, fr1, fr2, rev_op, fr1, fr2, "FALSE");
1357 }
1358 if (*branch_taken != 0) { /* TRUE is possible */
1359 tr1->valid = tr2->valid = true;
1360 reg_state_cond(cond_t, tr1, tr2, op, tr1, tr2, "TRUE");
1361 }
1362 if (env.verbosity >= VERBOSE_VERY) {
1363 printf("STEP3 (%s) FALSE R1:", t_str(cond_t)); print_reg_state(fr1, "\n");
1364 printf("STEP3 (%s) FALSE R2:", t_str(cond_t)); print_reg_state(fr2, "\n");
1365 printf("STEP3 (%s) TRUE R1:", t_str(cond_t)); print_reg_state(tr1, "\n");
1366 printf("STEP3 (%s) TRUE R2:", t_str(cond_t)); print_reg_state(tr2, "\n");
1367 }
1368 }
1369
1370 /* ===============================
1371 * HIGH-LEVEL TEST CASE VALIDATION
1372 * ===============================
1373 */
1374 static u32 upper_seeds[] = {
1375 0,
1376 1,
1377 U32_MAX,
1378 U32_MAX - 1,
1379 S32_MAX,
1380 (u32)S32_MIN,
1381 };
1382
1383 static u32 lower_seeds[] = {
1384 0,
1385 1,
1386 2, (u32)-2,
1387 255, (u32)-255,
1388 UINT_MAX,
1389 UINT_MAX - 1,
1390 INT_MAX,
1391 (u32)INT_MIN,
1392 };
1393
1394 struct ctx {
1395 int val_cnt, subval_cnt, range_cnt, subrange_cnt;
1396 u64 uvals[ARRAY_SIZE(upper_seeds) * ARRAY_SIZE(lower_seeds)];
1397 s64 svals[ARRAY_SIZE(upper_seeds) * ARRAY_SIZE(lower_seeds)];
1398 u32 usubvals[ARRAY_SIZE(lower_seeds)];
1399 s32 ssubvals[ARRAY_SIZE(lower_seeds)];
1400 struct range *uranges, *sranges;
1401 struct range *usubranges, *ssubranges;
1402 int max_failure_cnt, cur_failure_cnt;
1403 int total_case_cnt, case_cnt;
1404 int rand_case_cnt;
1405 unsigned rand_seed;
1406 __u64 start_ns;
1407 char progress_ctx[64];
1408 };
1409
1410 static void cleanup_ctx(struct ctx *ctx)
1411 {
1412 free(ctx->uranges);
1413 free(ctx->sranges);
1414 free(ctx->usubranges);
1415 free(ctx->ssubranges);
1416 }
1417
1418 struct subtest_case {
1419 enum num_t init_t;
1420 enum num_t cond_t;
1421 struct range x;
1422 struct range y;
1423 enum op op;
1424 };
1425
1426 static void subtest_case_str(struct strbuf *sb, struct subtest_case *t, bool use_op)
1427 {
1428 snappendf(sb, "(%s)", t_str(t->init_t));
1429 snprintf_range(t->init_t, sb, t->x);
1430 snappendf(sb, " (%s)%s ", t_str(t->cond_t), use_op ? op_str(t->op) : "<op>");
1431 snprintf_range(t->init_t, sb, t->y);
1432 }
1433
1434 /* Generate and validate test case based on specific combination of setup
1435 * register ranges (including their expected num_t domain), and conditional
1436 * operation to perform (including num_t domain in which it has to be
1437 * performed)
1438 */
1439 static int verify_case_op(enum num_t init_t, enum num_t cond_t,
1440 struct range x, struct range y, enum op op)
1441 {
1442 char log_buf[256 * 1024];
1443 size_t log_sz = sizeof(log_buf);
1444 int err, false_pos = 0, true_pos = 0, branch_taken;
1445 struct reg_state fr1, fr2, tr1, tr2;
1446 struct reg_state fe1, fe2, te1, te2;
1447 bool failed = false;
1448 struct case_spec spec = {
1449 .init_subregs = (init_t == U32 || init_t == S32),
1450 .setup_subregs = (init_t == U32 || init_t == S32),
1451 .setup_signed = (init_t == S64 || init_t == S32),
1452 .compare_subregs = (cond_t == U32 || cond_t == S32),
1453 .compare_signed = (cond_t == S64 || cond_t == S32),
1454 };
1455
1456 log_buf[0] = '\0';
1457
1458 sim_case(init_t, cond_t, x, y, op, &fe1, &fe2, &te1, &te2, &branch_taken);
1459
1460 err = load_range_cmp_prog(x, y, op, branch_taken, spec,
1461 log_buf, log_sz, &false_pos, &true_pos);
1462 if (err) {
1463 ASSERT_OK(err, "load_range_cmp_prog");
1464 failed = true;
1465 }
1466
1467 err = parse_range_cmp_log(log_buf, spec, false_pos, true_pos,
1468 &fr1, &fr2, &tr1, &tr2);
1469 if (err) {
1470 ASSERT_OK(err, "parse_range_cmp_log");
1471 failed = true;
1472 }
1473
1474 if (!assert_reg_state_eq(&fr1, &fe1, "false_reg1") ||
1475 !assert_reg_state_eq(&fr2, &fe2, "false_reg2") ||
1476 !assert_reg_state_eq(&tr1, &te1, "true_reg1") ||
1477 !assert_reg_state_eq(&tr2, &te2, "true_reg2")) {
1478 failed = true;
1479 }
1480
1481 if (failed || env.verbosity >= VERBOSE_NORMAL) {
1482 if (failed || env.verbosity >= VERBOSE_VERY) {
1483 printf("VERIFIER LOG:\n========================\n");
1484 print_verifier_log(log_buf);
1485 printf("=====================\n");
1486 }
1487 printf("ACTUAL FALSE1: "); print_reg_state(&fr1, "\n");
1488 printf("EXPECTED FALSE1: "); print_reg_state(&fe1, "\n");
1489 printf("ACTUAL FALSE2: "); print_reg_state(&fr2, "\n");
1490 printf("EXPECTED FALSE2: "); print_reg_state(&fe2, "\n");
1491 printf("ACTUAL TRUE1: "); print_reg_state(&tr1, "\n");
1492 printf("EXPECTED TRUE1: "); print_reg_state(&te1, "\n");
1493 printf("ACTUAL TRUE2: "); print_reg_state(&tr2, "\n");
1494 printf("EXPECTED TRUE2: "); print_reg_state(&te2, "\n");
1495
1496 return failed ? -EINVAL : 0;
1497 }
1498
1499 return 0;
1500 }
1501
1502 /* Given setup ranges and number types, go over all supported operations,
1503 * generating individual subtest for each allowed combination
1504 */
1505 static int verify_case_opt(struct ctx *ctx, enum num_t init_t, enum num_t cond_t,
1506 struct range x, struct range y, bool is_subtest)
1507 {
1508 DEFINE_STRBUF(sb, 256);
1509 int err;
1510 struct subtest_case sub = {
1511 .init_t = init_t,
1512 .cond_t = cond_t,
1513 .x = x,
1514 .y = y,
1515 };
1516
1517 sb->pos = 0; /* reset position in strbuf */
1518 subtest_case_str(sb, &sub, false /* ignore op */);
1519 if (is_subtest && !test__start_subtest(sb->buf))
1520 return 0;
1521
1522 for (sub.op = first_op; sub.op <= last_op; sub.op++) {
1523 sb->pos = 0; /* reset position in strbuf */
1524 subtest_case_str(sb, &sub, true /* print op */);
1525
1526 if (env.verbosity >= VERBOSE_NORMAL) /* this speeds up debugging */
1527 printf("TEST CASE: %s\n", sb->buf);
1528
1529 err = verify_case_op(init_t, cond_t, x, y, sub.op);
1530 if (err || env.verbosity >= VERBOSE_NORMAL)
1531 ASSERT_OK(err, sb->buf);
1532 if (err) {
1533 ctx->cur_failure_cnt++;
1534 if (ctx->cur_failure_cnt > ctx->max_failure_cnt)
1535 return err;
1536 return 0; /* keep testing other cases */
1537 }
1538 ctx->case_cnt++;
1539 if ((ctx->case_cnt % 10000) == 0) {
1540 double progress = (ctx->case_cnt + 0.0) / ctx->total_case_cnt;
1541 u64 elapsed_ns = get_time_ns() - ctx->start_ns;
1542 double remain_ns = elapsed_ns / progress * (1 - progress);
1543
1544 fprintf(env.stderr_saved, "PROGRESS (%s): %d/%d (%.2lf%%), "
1545 "elapsed %llu mins (%.2lf hrs), "
1546 "ETA %.0lf mins (%.2lf hrs)\n",
1547 ctx->progress_ctx,
1548 ctx->case_cnt, ctx->total_case_cnt, 100.0 * progress,
1549 elapsed_ns / 1000000000 / 60,
1550 elapsed_ns / 1000000000.0 / 3600,
1551 remain_ns / 1000000000.0 / 60,
1552 remain_ns / 1000000000.0 / 3600);
1553 }
1554 }
1555
1556 return 0;
1557 }
1558
1559 static int verify_case(struct ctx *ctx, enum num_t init_t, enum num_t cond_t,
1560 struct range x, struct range y)
1561 {
1562 return verify_case_opt(ctx, init_t, cond_t, x, y, true /* is_subtest */);
1563 }
1564
1565 /* ================================
1566 * GENERATED CASES FROM SEED VALUES
1567 * ================================
1568 */
1569 static int u64_cmp(const void *p1, const void *p2)
1570 {
1571 u64 x1 = *(const u64 *)p1, x2 = *(const u64 *)p2;
1572
1573 return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
1574 }
1575
1576 static int u32_cmp(const void *p1, const void *p2)
1577 {
1578 u32 x1 = *(const u32 *)p1, x2 = *(const u32 *)p2;
1579
1580 return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
1581 }
1582
1583 static int s64_cmp(const void *p1, const void *p2)
1584 {
1585 s64 x1 = *(const s64 *)p1, x2 = *(const s64 *)p2;
1586
1587 return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
1588 }
1589
1590 static int s32_cmp(const void *p1, const void *p2)
1591 {
1592 s32 x1 = *(const s32 *)p1, x2 = *(const s32 *)p2;
1593
1594 return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
1595 }
1596
1597 /* Generate valid unique constants from seeds, both signed and unsigned */
1598 static void gen_vals(struct ctx *ctx)
1599 {
1600 int i, j, cnt = 0;
1601
1602 for (i = 0; i < ARRAY_SIZE(upper_seeds); i++) {
1603 for (j = 0; j < ARRAY_SIZE(lower_seeds); j++) {
1604 ctx->uvals[cnt++] = (((u64)upper_seeds[i]) << 32) | lower_seeds[j];
1605 }
1606 }
1607
1608 /* sort and compact uvals (i.e., it's `sort | uniq`) */
1609 qsort(ctx->uvals, cnt, sizeof(*ctx->uvals), u64_cmp);
1610 for (i = 1, j = 0; i < cnt; i++) {
1611 if (ctx->uvals[j] == ctx->uvals[i])
1612 continue;
1613 j++;
1614 ctx->uvals[j] = ctx->uvals[i];
1615 }
1616 ctx->val_cnt = j + 1;
1617
1618 /* we have exactly the same number of s64 values, they are just in
1619 * a different order than u64s, so just sort them differently
1620 */
1621 for (i = 0; i < ctx->val_cnt; i++)
1622 ctx->svals[i] = ctx->uvals[i];
1623 qsort(ctx->svals, ctx->val_cnt, sizeof(*ctx->svals), s64_cmp);
1624
1625 if (env.verbosity >= VERBOSE_SUPER) {
1626 DEFINE_STRBUF(sb1, 256);
1627 DEFINE_STRBUF(sb2, 256);
1628
1629 for (i = 0; i < ctx->val_cnt; i++) {
1630 sb1->pos = sb2->pos = 0;
1631 snprintf_num(U64, sb1, ctx->uvals[i]);
1632 snprintf_num(S64, sb2, ctx->svals[i]);
1633 printf("SEED #%d: u64=%-20s s64=%-20s\n", i, sb1->buf, sb2->buf);
1634 }
1635 }
1636
1637 /* 32-bit values are generated separately */
1638 cnt = 0;
1639 for (i = 0; i < ARRAY_SIZE(lower_seeds); i++) {
1640 ctx->usubvals[cnt++] = lower_seeds[i];
1641 }
1642
1643 /* sort and compact usubvals (i.e., it's `sort | uniq`) */
1644 qsort(ctx->usubvals, cnt, sizeof(*ctx->usubvals), u32_cmp);
1645 for (i = 1, j = 0; i < cnt; i++) {
1646 if (ctx->usubvals[j] == ctx->usubvals[i])
1647 continue;
1648 j++;
1649 ctx->usubvals[j] = ctx->usubvals[i];
1650 }
1651 ctx->subval_cnt = j + 1;
1652
1653 for (i = 0; i < ctx->subval_cnt; i++)
1654 ctx->ssubvals[i] = ctx->usubvals[i];
1655 qsort(ctx->ssubvals, ctx->subval_cnt, sizeof(*ctx->ssubvals), s32_cmp);
1656
1657 if (env.verbosity >= VERBOSE_SUPER) {
1658 DEFINE_STRBUF(sb1, 256);
1659 DEFINE_STRBUF(sb2, 256);
1660
1661 for (i = 0; i < ctx->subval_cnt; i++) {
1662 sb1->pos = sb2->pos = 0;
1663 snprintf_num(U32, sb1, ctx->usubvals[i]);
1664 snprintf_num(S32, sb2, ctx->ssubvals[i]);
1665 printf("SUBSEED #%d: u32=%-10s s32=%-10s\n", i, sb1->buf, sb2->buf);
1666 }
1667 }
1668 }
1669
1670 /* Generate valid ranges from upper/lower seeds */
1671 static int gen_ranges(struct ctx *ctx)
1672 {
1673 int i, j, cnt = 0;
1674
1675 for (i = 0; i < ctx->val_cnt; i++) {
1676 for (j = i; j < ctx->val_cnt; j++) {
1677 if (env.verbosity >= VERBOSE_SUPER) {
1678 DEFINE_STRBUF(sb1, 256);
1679 DEFINE_STRBUF(sb2, 256);
1680
1681 sb1->pos = sb2->pos = 0;
1682 snprintf_range(U64, sb1, range(U64, ctx->uvals[i], ctx->uvals[j]));
1683 snprintf_range(S64, sb2, range(S64, ctx->svals[i], ctx->svals[j]));
1684 printf("RANGE #%d: u64=%-40s s64=%-40s\n", cnt, sb1->buf, sb2->buf);
1685 }
1686 cnt++;
1687 }
1688 }
1689 ctx->range_cnt = cnt;
1690
1691 ctx->uranges = calloc(ctx->range_cnt, sizeof(*ctx->uranges));
1692 if (!ASSERT_OK_PTR(ctx->uranges, "uranges_calloc"))
1693 return -EINVAL;
1694 ctx->sranges = calloc(ctx->range_cnt, sizeof(*ctx->sranges));
1695 if (!ASSERT_OK_PTR(ctx->sranges, "sranges_calloc"))
1696 return -EINVAL;
1697
1698 cnt = 0;
1699 for (i = 0; i < ctx->val_cnt; i++) {
1700 for (j = i; j < ctx->val_cnt; j++) {
1701 ctx->uranges[cnt] = range(U64, ctx->uvals[i], ctx->uvals[j]);
1702 ctx->sranges[cnt] = range(S64, ctx->svals[i], ctx->svals[j]);
1703 cnt++;
1704 }
1705 }
1706
1707 cnt = 0;
1708 for (i = 0; i < ctx->subval_cnt; i++) {
1709 for (j = i; j < ctx->subval_cnt; j++) {
1710 if (env.verbosity >= VERBOSE_SUPER) {
1711 DEFINE_STRBUF(sb1, 256);
1712 DEFINE_STRBUF(sb2, 256);
1713
1714 sb1->pos = sb2->pos = 0;
1715 snprintf_range(U32, sb1, range(U32, ctx->usubvals[i], ctx->usubvals[j]));
1716 snprintf_range(S32, sb2, range(S32, ctx->ssubvals[i], ctx->ssubvals[j]));
1717 printf("SUBRANGE #%d: u32=%-20s s32=%-20s\n", cnt, sb1->buf, sb2->buf);
1718 }
1719 cnt++;
1720 }
1721 }
1722 ctx->subrange_cnt = cnt;
1723
1724 ctx->usubranges = calloc(ctx->subrange_cnt, sizeof(*ctx->usubranges));
1725 if (!ASSERT_OK_PTR(ctx->usubranges, "usubranges_calloc"))
1726 return -EINVAL;
1727 ctx->ssubranges = calloc(ctx->subrange_cnt, sizeof(*ctx->ssubranges));
1728 if (!ASSERT_OK_PTR(ctx->ssubranges, "ssubranges_calloc"))
1729 return -EINVAL;
1730
1731 cnt = 0;
1732 for (i = 0; i < ctx->subval_cnt; i++) {
1733 for (j = i; j < ctx->subval_cnt; j++) {
1734 ctx->usubranges[cnt] = range(U32, ctx->usubvals[i], ctx->usubvals[j]);
1735 ctx->ssubranges[cnt] = range(S32, ctx->ssubvals[i], ctx->ssubvals[j]);
1736 cnt++;
1737 }
1738 }
1739
1740 return 0;
1741 }
1742
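/* Optional environment knobs consumed by the generated and randomized tests
 * below (all parsed in parse_env_vars()):
 *
 *   REG_BOUNDS_MAX_FAILURE_CNT - non-negative cap on reported failures
 *                                (presumably before a run is cut short);
 *   REG_BOUNDS_RAND_CASE_CNT   - number of random cases per randomized test,
 *                                falls back to DEFAULT_RAND_CASE_CNT if unset;
 *   REG_BOUNDS_RAND_SEED       - seed passed to srandom(), falls back to a
 *                                time-derived value if unset.
 */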
static int parse_env_vars(struct ctx *ctx)
{
	const char *s;

	if ((s = getenv("REG_BOUNDS_MAX_FAILURE_CNT"))) {
		errno = 0;
		ctx->max_failure_cnt = strtol(s, NULL, 10);
		if (errno || ctx->max_failure_cnt < 0) {
			ASSERT_OK(-errno, "REG_BOUNDS_MAX_FAILURE_CNT");
			return -EINVAL;
		}
	}

	if ((s = getenv("REG_BOUNDS_RAND_CASE_CNT"))) {
		errno = 0;
		ctx->rand_case_cnt = strtol(s, NULL, 10);
		if (errno || ctx->rand_case_cnt < 0) {
			ASSERT_OK(-errno, "REG_BOUNDS_RAND_CASE_CNT");
			return -EINVAL;
		}
	}

	if ((s = getenv("REG_BOUNDS_RAND_SEED"))) {
		errno = 0;
		ctx->rand_seed = strtoul(s, NULL, 10);
		if (errno) {
			ASSERT_OK(-errno, "REG_BOUNDS_RAND_SEED");
			return -EINVAL;
		}
	}

	return 0;
}

static int prepare_gen_tests(struct ctx *ctx)
{
	const char *s;
	int err;

	if (!(s = getenv("SLOW_TESTS")) || strcmp(s, "1") != 0) {
		test__skip();
		return -ENOTSUP;
	}

	err = parse_env_vars(ctx);
	if (err)
		return err;

	gen_vals(ctx);
	err = gen_ranges(ctx);
	if (err) {
		ASSERT_OK(err, "gen_ranges");
		return err;
	}

	return 0;
}

/* Go over generated constants and ranges and validate various supported
 * combinations of them
 */
static void validate_gen_range_vs_const_64(enum num_t init_t, enum num_t cond_t)
{
	struct ctx ctx;
	struct range rconst;
	const struct range *ranges;
	const u64 *vals;
	int i, j;

	memset(&ctx, 0, sizeof(ctx));

	if (prepare_gen_tests(&ctx))
		goto cleanup;

	ranges = init_t == U64 ? ctx.uranges : ctx.sranges;
	vals = init_t == U64 ? ctx.uvals : (const u64 *)ctx.svals;

	ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.range_cnt * ctx.val_cnt);
	ctx.start_ns = get_time_ns();
	snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
		 "RANGE x CONST, %s -> %s",
		 t_str(init_t), t_str(cond_t));

	for (i = 0; i < ctx.val_cnt; i++) {
		for (j = 0; j < ctx.range_cnt; j++) {
			rconst = range(init_t, vals[i], vals[i]);

			/* (u64|s64)(<range> x <const>) */
			if (verify_case(&ctx, init_t, cond_t, ranges[j], rconst))
				goto cleanup;
			/* (u64|s64)(<const> x <range>) */
			if (verify_case(&ctx, init_t, cond_t, rconst, ranges[j]))
				goto cleanup;
		}
	}

cleanup:
	cleanup_ctx(&ctx);
}

static void validate_gen_range_vs_const_32(enum num_t init_t, enum num_t cond_t)
{
	struct ctx ctx;
	struct range rconst;
	const struct range *ranges;
	const u32 *vals;
	int i, j;

	memset(&ctx, 0, sizeof(ctx));

	if (prepare_gen_tests(&ctx))
		goto cleanup;

	ranges = init_t == U32 ? ctx.usubranges : ctx.ssubranges;
	vals = init_t == U32 ? ctx.usubvals : (const u32 *)ctx.ssubvals;

	ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.subrange_cnt * ctx.subval_cnt);
	ctx.start_ns = get_time_ns();
	snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
		 "RANGE x CONST, %s -> %s",
		 t_str(init_t), t_str(cond_t));

	for (i = 0; i < ctx.subval_cnt; i++) {
		for (j = 0; j < ctx.subrange_cnt; j++) {
			rconst = range(init_t, vals[i], vals[i]);

			/* (u32|s32)(<range> x <const>) */
			if (verify_case(&ctx, init_t, cond_t, ranges[j], rconst))
				goto cleanup;
			/* (u32|s32)(<const> x <range>) */
			if (verify_case(&ctx, init_t, cond_t, rconst, ranges[j]))
				goto cleanup;
		}
	}

cleanup:
	cleanup_ctx(&ctx);
}

static void validate_gen_range_vs_range(enum num_t init_t, enum num_t cond_t)
{
	struct ctx ctx;
	const struct range *ranges;
	int i, j, rcnt;

	memset(&ctx, 0, sizeof(ctx));

	if (prepare_gen_tests(&ctx))
		goto cleanup;

	switch (init_t) {
	case U64:
		ranges = ctx.uranges;
		rcnt = ctx.range_cnt;
		break;
	case U32:
		ranges = ctx.usubranges;
		rcnt = ctx.subrange_cnt;
		break;
	case S64:
		ranges = ctx.sranges;
		rcnt = ctx.range_cnt;
		break;
	case S32:
		ranges = ctx.ssubranges;
		rcnt = ctx.subrange_cnt;
		break;
	default:
		printf("validate_gen_range_vs_range!\n");
		exit(1);
	}

	ctx.total_case_cnt = (last_op - first_op + 1) * (2 * rcnt * (rcnt + 1) / 2);
	ctx.start_ns = get_time_ns();
	snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
		 "RANGE x RANGE, %s -> %s",
		 t_str(init_t), t_str(cond_t));

	for (i = 0; i < rcnt; i++) {
		for (j = i; j < rcnt; j++) {
			/* (<range> x <range>) */
			if (verify_case(&ctx, init_t, cond_t, ranges[i], ranges[j]))
				goto cleanup;
			if (verify_case(&ctx, init_t, cond_t, ranges[j], ranges[i]))
				goto cleanup;
		}
	}

cleanup:
	cleanup_ctx(&ctx);
}

/* Go over thousands of test cases generated from initial seed values.
 * Given these take a long time, guard them behind the SLOW_TESTS=1 envvar. If
 * the envvar is not set, these tests are skipped during test_progs testing.
 *
 * We split this up into smaller subsets based on initialization and
 * conditional numeric domains to get easy parallelization with test_progs'
 * -j argument.
 */
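
/* For example, a run along these lines (exact test_progs flags may differ in
 * your setup) would exercise the slow generated subtests in parallel while
 * capping the number of reported failures:
 *
 *   SLOW_TESTS=1 REG_BOUNDS_MAX_FAILURE_CNT=10 ./test_progs -t reg_bounds_gen -j
 */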

/* RANGE x CONST, U64 initial range */
void test_reg_bounds_gen_consts_u64_u64(void) { validate_gen_range_vs_const_64(U64, U64); }
void test_reg_bounds_gen_consts_u64_s64(void) { validate_gen_range_vs_const_64(U64, S64); }
void test_reg_bounds_gen_consts_u64_u32(void) { validate_gen_range_vs_const_64(U64, U32); }
void test_reg_bounds_gen_consts_u64_s32(void) { validate_gen_range_vs_const_64(U64, S32); }
/* RANGE x CONST, S64 initial range */
void test_reg_bounds_gen_consts_s64_u64(void) { validate_gen_range_vs_const_64(S64, U64); }
void test_reg_bounds_gen_consts_s64_s64(void) { validate_gen_range_vs_const_64(S64, S64); }
void test_reg_bounds_gen_consts_s64_u32(void) { validate_gen_range_vs_const_64(S64, U32); }
void test_reg_bounds_gen_consts_s64_s32(void) { validate_gen_range_vs_const_64(S64, S32); }
/* RANGE x CONST, U32 initial range */
void test_reg_bounds_gen_consts_u32_u64(void) { validate_gen_range_vs_const_32(U32, U64); }
void test_reg_bounds_gen_consts_u32_s64(void) { validate_gen_range_vs_const_32(U32, S64); }
void test_reg_bounds_gen_consts_u32_u32(void) { validate_gen_range_vs_const_32(U32, U32); }
void test_reg_bounds_gen_consts_u32_s32(void) { validate_gen_range_vs_const_32(U32, S32); }
/* RANGE x CONST, S32 initial range */
void test_reg_bounds_gen_consts_s32_u64(void) { validate_gen_range_vs_const_32(S32, U64); }
void test_reg_bounds_gen_consts_s32_s64(void) { validate_gen_range_vs_const_32(S32, S64); }
void test_reg_bounds_gen_consts_s32_u32(void) { validate_gen_range_vs_const_32(S32, U32); }
void test_reg_bounds_gen_consts_s32_s32(void) { validate_gen_range_vs_const_32(S32, S32); }

/* RANGE x RANGE, U64 initial range */
void test_reg_bounds_gen_ranges_u64_u64(void) { validate_gen_range_vs_range(U64, U64); }
void test_reg_bounds_gen_ranges_u64_s64(void) { validate_gen_range_vs_range(U64, S64); }
void test_reg_bounds_gen_ranges_u64_u32(void) { validate_gen_range_vs_range(U64, U32); }
void test_reg_bounds_gen_ranges_u64_s32(void) { validate_gen_range_vs_range(U64, S32); }
/* RANGE x RANGE, S64 initial range */
void test_reg_bounds_gen_ranges_s64_u64(void) { validate_gen_range_vs_range(S64, U64); }
void test_reg_bounds_gen_ranges_s64_s64(void) { validate_gen_range_vs_range(S64, S64); }
void test_reg_bounds_gen_ranges_s64_u32(void) { validate_gen_range_vs_range(S64, U32); }
void test_reg_bounds_gen_ranges_s64_s32(void) { validate_gen_range_vs_range(S64, S32); }
/* RANGE x RANGE, U32 initial range */
void test_reg_bounds_gen_ranges_u32_u64(void) { validate_gen_range_vs_range(U32, U64); }
void test_reg_bounds_gen_ranges_u32_s64(void) { validate_gen_range_vs_range(U32, S64); }
void test_reg_bounds_gen_ranges_u32_u32(void) { validate_gen_range_vs_range(U32, U32); }
void test_reg_bounds_gen_ranges_u32_s32(void) { validate_gen_range_vs_range(U32, S32); }
/* RANGE x RANGE, S32 initial range */
void test_reg_bounds_gen_ranges_s32_u64(void) { validate_gen_range_vs_range(S32, U64); }
void test_reg_bounds_gen_ranges_s32_s64(void) { validate_gen_range_vs_range(S32, S64); }
void test_reg_bounds_gen_ranges_s32_u32(void) { validate_gen_range_vs_range(S32, U32); }
void test_reg_bounds_gen_ranges_s32_s32(void) { validate_gen_range_vs_range(S32, S32); }

#define DEFAULT_RAND_CASE_CNT 100

#define RAND_21BIT_MASK ((1 << 21) - 1)

static u64 rand_u64(void)
{
	/* RAND_MAX is guaranteed to be at least 1<<15, but in practice it
	 * seems to be 1<<31, so we need to call it thrice to get full u64;
	 * we'll use roughly equal split: 22 + 21 + 21 bits
	 */
	return ((u64)random() << 42) |
	       (((u64)random() & RAND_21BIT_MASK) << 21) |
	       (random() & RAND_21BIT_MASK);
}
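
/* Note on the intended split above: the first random() call contributes bits
 * 63..42 (22 bits survive the truncating shift), the second bits 41..21, and
 * the third bits 20..0, i.e., 22 + 21 + 21 = 64 bits total.
 */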

static u64 rand_const(enum num_t t)
{
	return cast_t(t, rand_u64());
}

static struct range rand_range(enum num_t t)
{
	u64 x = rand_const(t), y = rand_const(t);

	return range(t, min_t(t, x, y), max_t(t, x, y));
}

static void validate_rand_ranges(enum num_t init_t, enum num_t cond_t, bool const_range)
{
	struct ctx ctx;
	struct range range1, range2;
	int err, i;
	u64 t;

	memset(&ctx, 0, sizeof(ctx));

	err = parse_env_vars(&ctx);
	if (err) {
		ASSERT_OK(err, "parse_env_vars");
		return;
	}

	if (ctx.rand_case_cnt == 0)
		ctx.rand_case_cnt = DEFAULT_RAND_CASE_CNT;
	if (ctx.rand_seed == 0)
		ctx.rand_seed = (unsigned)get_time_ns();

	srandom(ctx.rand_seed);

	ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.rand_case_cnt);
	ctx.start_ns = get_time_ns();
	snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
		 "[RANDOM SEED %u] RANGE x %s, %s -> %s",
		 ctx.rand_seed, const_range ? "CONST" : "RANGE",
		 t_str(init_t), t_str(cond_t));

	for (i = 0; i < ctx.rand_case_cnt; i++) {
		range1 = rand_range(init_t);
		if (const_range) {
			t = rand_const(init_t);
			range2 = range(init_t, t, t);
		} else {
			range2 = rand_range(init_t);
		}

		/* <range1> x <range2> */
		if (verify_case_opt(&ctx, init_t, cond_t, range1, range2, false /* !is_subtest */))
			goto cleanup;
		/* <range2> x <range1> */
		if (verify_case_opt(&ctx, init_t, cond_t, range2, range1, false /* !is_subtest */))
			goto cleanup;
	}

cleanup:
	/* make sure we report the random seed used, so that a failing run can
	 * be reproduced by setting REG_BOUNDS_RAND_SEED to the same value
	 */
	ASSERT_TRUE(true, ctx.progress_ctx);
	cleanup_ctx(&ctx);
}

/* [RANDOM] RANGE x CONST, U64 initial range */
void test_reg_bounds_rand_consts_u64_u64(void) { validate_rand_ranges(U64, U64, true /* const */); }
void test_reg_bounds_rand_consts_u64_s64(void) { validate_rand_ranges(U64, S64, true /* const */); }
void test_reg_bounds_rand_consts_u64_u32(void) { validate_rand_ranges(U64, U32, true /* const */); }
void test_reg_bounds_rand_consts_u64_s32(void) { validate_rand_ranges(U64, S32, true /* const */); }
/* [RANDOM] RANGE x CONST, S64 initial range */
void test_reg_bounds_rand_consts_s64_u64(void) { validate_rand_ranges(S64, U64, true /* const */); }
void test_reg_bounds_rand_consts_s64_s64(void) { validate_rand_ranges(S64, S64, true /* const */); }
void test_reg_bounds_rand_consts_s64_u32(void) { validate_rand_ranges(S64, U32, true /* const */); }
void test_reg_bounds_rand_consts_s64_s32(void) { validate_rand_ranges(S64, S32, true /* const */); }
/* [RANDOM] RANGE x CONST, U32 initial range */
void test_reg_bounds_rand_consts_u32_u64(void) { validate_rand_ranges(U32, U64, true /* const */); }
void test_reg_bounds_rand_consts_u32_s64(void) { validate_rand_ranges(U32, S64, true /* const */); }
void test_reg_bounds_rand_consts_u32_u32(void) { validate_rand_ranges(U32, U32, true /* const */); }
void test_reg_bounds_rand_consts_u32_s32(void) { validate_rand_ranges(U32, S32, true /* const */); }
/* [RANDOM] RANGE x CONST, S32 initial range */
void test_reg_bounds_rand_consts_s32_u64(void) { validate_rand_ranges(S32, U64, true /* const */); }
void test_reg_bounds_rand_consts_s32_s64(void) { validate_rand_ranges(S32, S64, true /* const */); }
void test_reg_bounds_rand_consts_s32_u32(void) { validate_rand_ranges(S32, U32, true /* const */); }
void test_reg_bounds_rand_consts_s32_s32(void) { validate_rand_ranges(S32, S32, true /* const */); }

/* [RANDOM] RANGE x RANGE, U64 initial range */
void test_reg_bounds_rand_ranges_u64_u64(void) { validate_rand_ranges(U64, U64, false /* range */); }
void test_reg_bounds_rand_ranges_u64_s64(void) { validate_rand_ranges(U64, S64, false /* range */); }
void test_reg_bounds_rand_ranges_u64_u32(void) { validate_rand_ranges(U64, U32, false /* range */); }
void test_reg_bounds_rand_ranges_u64_s32(void) { validate_rand_ranges(U64, S32, false /* range */); }
/* [RANDOM] RANGE x RANGE, S64 initial range */
void test_reg_bounds_rand_ranges_s64_u64(void) { validate_rand_ranges(S64, U64, false /* range */); }
void test_reg_bounds_rand_ranges_s64_s64(void) { validate_rand_ranges(S64, S64, false /* range */); }
void test_reg_bounds_rand_ranges_s64_u32(void) { validate_rand_ranges(S64, U32, false /* range */); }
void test_reg_bounds_rand_ranges_s64_s32(void) { validate_rand_ranges(S64, S32, false /* range */); }
/* [RANDOM] RANGE x RANGE, U32 initial range */
void test_reg_bounds_rand_ranges_u32_u64(void) { validate_rand_ranges(U32, U64, false /* range */); }
void test_reg_bounds_rand_ranges_u32_s64(void) { validate_rand_ranges(U32, S64, false /* range */); }
void test_reg_bounds_rand_ranges_u32_u32(void) { validate_rand_ranges(U32, U32, false /* range */); }
void test_reg_bounds_rand_ranges_u32_s32(void) { validate_rand_ranges(U32, S32, false /* range */); }
/* [RANDOM] RANGE x RANGE, S32 initial range */
void test_reg_bounds_rand_ranges_s32_u64(void) { validate_rand_ranges(S32, U64, false /* range */); }
void test_reg_bounds_rand_ranges_s32_s64(void) { validate_rand_ranges(S32, S64, false /* range */); }
void test_reg_bounds_rand_ranges_s32_u32(void) { validate_rand_ranges(S32, U32, false /* range */); }
void test_reg_bounds_rand_ranges_s32_s32(void) { validate_rand_ranges(S32, S32, false /* range */); }

/* A set of hard-coded "interesting" cases to validate as part of normal
 * test_progs test runs
 */
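/* Each entry is a struct subtest_case { init_t, cond_t, x, y }: x and y are
 * the two input ranges set up in the init_t numeric domain, with the
 * conditional operations then checked in the cond_t domain (see
 * test_reg_bounds_crafted() below, which tries both argument orders).
 */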
static struct subtest_case crafted_cases[] = {
	{U64, U64, {0, 0xffffffff}, {0, 0}},
	{U64, U64, {0, 0x80000000}, {0, 0}},
	{U64, U64, {0x100000000ULL, 0x100000100ULL}, {0, 0}},
	{U64, U64, {0x100000000ULL, 0x180000000ULL}, {0, 0}},
	{U64, U64, {0x100000000ULL, 0x1ffffff00ULL}, {0, 0}},
	{U64, U64, {0x100000000ULL, 0x1ffffff01ULL}, {0, 0}},
	{U64, U64, {0x100000000ULL, 0x1fffffffeULL}, {0, 0}},
	{U64, U64, {0x100000001ULL, 0x1000000ffULL}, {0, 0}},

	/* single point overlap, interesting BPF_EQ and BPF_NE interactions */
	{U64, U64, {0, 1}, {1, 0x80000000}},
	{U64, S64, {0, 1}, {1, 0x80000000}},
	{U64, U32, {0, 1}, {1, 0x80000000}},
	{U64, S32, {0, 1}, {1, 0x80000000}},

	{U64, S64, {0, 0xffffffff00000000ULL}, {0, 0}},
	{U64, S64, {0x7fffffffffffffffULL, 0xffffffff00000000ULL}, {0, 0}},
	{U64, S64, {0x7fffffff00000001ULL, 0xffffffff00000000ULL}, {0, 0}},
	{U64, S64, {0, 0xffffffffULL}, {1, 1}},
	{U64, S64, {0, 0xffffffffULL}, {0x7fffffff, 0x7fffffff}},

	{U64, U32, {0, 0x100000000ULL}, {0, 0}},
	{U64, U32, {0xfffffffe, 0x300000000ULL}, {0x80000000, 0x80000000}},

	{U64, S32, {0, 0xffffffff00000000ULL}, {0, 0}},
	/* these are tricky cases where lower 32 bits allow tightening 64-bit
	 * boundaries based on the tightened lower 32-bit boundaries
	 */
	{U64, S32, {0, 0x0ffffffffULL}, {0, 0}},
	{U64, S32, {0, 0x100000000ULL}, {0, 0}},
	{U64, S32, {0, 0x100000001ULL}, {0, 0}},
	{U64, S32, {0, 0x180000000ULL}, {0, 0}},
	{U64, S32, {0, 0x17fffffffULL}, {0, 0}},
	{U64, S32, {0, 0x180000001ULL}, {0, 0}},

	/* verifier knows about [-1, 0] range for s32 for this case already */
	{S64, S64, {0xffffffffffffffffULL, 0}, {0xffffffff00000000ULL, 0xffffffff00000000ULL}},
	/* but didn't know about these cases initially */
	{U64, U64, {0xffffffff, 0x100000000ULL}, {0, 0}}, /* s32: [-1, 0] */
	{U64, U64, {0xffffffff, 0x100000001ULL}, {0, 0}}, /* s32: [-1, 1] */

	/* longer convergence case: learning from u64 -> s64 -> u64 -> u32,
	 * arriving at u32: [1, U32_MAX] (instead of more pessimistic [0, U32_MAX])
	 */
	{S64, U64, {0xffffffff00000001ULL, 0}, {0xffffffff00000000ULL, 0xffffffff00000000ULL}},

	{U32, U32, {1, U32_MAX}, {0, 0}},

	{U32, S32, {0, U32_MAX}, {U32_MAX, U32_MAX}},

	{S32, U64, {(u32)S32_MIN, (u32)S32_MIN}, {(u32)(s32)-255, 0}},
	{S32, S64, {(u32)S32_MIN, (u32)(s32)-255}, {(u32)(s32)-2, 0}},
	{S32, S64, {0, 1}, {(u32)S32_MIN, (u32)S32_MIN}},
	{S32, U32, {(u32)S32_MIN, (u32)S32_MIN}, {(u32)S32_MIN, (u32)S32_MIN}},

	/* edge overlap tests for BPF_NE */
	{U64, U64, {0, U64_MAX}, {U64_MAX, U64_MAX}},
	{U64, U64, {0, U64_MAX}, {0, 0}},
	{S64, U64, {S64_MIN, 0}, {S64_MIN, S64_MIN}},
	{S64, U64, {S64_MIN, 0}, {0, 0}},
	{S64, U64, {S64_MIN, S64_MAX}, {S64_MAX, S64_MAX}},
	{U32, U32, {0, U32_MAX}, {0, 0}},
	{U32, U32, {0, U32_MAX}, {U32_MAX, U32_MAX}},
	{S32, U32, {(u32)S32_MIN, 0}, {0, 0}},
	{S32, U32, {(u32)S32_MIN, 0}, {(u32)S32_MIN, (u32)S32_MIN}},
	{S32, U32, {(u32)S32_MIN, S32_MAX}, {S32_MAX, S32_MAX}},
	{S64, U32, {0x0, 0x1f}, {0xffffffff80000000ULL, 0x000000007fffffffULL}},
	{S64, U32, {0x0, 0x1f}, {0xffffffffffff8000ULL, 0x0000000000007fffULL}},
	{S64, U32, {0x0, 0x1f}, {0xffffffffffffff80ULL, 0x000000000000007fULL}},
};

/* Go over crafted hard-coded cases. This is fast, so we do it as part of the
 * normal test_progs run.
 */
void test_reg_bounds_crafted(void)
{
	struct ctx ctx;
	int i;

	memset(&ctx, 0, sizeof(ctx));

	for (i = 0; i < ARRAY_SIZE(crafted_cases); i++) {
		struct subtest_case *c = &crafted_cases[i];

		verify_case(&ctx, c->init_t, c->cond_t, c->x, c->y);
		verify_case(&ctx, c->init_t, c->cond_t, c->y, c->x);
	}

	cleanup_ctx(&ctx);
}