xref: /linux/tools/testing/selftests/bpf/test_loader.c (revision 79ac11393328fb1717d17c12e3c0eef0e9fa0647)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/capability.h>
#include <stdlib.h>
#include <test_progs.h>
#include <bpf/btf.h>

#include "autoconf_helper.h"
#include "unpriv_helpers.h"
#include "cap_helpers.h"

#define str_has_pfx(str, pfx) \
	(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)

#define TEST_LOADER_LOG_BUF_SZ 1048576

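/* The tag strings below are attached to test programs as BTF decl_tag
 * attributes, typically via the helper macros in progs/bpf_misc.h (e.g.
 * __description(), __failure, __success, __msg(), __retval(), __log_level(),
 * __flag(), __auxiliary and their *_unpriv variants). parse_test_spec()
 * matches them against the object's BTF to build a test_spec.
 */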
#define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure"
#define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success"
#define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg="
#define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv"
#define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv"
#define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv="
#define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level="
#define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags="
#define TEST_TAG_DESCRIPTION_PFX "comment:test_description="
#define TEST_TAG_RETVAL_PFX "comment:test_retval="
#define TEST_TAG_RETVAL_PFX_UNPRIV "comment:test_retval_unpriv="
#define TEST_TAG_AUXILIARY "comment:test_auxiliary"
#define TEST_TAG_AUXILIARY_UNPRIV "comment:test_auxiliary_unpriv"

/* Warning: duplicated in bpf_misc.h */
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFFICIENT_UNALIGNED_ACCESS 1
#else
#define EFFICIENT_UNALIGNED_ACCESS 0
#endif

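/* Cached result of get_unpriv_disabled(); -1 means "not queried yet",
 * see can_execute_unpriv().
 */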
static int sysctl_unpriv_disabled = -1;

enum mode {
	PRIV = 1,
	UNPRIV = 2
};

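/* Expectations for a single privilege mode (privileged or unprivileged),
 * filled in by parse_test_spec() from the decl_tag annotations.
 */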
struct test_subspec {
	char *name;
	bool expect_failure;
	const char **expect_msgs;
	size_t expect_msg_cnt;
	int retval;
	bool execute;
};

struct test_spec {
	const char *prog_name;
	struct test_subspec priv;
	struct test_subspec unpriv;
	int log_level;
	int prog_flags;
	int mode_mask;
	bool auxiliary;
	bool valid;
};

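/* Lazily allocate the verifier log buffer shared by all subtests of this
 * loader instance.
 */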
static int tester_init(struct test_loader *tester)
{
	if (!tester->log_buf) {
		tester->log_buf_sz = TEST_LOADER_LOG_BUF_SZ;
		tester->log_buf = calloc(tester->log_buf_sz, 1);
		if (!ASSERT_OK_PTR(tester->log_buf, "tester_log_buf"))
			return -ENOMEM;
	}

	return 0;
}

void test_loader_fini(struct test_loader *tester)
{
	if (!tester)
		return;

	free(tester->log_buf);
}

static void free_test_spec(struct test_spec *spec)
{
	free(spec->priv.name);
	free(spec->unpriv.name);
	free(spec->priv.expect_msgs);
	free(spec->unpriv.expect_msgs);

	spec->priv.name = NULL;
	spec->unpriv.name = NULL;
	spec->priv.expect_msgs = NULL;
	spec->unpriv.expect_msgs = NULL;
}

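/* Append an expected verifier log message to the subspec, growing the
 * expect_msgs array by one element.
 */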
static int push_msg(const char *msg, struct test_subspec *subspec)
{
	void *tmp;

	tmp = realloc(subspec->expect_msgs, (1 + subspec->expect_msg_cnt) * sizeof(void *));
	if (!tmp) {
		ASSERT_FAIL("failed to realloc memory for messages\n");
		return -ENOMEM;
	}
	subspec->expect_msgs = tmp;
	subspec->expect_msgs[subspec->expect_msg_cnt++] = msg;

	return 0;
}

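/* Parse a decimal or "0x"-prefixed hexadecimal integer. */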
static int parse_int(const char *str, int *val, const char *name)
{
	char *end;
	long tmp;

	errno = 0;
	if (str_has_pfx(str, "0x"))
		tmp = strtol(str + 2, &end, 16);
	else
		tmp = strtol(str, &end, 10);
	if (errno || end[0] != '\0') {
		PRINT_FAIL("failed to parse %s from '%s'\n", name, str);
		return -EINVAL;
	}
	*val = tmp;
	return 0;
}

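/* Like parse_int(), but also accepts a few symbolic constants commonly used
 * in __retval() annotations.
 */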
static int parse_retval(const char *str, int *val, const char *name)
{
	struct {
		char *name;
		int val;
	} named_values[] = {
		{ "INT_MIN"      , INT_MIN },
		{ "POINTER_VALUE", POINTER_VALUE },
		{ "TEST_DATA_LEN", TEST_DATA_LEN },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(named_values); ++i) {
		if (strcmp(str, named_values[i].name) != 0)
			continue;
		*val = named_values[i].val;
		return 0;
	}

	return parse_int(str, val, name);
}

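/* Set or clear a single BPF_F_* program flag bit. */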
static void update_flags(int *flags, int flag, bool clear)
{
	if (clear)
		*flags &= ~flag;
	else
		*flags |= flag;
}

/* Uses btf_decl_tag attributes to describe the expected test
 * behavior, see bpf_misc.h for detailed description of each attribute
 * and attribute combinations.
 */
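/* As a rough sketch (the exact macro names live in progs/bpf_misc.h), a test
 * program is typically annotated like:
 *
 *	SEC("socket")
 *	__description("some descriptive test name")
 *	__failure __msg("R1 type=scalar expected=ctx")
 *	__failure_unpriv
 *	__naked void my_test(void)
 *	{
 *		...
 *	}
 *
 * Each macro expands to a btf_decl_tag attribute carrying one of the
 * TEST_TAG_* strings defined above; the loop below walks the object's BTF,
 * collects the tags attached to the test function and fills in *spec.
 */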
static int parse_test_spec(struct test_loader *tester,
			   struct bpf_object *obj,
			   struct bpf_program *prog,
			   struct test_spec *spec)
{
	const char *description = NULL;
	bool has_unpriv_result = false;
	bool has_unpriv_retval = false;
	int func_id, i, err = 0;
	struct btf *btf;

	memset(spec, 0, sizeof(*spec));

	spec->prog_name = bpf_program__name(prog);
	spec->prog_flags = BPF_F_TEST_REG_INVARIANTS; /* by default be strict */

	btf = bpf_object__btf(obj);
	if (!btf) {
		ASSERT_FAIL("BPF object has no BTF");
		return -EINVAL;
	}

	func_id = btf__find_by_name_kind(btf, spec->prog_name, BTF_KIND_FUNC);
	if (func_id < 0) {
		ASSERT_FAIL("failed to find FUNC BTF type for '%s'", spec->prog_name);
		return -EINVAL;
	}

	for (i = 1; i < btf__type_cnt(btf); i++) {
		const char *s, *val, *msg;
		const struct btf_type *t;
		bool clear;
		int flags;

		t = btf__type_by_id(btf, i);
		if (!btf_is_decl_tag(t))
			continue;

		if (t->type != func_id || btf_decl_tag(t)->component_idx != -1)
			continue;

		s = btf__str_by_offset(btf, t->name_off);
		if (str_has_pfx(s, TEST_TAG_DESCRIPTION_PFX)) {
			description = s + sizeof(TEST_TAG_DESCRIPTION_PFX) - 1;
		} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE) == 0) {
			spec->priv.expect_failure = true;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS) == 0) {
			spec->priv.expect_failure = false;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE_UNPRIV) == 0) {
			spec->unpriv.expect_failure = true;
			spec->mode_mask |= UNPRIV;
			has_unpriv_result = true;
		} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS_UNPRIV) == 0) {
			spec->unpriv.expect_failure = false;
			spec->mode_mask |= UNPRIV;
			has_unpriv_result = true;
		} else if (strcmp(s, TEST_TAG_AUXILIARY) == 0) {
			spec->auxiliary = true;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_AUXILIARY_UNPRIV) == 0) {
			spec->auxiliary = true;
			spec->mode_mask |= UNPRIV;
		} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) {
			msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1;
			err = push_msg(msg, &spec->priv);
			if (err)
				goto cleanup;
			spec->mode_mask |= PRIV;
		} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) {
			msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1;
			err = push_msg(msg, &spec->unpriv);
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
		} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX)) {
			val = s + sizeof(TEST_TAG_RETVAL_PFX) - 1;
			err = parse_retval(val, &spec->priv.retval, "__retval");
			if (err)
				goto cleanup;
			spec->priv.execute = true;
			spec->mode_mask |= PRIV;
		} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX_UNPRIV)) {
			val = s + sizeof(TEST_TAG_RETVAL_PFX_UNPRIV) - 1;
			err = parse_retval(val, &spec->unpriv.retval, "__retval_unpriv");
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
			spec->unpriv.execute = true;
			has_unpriv_retval = true;
		} else if (str_has_pfx(s, TEST_TAG_LOG_LEVEL_PFX)) {
			val = s + sizeof(TEST_TAG_LOG_LEVEL_PFX) - 1;
			err = parse_int(val, &spec->log_level, "test log level");
			if (err)
				goto cleanup;
		} else if (str_has_pfx(s, TEST_TAG_PROG_FLAGS_PFX)) {
			val = s + sizeof(TEST_TAG_PROG_FLAGS_PFX) - 1;

			clear = val[0] == '!';
			if (clear)
				val++;

			if (strcmp(val, "BPF_F_STRICT_ALIGNMENT") == 0) {
				update_flags(&spec->prog_flags, BPF_F_STRICT_ALIGNMENT, clear);
			} else if (strcmp(val, "BPF_F_ANY_ALIGNMENT") == 0) {
				update_flags(&spec->prog_flags, BPF_F_ANY_ALIGNMENT, clear);
			} else if (strcmp(val, "BPF_F_TEST_RND_HI32") == 0) {
				update_flags(&spec->prog_flags, BPF_F_TEST_RND_HI32, clear);
			} else if (strcmp(val, "BPF_F_TEST_STATE_FREQ") == 0) {
				update_flags(&spec->prog_flags, BPF_F_TEST_STATE_FREQ, clear);
			} else if (strcmp(val, "BPF_F_SLEEPABLE") == 0) {
				update_flags(&spec->prog_flags, BPF_F_SLEEPABLE, clear);
			} else if (strcmp(val, "BPF_F_XDP_HAS_FRAGS") == 0) {
				update_flags(&spec->prog_flags, BPF_F_XDP_HAS_FRAGS, clear);
			} else if (strcmp(val, "BPF_F_TEST_REG_INVARIANTS") == 0) {
				update_flags(&spec->prog_flags, BPF_F_TEST_REG_INVARIANTS, clear);
			} else /* assume numeric value */ {
				err = parse_int(val, &flags, "test prog flags");
				if (err)
					goto cleanup;
				update_flags(&spec->prog_flags, flags, clear);
			}
		}
	}

	if (spec->mode_mask == 0)
		spec->mode_mask = PRIV;

	if (!description)
		description = spec->prog_name;

	if (spec->mode_mask & PRIV) {
		spec->priv.name = strdup(description);
		if (!spec->priv.name) {
			PRINT_FAIL("failed to allocate memory for priv.name\n");
			err = -ENOMEM;
			goto cleanup;
		}
	}

	if (spec->mode_mask & UNPRIV) {
		int descr_len = strlen(description);
		const char *suffix = " @unpriv";
		char *name;

		name = malloc(descr_len + strlen(suffix) + 1);
		if (!name) {
			PRINT_FAIL("failed to allocate memory for unpriv.name\n");
			err = -ENOMEM;
			goto cleanup;
		}

		strcpy(name, description);
		strcpy(&name[descr_len], suffix);
		spec->unpriv.name = name;
	}

	if (spec->mode_mask & UNPRIV) {
		if (!has_unpriv_result)
			spec->unpriv.expect_failure = spec->priv.expect_failure;

		if (!has_unpriv_retval) {
			spec->unpriv.retval = spec->priv.retval;
			spec->unpriv.execute = spec->priv.execute;
		}

		if (!spec->unpriv.expect_msgs) {
			size_t sz = spec->priv.expect_msg_cnt * sizeof(void *);

			spec->unpriv.expect_msgs = malloc(sz);
			if (!spec->unpriv.expect_msgs) {
				PRINT_FAIL("failed to allocate memory for unpriv.expect_msgs\n");
				err = -ENOMEM;
				goto cleanup;
			}
			memcpy(spec->unpriv.expect_msgs, spec->priv.expect_msgs, sz);
			spec->unpriv.expect_msg_cnt = spec->priv.expect_msg_cnt;
		}
	}

	spec->valid = true;

	return 0;

cleanup:
	free_test_spec(spec);
	return err;
}

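/* Point the program at the shared log buffer, raise the verifier log level
 * to at least what the test_progs verbosity asks for (preserving the
 * independent "stats" bit 4), merge in the requested prog_flags and reset
 * the log-matching state for this case.
 */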
static void prepare_case(struct test_loader *tester,
			 struct test_spec *spec,
			 struct bpf_object *obj,
			 struct bpf_program *prog)
{
	int min_log_level = 0, prog_flags;

	if (env.verbosity > VERBOSE_NONE)
		min_log_level = 1;
	if (env.verbosity > VERBOSE_VERY)
		min_log_level = 2;

	bpf_program__set_log_buf(prog, tester->log_buf, tester->log_buf_sz);

	/* Make sure we set at least minimal log level, unless test requires
	 * even higher level already. Make sure to preserve independent log
	 * level 4 (verifier stats), though.
	 */
	if ((spec->log_level & 3) < min_log_level)
		bpf_program__set_log_level(prog, (spec->log_level & 4) | min_log_level);
	else
		bpf_program__set_log_level(prog, spec->log_level);

	prog_flags = bpf_program__flags(prog);
	bpf_program__set_flags(prog, prog_flags | spec->prog_flags);

	tester->log_buf[0] = '\0';
	tester->next_match_pos = 0;
}

static void emit_verifier_log(const char *log_buf, bool force)
{
	if (!force && env.verbosity == VERBOSE_NONE)
		return;
	fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log_buf);
}

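/* Check that every expected message occurs in the verifier log, in order:
 * each message must be found after the position where the previous one
 * matched (tracked in tester->next_match_pos).
 */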
static void validate_case(struct test_loader *tester,
			  struct test_subspec *subspec,
			  struct bpf_object *obj,
			  struct bpf_program *prog,
			  int load_err)
{
	int i, j;

	for (i = 0; i < subspec->expect_msg_cnt; i++) {
		char *match;
		const char *expect_msg;

		expect_msg = subspec->expect_msgs[i];

		match = strstr(tester->log_buf + tester->next_match_pos, expect_msg);
		if (!ASSERT_OK_PTR(match, "expect_msg")) {
			/* if we are in verbose mode, we've already emitted log */
			if (env.verbosity == VERBOSE_NONE)
				emit_verifier_log(tester->log_buf, true /*force*/);
			for (j = 0; j < i; j++)
				fprintf(stderr,
					"MATCHED  MSG: '%s'\n", subspec->expect_msgs[j]);
			fprintf(stderr, "EXPECTED MSG: '%s'\n", expect_msg);
			return;
		}

		tester->next_match_pos = match - tester->log_buf + strlen(expect_msg);
	}
}

struct cap_state {
	__u64 old_caps;
	bool initialized;
};

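/* Drop the capabilities that allow privileged BPF operations from the
 * effective set, remembering the previous state so it can be restored by
 * restore_capabilities().
 */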
static int drop_capabilities(struct cap_state *caps)
{
	const __u64 caps_to_drop = (1ULL << CAP_SYS_ADMIN | 1ULL << CAP_NET_ADMIN |
				    1ULL << CAP_PERFMON   | 1ULL << CAP_BPF);
	int err;

	err = cap_disable_effective(caps_to_drop, &caps->old_caps);
	if (err) {
		PRINT_FAIL("failed to drop capabilities: %i, %s\n", err, strerror(err));
		return err;
	}

	caps->initialized = true;
	return 0;
}

static int restore_capabilities(struct cap_state *caps)
{
	int err;

	if (!caps->initialized)
		return 0;

	err = cap_enable_effective(caps->old_caps, NULL);
	if (err)
		PRINT_FAIL("failed to restore capabilities: %i, %s\n", err, strerror(err));
	caps->initialized = false;
	return err;
}

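/* Unprivileged subtests are skipped when unprivileged BPF is disabled on
 * this system (get_unpriv_disabled(), which is expected to consult the
 * kernel.unprivileged_bpf_disabled sysctl, see unpriv_helpers.c) or when
 * the test asks for BPF_F_ANY_ALIGNMENT on a platform without efficient
 * unaligned access.
 */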
static bool can_execute_unpriv(struct test_loader *tester, struct test_spec *spec)
{
	if (sysctl_unpriv_disabled < 0)
		sysctl_unpriv_disabled = get_unpriv_disabled() ? 1 : 0;
	if (sysctl_unpriv_disabled)
		return false;
	if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS)
		return false;
	return true;
}

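/* Map types that an unprivileged process may create. Hash-based maps only
 * qualify if they don't request BPF_F_ZERO_SEED, which the kernel reserves
 * for privileged users.
 */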
static bool is_unpriv_capable_map(struct bpf_map *map)
{
	enum bpf_map_type type;
	__u32 flags;

	type = bpf_map__type(map);

	switch (type) {
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		flags = bpf_map__map_flags(map);
		return !(flags & BPF_F_ZERO_SEED);
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
		return true;
	default:
		return false;
	}
}

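/* Run the loaded program once via BPF_PROG_TEST_RUN with small dummy input
 * and output buffers, and report its return value through *retval.
 */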
static int do_prog_test_run(int fd_prog, int *retval)
{
	__u8 tmp_out[TEST_DATA_LEN << 2] = {};
	__u8 tmp_in[TEST_DATA_LEN] = {};
	int err, saved_errno;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = tmp_in,
		.data_size_in = sizeof(tmp_in),
		.data_out = tmp_out,
		.data_size_out = sizeof(tmp_out),
		.repeat = 1,
	);

	err = bpf_prog_test_run_opts(fd_prog, &topts);
	saved_errno = errno;

	if (err) {
		PRINT_FAIL("FAIL: Unexpected bpf_prog_test_run error: %d (%s) ",
			   saved_errno, strerror(saved_errno));
		return err;
	}

	ASSERT_OK(0, "bpf_prog_test_run");
	*retval = topts.retval;

	return 0;
}

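/* Execute the program only if the spec asked for it via __retval, the load
 * was expected to succeed, and alignment requirements allow it.
 */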
static bool should_do_test_run(struct test_spec *spec, struct test_subspec *subspec)
{
	if (!subspec->execute)
		return false;

	if (subspec->expect_failure)
		return false;

	if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS) {
		if (env.verbosity != VERBOSE_NONE)
			printf("alignment prevents execution\n");
		return false;
	}

	return true;
}

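/* Run a single privileged or unprivileged subtest: open a fresh copy of the
 * object, enable autoload only for the program under test (plus any
 * auxiliary programs for this mode), load it, check the verifier log
 * against the expected messages and, if requested, execute the program and
 * compare its return value.
 */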
/* this function is forced noinline and has short generic name to look better
 * in test_progs output (in case of a failure)
 */
static noinline
void run_subtest(struct test_loader *tester,
		 struct bpf_object_open_opts *open_opts,
		 const void *obj_bytes,
		 size_t obj_byte_cnt,
		 struct test_spec *specs,
		 struct test_spec *spec,
		 bool unpriv)
{
	struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv;
	struct bpf_program *tprog = NULL, *tprog_iter;
	struct test_spec *spec_iter;
	struct cap_state caps = {};
	struct bpf_object *tobj;
	struct bpf_map *map;
	int retval, err, i;
	bool should_load;

	if (!test__start_subtest(subspec->name))
		return;

	if (unpriv) {
		if (!can_execute_unpriv(tester, spec)) {
			test__skip();
			test__end_subtest();
			return;
		}
		if (drop_capabilities(&caps)) {
			test__end_subtest();
			return;
		}
	}

	tobj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, open_opts);
	if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */
		goto subtest_cleanup;

	i = 0;
	bpf_object__for_each_program(tprog_iter, tobj) {
		spec_iter = &specs[i++];
		should_load = false;

		if (spec_iter->valid) {
			if (strcmp(bpf_program__name(tprog_iter), spec->prog_name) == 0) {
				tprog = tprog_iter;
				should_load = true;
			}

			if (spec_iter->auxiliary &&
			    spec_iter->mode_mask & (unpriv ? UNPRIV : PRIV))
				should_load = true;
		}

		bpf_program__set_autoload(tprog_iter, should_load);
	}

	prepare_case(tester, spec, tobj, tprog);

	/* By default bpf_object__load() automatically creates all
	 * maps declared in the skeleton. Some map types are only
	 * allowed in priv mode. Disable autoload for such maps in
	 * unpriv mode.
	 */
	bpf_object__for_each_map(map, tobj)
		bpf_map__set_autocreate(map, !unpriv || is_unpriv_capable_map(map));

	err = bpf_object__load(tobj);
	if (subspec->expect_failure) {
		if (!ASSERT_ERR(err, "unexpected_load_success")) {
			emit_verifier_log(tester->log_buf, false /*force*/);
			goto tobj_cleanup;
		}
	} else {
		if (!ASSERT_OK(err, "unexpected_load_failure")) {
			emit_verifier_log(tester->log_buf, true /*force*/);
			goto tobj_cleanup;
		}
	}

	emit_verifier_log(tester->log_buf, false /*force*/);
	validate_case(tester, subspec, tobj, tprog, err);

	if (should_do_test_run(spec, subspec)) {
		/* For some reason test_verifier executes programs
		 * with all capabilities restored. Do the same here.
		 */
		if (restore_capabilities(&caps))
			goto tobj_cleanup;

		if (tester->pre_execution_cb) {
			err = tester->pre_execution_cb(tobj);
			if (err) {
				PRINT_FAIL("pre_execution_cb failed: %d\n", err);
				goto tobj_cleanup;
			}
		}

		do_prog_test_run(bpf_program__fd(tprog), &retval);
		if (retval != subspec->retval && subspec->retval != POINTER_VALUE) {
			PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
			goto tobj_cleanup;
		}
	}

tobj_cleanup:
	bpf_object__close(tobj);
subtest_cleanup:
	test__end_subtest();
	restore_capabilities(&caps);
}

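/* Parse a test_spec for every program in the skeleton's ELF, then run the
 * privileged and/or unprivileged subtest for each valid, non-auxiliary
 * spec.
 */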
static void process_subtest(struct test_loader *tester,
			    const char *skel_name,
			    skel_elf_bytes_fn elf_bytes_factory)
{
	LIBBPF_OPTS(bpf_object_open_opts, open_opts, .object_name = skel_name);
	struct test_spec *specs = NULL;
	struct bpf_object *obj = NULL;
	struct bpf_program *prog;
	const void *obj_bytes;
	int err, i, nr_progs;
	size_t obj_byte_cnt;

	if (tester_init(tester) < 0)
		return; /* failed to initialize tester */

	obj_bytes = elf_bytes_factory(&obj_byte_cnt);
	obj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, &open_opts);
	if (!ASSERT_OK_PTR(obj, "obj_open_mem"))
		return;

	nr_progs = 0;
	bpf_object__for_each_program(prog, obj)
		++nr_progs;

	specs = calloc(nr_progs, sizeof(struct test_spec));
	if (!ASSERT_OK_PTR(specs, "Can't alloc specs array"))
		return;

	i = 0;
	bpf_object__for_each_program(prog, obj) {
		/* ignore tests for which we can't derive test specification */
		err = parse_test_spec(tester, obj, prog, &specs[i++]);
		if (err)
			PRINT_FAIL("Can't parse test spec for program '%s'\n",
				   bpf_program__name(prog));
	}

	i = 0;
	bpf_object__for_each_program(prog, obj) {
		struct test_spec *spec = &specs[i++];

		if (!spec->valid || spec->auxiliary)
			continue;

		if (spec->mode_mask & PRIV)
			run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt,
				    specs, spec, false);
		if (spec->mode_mask & UNPRIV)
			run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt,
				    specs, spec, true);
	}

	for (i = 0; i < nr_progs; ++i)
		free_test_spec(&specs[i]);
	free(specs);
	bpf_object__close(obj);
}

void test_loader__run_subtests(struct test_loader *tester,
			       const char *skel_name,
			       skel_elf_bytes_fn elf_bytes_factory)
{
	/* see comment in run_subtest() for why we do this function nesting */
	process_subtest(tester, skel_name, elf_bytes_factory);
}