
Searched refs:skel (Results 1 – 25 of 308) sorted by relevance


/linux/tools/testing/selftests/bpf/prog_tests/
tracing_struct.c
10 struct tracing_struct *skel; in test_struct_args() local
13 skel = tracing_struct__open_and_load(); in test_struct_args()
14 if (!ASSERT_OK_PTR(skel, "tracing_struct__open_and_load")) in test_struct_args()
17 err = tracing_struct__attach(skel); in test_struct_args()
23 ASSERT_EQ(skel->bss->t1_a_a, 2, "t1:a.a"); in test_struct_args()
24 ASSERT_EQ(skel->bss->t1_a_b, 3, "t1:a.b"); in test_struct_args()
25 ASSERT_EQ(skel->bss->t1_b, 1, "t1:b"); in test_struct_args()
26 ASSERT_EQ(skel->bss->t1_c, 4, "t1:c"); in test_struct_args()
28 ASSERT_EQ(skel->bss->t1_nregs, 4, "t1 nregs"); in test_struct_args()
29 ASSERT_EQ(skel->bss->t1_reg0, 2, "t1 reg0"); in test_struct_args()
[all …]
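
These hits show the canonical skeleton lifecycle used throughout the BPF selftests: open and load the generated skeleton, attach its programs, trigger the traced code, then check globals mirrored into skel->bss. A minimal sketch of that flow, assuming a header generated with bpftool gen skeleton from the test's BPF object:

#include <stdio.h>
#include "tracing_struct.skel.h"	/* generated by bpftool gen skeleton */

int main(void)
{
	struct tracing_struct *skel;
	int err;

	/* open the embedded BPF object and load it into the kernel */
	skel = tracing_struct__open_and_load();
	if (!skel)
		return 1;

	/* attach every program to its declared hook */
	err = tracing_struct__attach(skel);
	if (err)
		goto cleanup;

	/* ... trigger the traced kernel functions here ... */

	/* BPF global variables are mirrored into skel->bss and skel->data */
	printf("t1:a.a = %lld\n", (long long)skel->bss->t1_a_a);

cleanup:
	/* detaches links, unloads programs, frees the skeleton */
	tracing_struct__destroy(skel);
	return err ? 1 : 0;
}
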
attach_probe.c
43 struct test_attach_probe_manual *skel; in test_attach_probe_manual() local
46 skel = test_attach_probe_manual__open_and_load(); in test_attach_probe_manual()
47 if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load")) in test_attach_probe_manual()
57 kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, in test_attach_probe_manual()
62 skel->links.handle_kprobe = kprobe_link; in test_attach_probe_manual()
65 kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, in test_attach_probe_manual()
70 skel->links.handle_kretprobe = kretprobe_link; in test_attach_probe_manual()
76 uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, in test_attach_probe_manual()
83 skel->links.handle_uprobe = uprobe_link; in test_attach_probe_manual()
86 uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, in test_attach_probe_manual()
[all …]
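
Here each link is created by hand instead of via the skeleton's __attach(), so the test can pass explicit options; parking the link in skel->links hands ownership back so __destroy() detaches it. A sketch of the kprobe case (the probed symbol is illustrative, not the one this test uses):

/* needs <errno.h>, <bpf/libbpf.h> and the generated skeleton header */
static int attach_kprobe_manually(struct test_attach_probe_manual *skel)
{
	LIBBPF_OPTS(bpf_kprobe_opts, kopts,
		.retprobe = false,	/* set true for a kretprobe */
	);
	struct bpf_link *link;

	link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
					       "do_nanosleep", /* illustrative */
					       &kopts);
	if (!link)
		return -errno;	/* libbpf sets errno on failure */

	/* skeleton-owned from here: __destroy() will detach it */
	skel->links.handle_kprobe = link;
	return 0;
}
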
atomics.c
7 static void test_add(struct atomics_lskel *skel) in test_add() argument
13 prog_fd = skel->progs.add.prog_fd; in test_add()
20 ASSERT_EQ(skel->data->add64_value, 3, "add64_value"); in test_add()
21 ASSERT_EQ(skel->bss->add64_result, 1, "add64_result"); in test_add()
23 ASSERT_EQ(skel->data->add32_value, 3, "add32_value"); in test_add()
24 ASSERT_EQ(skel->bss->add32_result, 1, "add32_result"); in test_add()
26 ASSERT_EQ(skel->bss->add_stack_value_copy, 3, "add_stack_value"); in test_add()
27 ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result"); in test_add()
29 ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value"); in test_add()
32 static void test_sub(struct atomics_lskel *skel) in test_sub() argument
[all …]
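
atomics drives each program with BPF_PROG_TEST_RUN rather than attaching it. The _lskel suffix means a light skeleton, which exposes a raw prog_fd per program instead of a struct bpf_program *. A sketch along the lines of the excerpt:

/* needs <bpf/bpf.h>, <bpf/libbpf.h> and the generated atomics.lskel.h */
static int run_add_once(struct atomics_lskel *skel)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	int prog_fd = skel->progs.add.prog_fd;	/* raw fd in a light skeleton */
	int err;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	if (err || topts.retval)
		return err ? err : -1;

	/* the run is synchronous, so .data updates are visible right away */
	return skel->data->add64_value == 3 ? 0 : -1;
}
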
test_struct_ops_module.c
39 static int attach_ops_and_check(struct struct_ops_module *skel, in attach_ops_and_check() argument
51 ASSERT_EQ(skel->bss->test_1_result, 0xdeadbeef, "test_1_result"); in attach_ops_and_check()
52 ASSERT_EQ(skel->bss->test_2_result, expected_test_2_result, "test_2_result"); in attach_ops_and_check()
60 struct struct_ops_module *skel; in test_struct_ops_load() local
65 skel = struct_ops_module__open(); in test_struct_ops_load()
66 if (!ASSERT_OK_PTR(skel, "struct_ops_module_open")) in test_struct_ops_load()
69 skel->struct_ops.testmod_1->data = 13; in test_struct_ops_load()
70 skel->struct_ops.testmod_1->test_2 = skel->progs.test_3; in test_struct_ops_load()
74 bpf_program__set_autoload(skel->progs.test_2, false); in test_struct_ops_load()
75 bpf_map__set_autocreate(skel->maps.testmod_zeroed, false); in test_struct_ops_load()
[all …]
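
For struct_ops the skeleton exposes the map's members directly, so a test can patch scalar fields and even repoint an ops slot at another BPF program in the window between open and load; autoload/autocreate toggles then strip out pieces a given variant does not need. A sketch of that window (the real test attaches and checks results before destroying):

/* needs the generated struct_ops_module.skel.h */
static int load_patched_variant(void)
{
	struct struct_ops_module *skel;
	int err;

	skel = struct_ops_module__open();
	if (!skel)
		return -1;

	/* scalar members of the struct_ops map can be set in place ... */
	skel->struct_ops.testmod_1->data = 13;
	/* ... and an ops slot can point at a different BPF program */
	skel->struct_ops.testmod_1->test_2 = skel->progs.test_3;

	/* skip loading/creating what this variant does not exercise */
	bpf_program__set_autoload(skel->progs.test_2, false);
	bpf_map__set_autocreate(skel->maps.testmod_zeroed, false);

	err = struct_ops_module__load(skel);
	struct_ops_module__destroy(skel);
	return err;
}
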
test_strncmp.c
6 static int trigger_strncmp(const struct strncmp_test *skel) in trigger_strncmp() argument
12 cmp = skel->bss->cmp_ret; in trigger_strncmp()
24 static void strncmp_full_str_cmp(struct strncmp_test *skel, const char *name, in strncmp_full_str_cmp() argument
27 size_t nr = sizeof(skel->bss->str); in strncmp_full_str_cmp()
28 char *str = skel->bss->str; in strncmp_full_str_cmp()
33 memcpy(str, skel->rodata->target, nr); in strncmp_full_str_cmp()
37 got = trigger_strncmp(skel); in strncmp_full_str_cmp()
46 struct strncmp_test *skel; in test_strncmp_ret() local
49 skel = strncmp_test__open(); in test_strncmp_ret()
50 if (!ASSERT_OK_PTR(skel, "strncmp_test open")) in test_strncmp_ret()
[all …]
arena_atomics.c
6 static void test_add(struct arena_atomics *skel) in test_add() argument
12 prog_fd = bpf_program__fd(skel->progs.add); in test_add()
19 ASSERT_EQ(skel->arena->add64_value, 3, "add64_value"); in test_add()
20 ASSERT_EQ(skel->arena->add64_result, 1, "add64_result"); in test_add()
22 ASSERT_EQ(skel->arena->add32_value, 3, "add32_value"); in test_add()
23 ASSERT_EQ(skel->arena->add32_result, 1, "add32_result"); in test_add()
25 ASSERT_EQ(skel->arena->add_stack_value_copy, 3, "add_stack_value"); in test_add()
26 ASSERT_EQ(skel->arena->add_stack_result, 1, "add_stack_result"); in test_add()
28 ASSERT_EQ(skel->arena->add_noreturn_value, 3, "add_noreturn_value"); in test_add()
31 static void test_sub(struct arena_atomics *skel) in test_sub() argument
[all …]
bpf_loop.c
8 static void check_nr_loops(struct bpf_loop *skel) in check_nr_loops() argument
12 link = bpf_program__attach(skel->progs.test_prog); in check_nr_loops()
17 skel->bss->nr_loops = 0; in check_nr_loops()
21 ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops, in check_nr_loops()
25 skel->bss->nr_loops = 500; in check_nr_loops()
29 ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops, in check_nr_loops()
31 ASSERT_EQ(skel->bss->g_output, (500 * 499) / 2, "g_output"); in check_nr_loops()
34 skel->bss->nr_loops = -1; in check_nr_loops()
38 ASSERT_EQ(skel->bss->err, -E2BIG, "over max limit"); in check_nr_loops()
43 static void check_callback_fn_stop(struct bpf_loop *skel) in check_callback_fn_stop() argument
[all …]
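
bpf_loop steers the program through its globals: the test writes skel->bss->nr_loops as input, triggers the program, and reads nr_loops_returned back; a negative count makes the helper fail with -E2BIG. A sketch under the assumption (common in these tests) that the program is triggered by a nanosleep hook:

/* needs <unistd.h>, <bpf/libbpf.h> and the generated bpf_loop.skel.h */
static int run_with_nr_loops(struct bpf_loop *skel, int nr)
{
	struct bpf_link *link;
	int ret;

	link = bpf_program__attach(skel->progs.test_prog);
	if (!link)
		return -1;

	skel->bss->nr_loops = nr;	/* input read on the next trigger */
	usleep(1);			/* assumed trigger: nanosleep hook */

	/* output written back by the BPF side */
	ret = skel->bss->nr_loops_returned == nr ? 0 : -1;

	bpf_link__destroy(link);
	return ret;
}
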
bpf_iter.c
39 struct bpf_iter_test_kern3 *skel; in test_btf_id_or_null() local
41 skel = bpf_iter_test_kern3__open_and_load(); in test_btf_id_or_null()
42 if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) { in test_btf_id_or_null()
43 bpf_iter_test_kern3__destroy(skel); in test_btf_id_or_null()
78 static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog, in do_read_map_iter_fd() argument
103 bpf_object__destroy_skeleton(*skel); in do_read_map_iter_fd()
104 *skel = NULL; in do_read_map_iter_fd()
140 struct bpf_iter_ipv6_route *skel; in test_ipv6_route() local
142 skel = bpf_iter_ipv6_route__open_and_load(); in test_ipv6_route()
143 if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load")) in test_ipv6_route()
[all …]
struct_ops_autocreate.c
9 struct struct_ops_autocreate *skel; in cant_load_full_object() local
13 skel = struct_ops_autocreate__open(); in cant_load_full_object()
14 if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open")) in cant_load_full_object()
24 err = struct_ops_autocreate__load(skel); in cant_load_full_object()
34 struct_ops_autocreate__destroy(skel); in cant_load_full_object()
37 static int check_test_1_link(struct struct_ops_autocreate *skel, struct bpf_map *map) in check_test_1_link() argument
42 link = bpf_map__attach_struct_ops(skel->maps.testmod_1); in check_test_1_link()
47 err = ASSERT_EQ(skel->bss->test_1_result, 42, "test_1_result"); in check_test_1_link()
54 struct struct_ops_autocreate *skel; in can_load_partial_object() local
57 skel = struct_ops_autocreate__open(); in can_load_partial_object()
[all …]
tc_links.c
22 struct test_tc_link *skel; in serial_test_tc_links_basic() local
26 skel = test_tc_link__open_and_load(); in serial_test_tc_links_basic()
27 if (!ASSERT_OK_PTR(skel, "skel_load")) in serial_test_tc_links_basic()
30 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); in serial_test_tc_links_basic()
31 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); in serial_test_tc_links_basic()
38 ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1"); in serial_test_tc_links_basic()
39 ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); in serial_test_tc_links_basic()
41 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); in serial_test_tc_links_basic()
45 skel->links.tc1 = link; in serial_test_tc_links_basic()
47 lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); in serial_test_tc_links_basic()
[all …]
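
tc_links attaches through TCX links, which take an ifindex plus an opts struct; id_from_prog_fd() and id_from_link_fd() are test-local helpers for reading the IDs back. A sketch of the attach step:

/* needs <errno.h>, <bpf/libbpf.h> and the generated test_tc_link.skel.h */
static int attach_tc1(struct test_tc_link *skel, int ifindex)
{
	LIBBPF_OPTS(bpf_tcx_opts, optl);
	struct bpf_link *link;

	link = bpf_program__attach_tcx(skel->progs.tc1, ifindex, &optl);
	if (!link)
		return -errno;

	/* stored in the skeleton so cleanup detaches it automatically */
	skel->links.tc1 = link;
	return 0;
}
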
iters.c
27 struct iters_num *skel; in subtest_num_iters() local
30 skel = iters_num__open_and_load(); in subtest_num_iters()
31 if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) in subtest_num_iters()
34 err = iters_num__attach(skel); in subtest_num_iters()
39 iters_num__detach(skel); in subtest_num_iters()
42 ASSERT_EQ(skel->bss->res_##case_name, \ in subtest_num_iters()
43 skel->rodata->exp_##case_name, \ in subtest_num_iters()
67 iters_num__destroy(skel); in subtest_num_iters()
72 struct iters_testmod_seq *skel; in subtest_testmod_seq_iters() local
80 skel = iters_testmod_seq__open_and_load(); in subtest_testmod_seq_iters()
[all …]
map_ops.c
46 static int setup(struct test_map_ops **skel) in setup() argument
50 if (!skel) in setup()
53 *skel = test_map_ops__open(); in setup()
54 if (!ASSERT_OK_PTR(*skel, "test_map_ops__open")) in setup()
57 (*skel)->rodata->pid = getpid(); in setup()
59 err = test_map_ops__load(*skel); in setup()
63 err = test_map_ops__attach(*skel); in setup()
70 static void teardown(struct test_map_ops **skel) in teardown() argument
72 if (skel && *skel) in teardown()
73 test_map_ops__destroy(*skel); in teardown()
[all …]
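
The double-pointer signature lets a caller hand over one handle and rely on a single teardown path no matter how far setup() got; note that rodata can only be written between open and load. A sketch of such a caller, assuming the helpers shown above (and that teardown() also NULLs the handle, which the excerpt cuts off):

/* sketch of a caller of the setup()/teardown() pair above */
static void subtest(void)
{
	struct test_map_ops *skel = NULL;

	if (setup(&skel))	/* open, set rodata->pid, load, attach */
		goto done;

	/* ... trigger the attached programs and check skel->bss ... */

done:
	teardown(&skel);	/* tolerates a partially-completed setup */
}
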
uprobe_multi_test.c
194 static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child) in uprobe_multi_test_run() argument
196 skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1; in uprobe_multi_test_run()
197 skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2; in uprobe_multi_test_run()
198 skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3; in uprobe_multi_test_run()
200 skel->bss->user_ptr = test_data; in uprobe_multi_test_run()
207 skel->bss->pid = child ? 0 : getpid(); in uprobe_multi_test_run()
208 skel->bss->expect_pid = child ? child->pid : 0; in uprobe_multi_test_run()
229 ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result"); in uprobe_multi_test_run()
230 ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result"); in uprobe_multi_test_run()
231 ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 2, "uprobe_multi_func_3_result"); in uprobe_multi_test_run()
[all …]
find_vma.c
10 static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret, bool need_test) in test_and_reset_skel() argument
13 ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec"); in test_and_reset_skel()
14 ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret"); in test_and_reset_skel()
15 ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret"); in test_and_reset_skel()
16 ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs"); in test_and_reset_skel()
19 skel->bss->found_vm_exec = 0; in test_and_reset_skel()
20 skel->data->find_addr_ret = -1; in test_and_reset_skel()
21 skel->data->find_zero_ret = -1; in test_and_reset_skel()
22 skel->bss->d_iname[0] = 0; in test_and_reset_skel()
41 static bool find_vma_pe_condition(struct find_vma *skel) in find_vma_pe_condition() argument
[all …]
btf_tag.c
17 struct test_btf_decl_tag *skel; in test_btf_decl_tag() local
19 skel = test_btf_decl_tag__open_and_load(); in test_btf_decl_tag()
20 if (!ASSERT_OK_PTR(skel, "btf_decl_tag")) in test_btf_decl_tag()
23 if (skel->rodata->skip_tests) { in test_btf_decl_tag()
28 test_btf_decl_tag__destroy(skel); in test_btf_decl_tag()
33 struct btf_type_tag *skel; in test_btf_type_tag() local
35 skel = btf_type_tag__open_and_load(); in test_btf_type_tag()
36 if (!ASSERT_OK_PTR(skel, "btf_type_tag")) in test_btf_type_tag()
39 if (skel->rodata->skip_tests) { in test_btf_type_tag()
44 btf_type_tag__destroy(skel); in test_btf_type_tag()
[all …]
cgroup1_hierarchy.c
10 static void bpf_cgroup1(struct test_cgroup1_hierarchy *skel) in bpf_cgroup1() argument
16 lsm_link = bpf_program__attach_lsm(skel->progs.lsm_run); in bpf_cgroup1()
21 fentry_link = bpf_program__attach_trace(skel->progs.fentry_run); in bpf_cgroup1()
28 static void bpf_cgroup1_sleepable(struct test_cgroup1_hierarchy *skel) in bpf_cgroup1_sleepable() argument
34 lsm_link = bpf_program__attach_lsm(skel->progs.lsm_s_run); in bpf_cgroup1_sleepable()
39 fentry_link = bpf_program__attach_trace(skel->progs.fentry_run); in bpf_cgroup1_sleepable()
46 static void bpf_cgroup1_invalid_id(struct test_cgroup1_hierarchy *skel) in bpf_cgroup1_invalid_id() argument
52 lsm_link = bpf_program__attach_lsm(skel->progs.lsm_run); in bpf_cgroup1_invalid_id()
57 fentry_link = bpf_program__attach_trace(skel->progs.fentry_run); in bpf_cgroup1_invalid_id()
71 struct test_cgroup1_hierarchy *skel; in test_cgroup1_hierarchy() local
[all …]
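
cgroup1_hierarchy attaches an LSM program and an fentry program side by side; each program type has its own attach helper. A sketch:

/* needs <errno.h>, <bpf/libbpf.h> and the generated skeleton header */
static int attach_pair(struct test_cgroup1_hierarchy *skel)
{
	struct bpf_link *lsm_link, *fentry_link;

	lsm_link = bpf_program__attach_lsm(skel->progs.lsm_run);
	if (!lsm_link)
		return -errno;

	fentry_link = bpf_program__attach_trace(skel->progs.fentry_run);
	if (!fentry_link) {
		bpf_link__destroy(lsm_link);
		return -errno;
	}

	/* ... exercise both hooks, then drop the links ... */
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(lsm_link);
	return 0;
}
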
global_map_resize.c
22 struct test_global_map_resize *skel; in global_map_resize_bss_subtest() local
24 const __u32 desired_sz = sizeof(skel->bss->sum) + sysconf(_SC_PAGE_SIZE) * 2; in global_map_resize_bss_subtest()
27 skel = test_global_map_resize__open(); in global_map_resize_bss_subtest()
28 if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open")) in global_map_resize_bss_subtest()
35 skel->bss->array[0] = 1; in global_map_resize_bss_subtest()
38 map = skel->maps.bss; in global_map_resize_bss_subtest()
45 new_sz = sizeof(skel->data_percpu_arr->percpu_arr[0]) * libbpf_num_possible_cpus(); in global_map_resize_bss_subtest()
46 err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz); in global_map_resize_bss_subtest()
50 array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->bss->array[0]); in global_map_resize_bss_subtest()
54 skel->bss = bpf_map__initial_value(skel->maps.bss, &actual_sz); in global_map_resize_bss_subtest()
[all …]
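
global_map_resize grows the .bss global-data map before load; since bpf_map__set_value_size() remaps the region backing skel->bss, the pointer has to be re-fetched with bpf_map__initial_value(). A sketch of the core steps:

/* needs <unistd.h> and the generated test_global_map_resize.skel.h;
 * must run between __open() and __load() */
static int grow_bss(struct test_global_map_resize *skel)
{
	size_t new_sz = sizeof(skel->bss->sum) + sysconf(_SC_PAGE_SIZE) * 2;
	size_t actual_sz;

	if (bpf_map__set_value_size(skel->maps.bss, new_sz))
		return -1;

	/* the resize remapped the region, so the old skel->bss is stale */
	skel->bss = bpf_map__initial_value(skel->maps.bss, &actual_sz);
	if (!skel->bss || actual_sz != new_sz)
		return -1;

	return 0;
}
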
rbtree.c
19 struct rbtree *skel; in test_rbtree_add_nodes() local
22 skel = rbtree__open_and_load(); in test_rbtree_add_nodes()
23 if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load")) in test_rbtree_add_nodes()
26 ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes), &opts); in test_rbtree_add_nodes()
29 ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes less_callback_ran"); in test_rbtree_add_nodes()
31 rbtree__destroy(skel); in test_rbtree_add_nodes()
41 struct rbtree *skel; in test_rbtree_add_nodes_nested() local
44 skel = rbtree__open_and_load(); in test_rbtree_add_nodes_nested()
45 if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load")) in test_rbtree_add_nodes_nested()
48 ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes_nested), &opts); in test_rbtree_add_nodes_nested()
[all …]
percpu_alloc.c
9 struct percpu_alloc_array *skel; in test_array() local
13 skel = percpu_alloc_array__open(); in test_array()
14 if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open")) in test_array()
17 bpf_program__set_autoload(skel->progs.test_array_map_1, true); in test_array()
18 bpf_program__set_autoload(skel->progs.test_array_map_2, true); in test_array()
19 bpf_program__set_autoload(skel->progs.test_array_map_3, true); in test_array()
20 bpf_program__set_autoload(skel->progs.test_array_map_4, true); in test_array()
22 skel->bss->my_pid = getpid(); in test_array()
23 skel->rodata->nr_cpus = libbpf_num_possible_cpus(); in test_array()
25 err = percpu_alloc_array__load(skel); in test_array()
[all …]
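
percpu_alloc enables autoload per program, the mirror image of the struct_ops case above: programs that default to off (typically declared with a '?' SEC() prefix) are switched on selectively, and run-time inputs are seeded before load. A sketch:

/* needs <unistd.h>, <bpf/libbpf.h> and the generated skeleton header */
static void test_array_sketch(void)
{
	struct percpu_alloc_array *skel;

	skel = percpu_alloc_array__open();
	if (!skel)
		return;

	/* programs off by default are enabled one by one */
	bpf_program__set_autoload(skel->progs.test_array_map_1, true);

	/* inputs the programs read at run time */
	skel->bss->my_pid = getpid();
	skel->rodata->nr_cpus = libbpf_num_possible_cpus();

	if (percpu_alloc_array__load(skel))
		goto cleanup;

	/* ... attach, trigger the programs, check outputs ... */
cleanup:
	percpu_alloc_array__destroy(skel);
}
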
check_mtu.c
43 struct test_check_mtu *skel; in test_check_mtu_xdp_attach() local
49 skel = test_check_mtu__open_and_load(); in test_check_mtu_xdp_attach()
50 if (CHECK(!skel, "open and load skel", "failed")) in test_check_mtu_xdp_attach()
53 prog = skel->progs.xdp_use_helper_basic; in test_check_mtu_xdp_attach()
58 skel->links.xdp_use_helper_basic = link; in test_check_mtu_xdp_attach()
75 test_check_mtu__destroy(skel); in test_check_mtu_xdp_attach()
78 static void test_check_mtu_run_xdp(struct test_check_mtu *skel, in test_check_mtu_run_xdp() argument
99 mtu_result = skel->bss->global_bpf_mtu_xdp; in test_check_mtu_run_xdp()
106 struct test_check_mtu *skel; in test_check_mtu_xdp() local
109 skel = test_check_mtu__open(); in test_check_mtu_xdp()
[all …]
test_ldsx_insn.c
10 struct test_ldsx_insn *skel; in test_map_val_and_probed_memory() local
13 skel = test_ldsx_insn__open(); in test_map_val_and_probed_memory()
14 if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open")) in test_map_val_and_probed_memory()
17 if (skel->rodata->skip) { in test_map_val_and_probed_memory()
22 bpf_program__set_autoload(skel->progs.rdonly_map_prog, true); in test_map_val_and_probed_memory()
23 bpf_program__set_autoload(skel->progs.map_val_prog, true); in test_map_val_and_probed_memory()
24 bpf_program__set_autoload(skel->progs.test_ptr_struct_arg, true); in test_map_val_and_probed_memory()
26 err = test_ldsx_insn__load(skel); in test_map_val_and_probed_memory()
30 err = test_ldsx_insn__attach(skel); in test_map_val_and_probed_memory()
36 ASSERT_EQ(skel->bss->done1, 1, "done1"); in test_map_val_and_probed_memory()
[all …]
user_ringbuf.c
73 struct user_ringbuf_success *skel; in open_load_ringbuf_skel() local
76 skel = user_ringbuf_success__open(); in open_load_ringbuf_skel()
77 if (!ASSERT_OK_PTR(skel, "skel_open")) in open_load_ringbuf_skel()
80 err = bpf_map__set_max_entries(skel->maps.user_ringbuf, c_ringbuf_size); in open_load_ringbuf_skel()
84 err = bpf_map__set_max_entries(skel->maps.kernel_ringbuf, c_ringbuf_size); in open_load_ringbuf_skel()
88 err = user_ringbuf_success__load(skel); in open_load_ringbuf_skel()
92 return skel; in open_load_ringbuf_skel()
95 user_ringbuf_success__destroy(skel); in open_load_ringbuf_skel()
104 struct user_ringbuf_success *skel; in test_user_ringbuf_mappings() local
106 skel = open_load_ringbuf_skel(); in test_user_ringbuf_mappings()
[all …]
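
user_ringbuf sizes both ring buffers before load; for ring buffer maps, max_entries is the buffer size in bytes and can only be changed while the object is still unloaded. A sketch of the open-size-load helper the excerpt shows:

/* needs the generated user_ringbuf_success.skel.h; ringbuf_sz should be
 * a page-aligned power of two */
static struct user_ringbuf_success *open_load_sized(__u32 ringbuf_sz)
{
	struct user_ringbuf_success *skel;

	skel = user_ringbuf_success__open();
	if (!skel)
		return NULL;

	/* for ringbuf maps, max_entries is the size in bytes */
	if (bpf_map__set_max_entries(skel->maps.user_ringbuf, ringbuf_sz) ||
	    bpf_map__set_max_entries(skel->maps.kernel_ringbuf, ringbuf_sz) ||
	    user_ringbuf_success__load(skel))
		goto err_out;

	return skel;

err_out:
	user_ringbuf_success__destroy(skel);
	return NULL;
}
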
/linux/kernel/bpf/preload/iterators/
iterators.lskel-little-endian.h
24 iterators_bpf__dump_bpf_map__attach(struct iterators_bpf *skel) in iterators_bpf__dump_bpf_map__attach() argument
26 int prog_fd = skel->progs.dump_bpf_map.prog_fd; in iterators_bpf__dump_bpf_map__attach()
30 skel->links.dump_bpf_map_fd = fd; in iterators_bpf__dump_bpf_map__attach()
35 iterators_bpf__dump_bpf_prog__attach(struct iterators_bpf *skel) in iterators_bpf__dump_bpf_prog__attach() argument
37 int prog_fd = skel->progs.dump_bpf_prog.prog_fd; in iterators_bpf__dump_bpf_prog__attach()
41 skel->links.dump_bpf_prog_fd = fd; in iterators_bpf__dump_bpf_prog__attach()
46 iterators_bpf__attach(struct iterators_bpf *skel) in iterators_bpf__attach() argument
50 ret = ret < 0 ? ret : iterators_bpf__dump_bpf_map__attach(skel); in iterators_bpf__attach()
51 ret = ret < 0 ? ret : iterators_bpf__dump_bpf_prog__attach(skel); in iterators_bpf__attach()
56 iterators_bpf__detach(struct iterators_bpf *skel) in iterators_bpf__detach() argument
[all …]
iterators.lskel-big-endian.h
24 iterators_bpf__dump_bpf_map__attach(struct iterators_bpf *skel) in iterators_bpf__dump_bpf_map__attach() argument
26 int prog_fd = skel->progs.dump_bpf_map.prog_fd; in iterators_bpf__dump_bpf_map__attach()
30 skel->links.dump_bpf_map_fd = fd; in iterators_bpf__dump_bpf_map__attach()
35 iterators_bpf__dump_bpf_prog__attach(struct iterators_bpf *skel) in iterators_bpf__dump_bpf_prog__attach() argument
37 int prog_fd = skel->progs.dump_bpf_prog.prog_fd; in iterators_bpf__dump_bpf_prog__attach()
41 skel->links.dump_bpf_prog_fd = fd; in iterators_bpf__dump_bpf_prog__attach()
46 iterators_bpf__attach(struct iterators_bpf *skel) in iterators_bpf__attach() argument
50 ret = ret < 0 ? ret : iterators_bpf__dump_bpf_map__attach(skel); in iterators_bpf__attach()
51 ret = ret < 0 ? ret : iterators_bpf__dump_bpf_prog__attach(skel); in iterators_bpf__attach()
56 iterators_bpf__detach(struct iterators_bpf *skel) in iterators_bpf__detach() argument
[all …]
/linux/tools/sched_ext/
scx_qmap.c
15 #include "scx_qmap.bpf.skel.h"
57 struct scx_qmap *skel; in main() local
65 skel = SCX_OPS_OPEN(qmap_ops, scx_qmap); in main()
67 skel->rodata->slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL"); in main()
72 skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000; in main()
75 skel->bss->test_error_cnt = strtoul(optarg, NULL, 0); in main()
78 skel->rodata->stall_user_nth = strtoul(optarg, NULL, 0); in main()
81 skel->rodata->stall_kernel_nth = strtoul(optarg, NULL, 0); in main()
84 skel->rodata->dsp_inf_loop_after = strtoul(optarg, NULL, 0); in main()
87 skel in main()
[all …]
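
scx_qmap parses its command line straight into the skeleton's globals before the scheduler is loaded: values the BPF side treats as constants go into skel->rodata, mutable counters into skel->bss. A sketch of that wiring (the option letters are illustrative):

/* inside a getopt() loop, between open and load; letters illustrative */
switch (opt) {
case 's':	/* slice length: usecs on the command line, nsecs in BPF */
	skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000;
	break;
case 'e':	/* fail on purpose after N events */
	skel->bss->test_error_cnt = strtoul(optarg, NULL, 0);
	break;
case 't':	/* stall every Nth user task */
	skel->rodata->stall_user_nth = strtoul(optarg, NULL, 0);
	break;
}
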
