xref: /linux/tools/testing/selftests/bpf/prog_tests/htab_update.c (revision eb71ab2bf72260054677e348498ba995a057c463)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022. Huawei Technologies Co., Ltd */
3 #define _GNU_SOURCE
4 #include <sched.h>
5 #include <stdbool.h>
6 #include <test_progs.h>
7 #include "htab_update.skel.h"
8 
/* Shared state handed to every htab_update_thread() worker. */
struct htab_update_ctx {
	int fd;		/* fd of the hash map being updated */
	int loop;	/* upper bound on update attempts per thread */
	bool stop;	/* set by a failing thread to stop the others;
			 * NOTE(review): plain bool written/read concurrently
			 * without atomics — presumably tolerated in this
			 * selftest, confirm no stricter guarantee is needed.
			 */
};
14 
test_reenter_update(void)15 static void test_reenter_update(void)
16 {
17 	struct htab_update *skel;
18 	void *value = NULL;
19 	unsigned int key, value_size;
20 	int err;
21 
22 	skel = htab_update__open();
23 	if (!ASSERT_OK_PTR(skel, "htab_update__open"))
24 		return;
25 
26 	bpf_program__set_autoload(skel->progs.bpf_obj_free_fields, true);
27 	err = htab_update__load(skel);
28 	if (!ASSERT_TRUE(!err, "htab_update__load") || err)
29 		goto out;
30 
31 	skel->bss->pid = getpid();
32 	err = htab_update__attach(skel);
33 	if (!ASSERT_OK(err, "htab_update__attach"))
34 		goto out;
35 
36 	value_size = bpf_map__value_size(skel->maps.htab);
37 
38 	value = calloc(1, value_size);
39 	if (!ASSERT_OK_PTR(value, "calloc value"))
40 		goto out;
41 	/*
42 	 * First update: plain insert. This should NOT trigger the re-entrancy
43 	 * path, because there is no old element to free yet.
44 	 */
45 	key = 0;
46 	err = bpf_map_update_elem(bpf_map__fd(skel->maps.htab), &key, value, BPF_ANY);
47 	if (!ASSERT_OK(err, "first update (insert)"))
48 		goto out;
49 
50 	/*
51 	 * Second update: replace existing element with same key and trigger
52 	 * the reentrancy of bpf_map_update_elem().
53 	 * check_and_free_fields() calls bpf_obj_free_fields() on the old
54 	 * value, which is where fentry program runs and performs a nested
55 	 * bpf_map_update_elem(), triggering -EDEADLK.
56 	 */
57 	memset(value, 0, value_size);
58 	err = bpf_map_update_elem(bpf_map__fd(skel->maps.htab), &key, value, BPF_ANY);
59 	if (!ASSERT_OK(err, "second update (replace)"))
60 		goto out;
61 
62 	ASSERT_EQ(skel->bss->update_err, -EDEADLK, "no reentrancy");
63 out:
64 	free(value);
65 	htab_update__destroy(skel);
66 }
67 
htab_update_thread(void * arg)68 static void *htab_update_thread(void *arg)
69 {
70 	struct htab_update_ctx *ctx = arg;
71 	cpu_set_t cpus;
72 	int i;
73 
74 	/* Pinned on CPU 0 */
75 	CPU_ZERO(&cpus);
76 	CPU_SET(0, &cpus);
77 	pthread_setaffinity_np(pthread_self(), sizeof(cpus), &cpus);
78 
79 	i = 0;
80 	while (i++ < ctx->loop && !ctx->stop) {
81 		unsigned int key = 0, value = 0;
82 		int err;
83 
84 		err = bpf_map_update_elem(ctx->fd, &key, &value, 0);
85 		if (err) {
86 			ctx->stop = true;
87 			return (void *)(long)err;
88 		}
89 	}
90 
91 	return NULL;
92 }
93 
test_concurrent_update(void)94 static void test_concurrent_update(void)
95 {
96 	struct htab_update_ctx ctx;
97 	struct htab_update *skel;
98 	unsigned int i, nr;
99 	pthread_t *tids;
100 	int err;
101 
102 	skel = htab_update__open_and_load();
103 	if (!ASSERT_OK_PTR(skel, "htab_update__open_and_load"))
104 		return;
105 
106 	ctx.fd = bpf_map__fd(skel->maps.htab);
107 	ctx.loop = 1000;
108 	ctx.stop = false;
109 
110 	nr = 4;
111 	tids = calloc(nr, sizeof(*tids));
112 	if (!ASSERT_NEQ(tids, NULL, "no mem"))
113 		goto out;
114 
115 	for (i = 0; i < nr; i++) {
116 		err = pthread_create(&tids[i], NULL, htab_update_thread, &ctx);
117 		if (!ASSERT_OK(err, "pthread_create")) {
118 			unsigned int j;
119 
120 			ctx.stop = true;
121 			for (j = 0; j < i; j++)
122 				pthread_join(tids[j], NULL);
123 			goto out;
124 		}
125 	}
126 
127 	for (i = 0; i < nr; i++) {
128 		void *thread_err = NULL;
129 
130 		pthread_join(tids[i], &thread_err);
131 		ASSERT_EQ(thread_err, NULL, "update error");
132 	}
133 
134 out:
135 	if (tids)
136 		free(tids);
137 	htab_update__destroy(skel);
138 }
139 
/* Entry point: register and run the two htab_update subtests. */
void test_htab_update(void)
{
	if (test__start_subtest("reenter_update"))
		test_reenter_update();
	if (test__start_subtest("concurrent_update"))
		test_concurrent_update();
}
147