xref: /linux/tools/testing/selftests/bpf/progs/timer.c (revision 80154575849778e40d9d87aa7ab14491ac401948)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2021 Facebook */
3 #include <linux/bpf.h>
4 #include <time.h>
5 #include <errno.h>
6 #include <bpf/bpf_helpers.h>
7 #include "bpf_tcp_helpers.h"
8 
char _license[] SEC("license") = "GPL";

/* value type for both hash maps: per-element counter plus embedded timer */
struct hmap_elem {
	int counter;			/* remaining timer_cb2 invocations */
	struct bpf_timer timer;
	struct bpf_spin_lock lock; /* unused */
};
15 
/* preallocated hashmap with an embedded timer per element */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1000);
	__type(key, int);
	__type(value, struct hmap_elem);
} hmap SEC(".maps");
22 
/* non-preallocated (kmalloc-backed) variant of hmap */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, 1000);
	__type(key, int);
	__type(value, struct hmap_elem);
} hmap_malloc SEC(".maps");
30 
/* minimal map value wrapping a single timer */
struct elem {
	struct bpf_timer t;
};
34 
/* array map driving timer_cb1; slot ARRAY (1) holds the active timer */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 2);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");
41 
/* small LRU map: timer_cb1 floods it to force eviction of its own element */
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, 4);
	__type(key, int);
	__type(value, struct elem);
} lru SEC(".maps");
48 
/* three single-slot maps: absolute timer, CPU-pinned relative timer,
 * CPU-pinned absolute timer
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps");
55 
__u64 bss_data;			/* bumped by timer_cb1; user space checks it */
__u64 abs_data;			/* bumped by timer_cb3 (absolute timers) */
__u64 err;			/* bitmask of failures; user space expects 0 */
__u64 ok;			/* bitmask of completed sub-tests */
__u64 callback_check = 52;	/* decremented by timer_cb2 for hmap */
__u64 callback2_check = 52;	/* decremented by timer_cb2 for hmap_malloc */
__u64 pinned_callback_check;	/* counts pinned-timer callback invocations */
__s32 pinned_cpu;		/* CPU the pinned timers were armed on */
64 
/* map keys; the callbacks use *key to tell which timer fired */
#define ARRAY 1
#define HTAB 2
#define HTAB_MALLOC 3
#define LRU 4
69 
/* Callback shared by the array and lru timers.
 *
 * Expected sequence: the array timer fires first (key ARRAY), re-arms
 * itself far in the future and starts the lru timer; the lru timer
 * (key LRU) then floods the lru map until its own element is evicted
 * and verifies that eviction cancelled the timer.
 */
static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
{
	/* increment bss variable twice.
	 * Once via array timer callback and once via lru timer callback
	 */
	bss_data += 5;

	/* *key == 1 (ARRAY) - the callback was called for array timer.
	 * *key == 4 (LRU) - the callback was called from lru timer.
	 */
	if (*key == ARRAY) {
		struct bpf_timer *lru_timer;
		int lru_key = LRU;

		/* rearm array timer to be called again in ~35 seconds */
		if (bpf_timer_start(timer, 1ull << 35, 0) != 0)
			err |= 1;

		lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
		if (!lru_timer)
			return 0;
		bpf_timer_set_callback(lru_timer, timer_cb1);
		/* zero expiry: fire the lru timer as soon as possible */
		if (bpf_timer_start(lru_timer, 0, 0) != 0)
			err |= 2;
	} else if (*key == LRU) {
		int lru_key, i;

		for (i = LRU + 1;
		     i <= 100  /* for current LRU eviction algorithm this number
				* should be larger than ~ lru->max_entries * 2
				*/;
		     i++) {
			struct elem init = {};

			/* lru_key cannot be used as loop induction variable
			 * otherwise the loop will be unbounded.
			 */
			lru_key = i;

			/* add more elements into lru map to push out current
			 * element and force deletion of this timer
			 */
			bpf_map_update_elem(map, &lru_key, &init, 0);
			/* look it up to bump it into active list */
			bpf_map_lookup_elem(map, &lru_key);

			/* keep adding until *key changes underneath,
			 * which means that key/timer memory was reused
			 */
			if (*key != LRU)
				break;
		}

		/* check that the timer was removed: cancelling a timer whose
		 * element was evicted must fail with -EINVAL
		 */
		if (bpf_timer_cancel(timer) != -EINVAL)
			err |= 4;
		ok |= 1;
	}
	return 0;
}
131 
/* Kick off the array/lru timer test; timer_cb1 drives the rest. */
SEC("fentry/bpf_fentry_test1")
int BPF_PROG2(test1, int, a)
{
	struct bpf_timer *arr_timer, *lru_timer;
	struct elem init = {};
	int lru_key = LRU;
	int array_key = ARRAY;

	arr_timer = bpf_map_lookup_elem(&array, &array_key);
	if (!arr_timer)
		return 0;
	bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);

	/* the lru element must exist before its timer can be initialized */
	bpf_map_update_elem(&lru, &lru_key, &init, 0);
	lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
	if (!lru_timer)
		return 0;
	bpf_timer_init(lru_timer, &lru, CLOCK_MONOTONIC);

	bpf_timer_set_callback(arr_timer, timer_cb1);
	bpf_timer_start(arr_timer, 0 /* call timer_cb1 asap */, 0);

	/* init more timers to check that array destruction
	 * doesn't leak timer memory.
	 */
	array_key = 0;
	arr_timer = bpf_map_lookup_elem(&array, &array_key);
	if (!arr_timer)
		return 0;
	bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);
	return 0;
}
164 
/* callback for prealloc and non-prealloc hashtab timers.
 *
 * Decrements val->counter on each run and re-arms itself until it
 * reaches zero; then exercises self-cancel (-EDEADLK) and deleting the
 * element that owns the running timer.
 */
static int timer_cb2(void *map, int *key, struct hmap_elem *val)
{
	if (*key == HTAB)
		callback_check--;
	else
		callback2_check--;
	if (val->counter > 0 && --val->counter) {
		/* re-arm the timer again to execute after 1 usec */
		bpf_timer_start(&val->timer, 1000, 0);
	} else if (*key == HTAB) {
		struct bpf_timer *arr_timer;
		int array_key = ARRAY;

		/* cancel arr_timer otherwise bpf_fentry_test1 prog
		 * will stay alive forever.
		 */
		arr_timer = bpf_map_lookup_elem(&array, &array_key);
		if (!arr_timer)
			return 0;
		if (bpf_timer_cancel(arr_timer) != 1)
			/* bpf_timer_cancel should return 1 to indicate
			 * that arr_timer was active at this time
			 */
			err |= 8;

		/* try to cancel ourself. It shouldn't deadlock. */
		if (bpf_timer_cancel(&val->timer) != -EDEADLK)
			err |= 16;

		/* delete this key and this timer anyway.
		 * It shouldn't deadlock either.
		 */
		bpf_map_delete_elem(map, key);

		/* in preallocated hashmap both 'key' and 'val' could have been
		 * reused to store another map element (like in LRU above),
		 * but in controlled test environment the below test works.
		 * It's not a use-after-free. The memory is owned by the map.
		 */
		if (bpf_timer_start(&val->timer, 1000, 0) != -EINVAL)
			err |= 32;
		ok |= 2;
	} else {
		if (*key != HTAB_MALLOC)
			err |= 64;

		/* try to cancel ourself. It shouldn't deadlock. */
		if (bpf_timer_cancel(&val->timer) != -EDEADLK)
			err |= 128;

		/* delete this key and this timer anyway.
		 * It shouldn't deadlock either.
		 */
		bpf_map_delete_elem(map, key);

		ok |= 4;
	}
	return 0;
}
225 
226 int bpf_timer_test(void)
227 {
228 	struct hmap_elem *val;
229 	int key = HTAB, key_malloc = HTAB_MALLOC;
230 
231 	val = bpf_map_lookup_elem(&hmap, &key);
232 	if (val) {
233 		if (bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME) != 0)
234 			err |= 512;
235 		bpf_timer_set_callback(&val->timer, timer_cb2);
236 		bpf_timer_start(&val->timer, 1000, 0);
237 	}
238 	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
239 	if (val) {
240 		if (bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME) != 0)
241 			err |= 1024;
242 		bpf_timer_set_callback(&val->timer, timer_cb2);
243 		bpf_timer_start(&val->timer, 1000, 0);
244 	}
245 	return 0;
246 }
247 
/* Exercise timer lifetime across map update/delete in both hashmap
 * flavors (each update/delete of an element with an initialized timer
 * must free it), then arm the timer_cb2 timers via bpf_timer_test().
 */
SEC("fentry/bpf_fentry_test2")
int BPF_PROG2(test2, int, a, int, b)
{
	struct hmap_elem init = {}, *val;
	int key = HTAB, key_malloc = HTAB_MALLOC;

	init.counter = 10; /* number of times to trigger timer_cb2 */
	bpf_map_update_elem(&hmap, &key, &init, 0);
	val = bpf_map_lookup_elem(&hmap, &key);
	if (val)
		bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
	/* update the same key to free the timer */
	bpf_map_update_elem(&hmap, &key, &init, 0);

	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val)
		bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
	/* update the same key to free the timer */
	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);

	/* init more timers to check that htab operations
	 * don't leak timer memory.
	 */
	key = 0;
	bpf_map_update_elem(&hmap, &key, &init, 0);
	val = bpf_map_lookup_elem(&hmap, &key);
	if (val)
		bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
	bpf_map_delete_elem(&hmap, &key);
	bpf_map_update_elem(&hmap, &key, &init, 0);
	val = bpf_map_lookup_elem(&hmap, &key);
	if (val)
		bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);

	/* and with non-prealloc htab */
	key_malloc = 0;
	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val)
		bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
	bpf_map_delete_elem(&hmap_malloc, &key_malloc);
	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val)
		bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);

	return bpf_timer_test();
}
297 
298 /* callback for absolute timer */
299 static int timer_cb3(void *map, int *key, struct bpf_timer *timer)
300 {
301 	abs_data += 6;
302 
303 	if (abs_data < 12) {
304 		bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
305 				BPF_F_TIMER_ABS);
306 	} else {
307 		/* Re-arm timer ~35 seconds in future */
308 		bpf_timer_start(timer, bpf_ktime_get_boot_ns() + (1ull << 35),
309 				BPF_F_TIMER_ABS);
310 	}
311 
312 	return 0;
313 }
314 
315 SEC("fentry/bpf_fentry_test3")
316 int BPF_PROG2(test3, int, a)
317 {
318 	int key = 0;
319 	struct bpf_timer *timer;
320 
321 	bpf_printk("test3");
322 
323 	timer = bpf_map_lookup_elem(&abs_timer, &key);
324 	if (timer) {
325 		if (bpf_timer_init(timer, &abs_timer, CLOCK_BOOTTIME) != 0)
326 			err |= 2048;
327 		bpf_timer_set_callback(timer, timer_cb3);
328 		bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
329 				BPF_F_TIMER_ABS);
330 	}
331 
332 	return 0;
333 }
334 
335 /* callback for pinned timer */
336 static int timer_cb_pinned(void *map, int *key, struct bpf_timer *timer)
337 {
338 	__s32 cpu = bpf_get_smp_processor_id();
339 
340 	if (cpu != pinned_cpu)
341 		err |= 16384;
342 
343 	pinned_callback_check++;
344 	return 0;
345 }
346 
347 static void test_pinned_timer(bool soft)
348 {
349 	int key = 0;
350 	void *map;
351 	struct bpf_timer *timer;
352 	__u64 flags = BPF_F_TIMER_CPU_PIN;
353 	__u64 start_time;
354 
355 	if (soft) {
356 		map = &soft_timer_pinned;
357 		start_time = 0;
358 	} else {
359 		map = &abs_timer_pinned;
360 		start_time = bpf_ktime_get_boot_ns();
361 		flags |= BPF_F_TIMER_ABS;
362 	}
363 
364 	timer = bpf_map_lookup_elem(map, &key);
365 	if (timer) {
366 		if (bpf_timer_init(timer, map, CLOCK_BOOTTIME) != 0)
367 			err |= 4096;
368 		bpf_timer_set_callback(timer, timer_cb_pinned);
369 		pinned_cpu = bpf_get_smp_processor_id();
370 		bpf_timer_start(timer, start_time + 1000, flags);
371 	} else {
372 		err |= 8192;
373 	}
374 }
375 
376 SEC("fentry/bpf_fentry_test4")
377 int BPF_PROG2(test4, int, a)
378 {
379 	bpf_printk("test4");
380 	test_pinned_timer(true);
381 
382 	return 0;
383 }
384 
385 SEC("fentry/bpf_fentry_test5")
386 int BPF_PROG2(test5, int, a)
387 {
388 	bpf_printk("test5");
389 	test_pinned_timer(false);
390 
391 	return 0;
392 }
393