xref: /linux/tools/testing/selftests/bpf/progs/timer.c (revision 13a44ba0dca8c133a4368323683a270a3874d88c)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <linux/bpf.h>
#include <time.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include "bpf_tcp_helpers.h"

char _license[] SEC("license") = "GPL";
struct hmap_elem {
	int counter;
	struct bpf_timer timer;
	struct bpf_spin_lock lock; /* unused */
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1000);
	__type(key, int);
	__type(value, struct hmap_elem);
} hmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, 1000);
	__type(key, int);
	__type(value, struct hmap_elem);
} hmap_malloc SEC(".maps");

struct elem {
	struct bpf_timer t;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 2);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, 4);
	__type(key, int);
	__type(value, struct elem);
} lru SEC(".maps");

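/* Four single-entry array maps, one timer each: abs_timer and the two
 * *_pinned maps are used by the absolute/CPU-pinned timer tests below,
 * race_array by the "race" syscall program.
 */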
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps"),
	race_array SEC(".maps");

__u64 bss_data;
__u64 abs_data;
__u64 err;
__u64 ok;
__u64 callback_check = 52;
__u64 callback2_check = 52;
__u64 pinned_callback_check;
__s32 pinned_cpu;

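/* Map key values; the timer callbacks inspect *key to tell which timer fired. */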
#define ARRAY 1
#define HTAB 2
#define HTAB_MALLOC 3
#define LRU 4

/* callback for array and lru timers */
static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
{
	/* increment bss variable twice.
	 * Once via array timer callback and once via lru timer callback
	 */
	bss_data += 5;

	/* *key == ARRAY (1) - the callback was called for the array timer.
	 * *key == LRU (4)   - the callback was called for the lru timer.
	 */
	if (*key == ARRAY) {
		struct bpf_timer *lru_timer;
		int lru_key = LRU;

		/* rearm array timer to be called again in ~35 seconds */
		if (bpf_timer_start(timer, 1ull << 35, 0) != 0)
			err |= 1;

		lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
		if (!lru_timer)
			return 0;
		bpf_timer_set_callback(lru_timer, timer_cb1);
		if (bpf_timer_start(lru_timer, 0, 0) != 0)
			err |= 2;
	} else if (*key == LRU) {
		int lru_key, i;

		for (i = LRU + 1;
		     i <= 100  /* for current LRU eviction algorithm this number
				* should be larger than ~ lru->max_entries * 2
				*/;
		     i++) {
			struct elem init = {};

			/* lru_key cannot be used as loop induction variable
			 * otherwise the loop will be unbounded.
			 */
			lru_key = i;

			/* add more elements into lru map to push out current
			 * element and force deletion of this timer
			 */
			bpf_map_update_elem(map, &lru_key, &init, 0);
			/* look it up to bump it into active list */
			bpf_map_lookup_elem(map, &lru_key);

			/* keep adding until *key changes underneath,
			 * which means that key/timer memory was reused
			 */
			if (*key != LRU)
				break;
		}

		/* check that the timer was removed */
		if (bpf_timer_cancel(timer) != -EINVAL)
			err |= 4;
		ok |= 1;
	}
	return 0;
}

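/* Start the array timer with immediate expiry; timer_cb1 then re-arms it and
 * chains to the lru timer. A second array slot is initialized but never
 * started, so that map destruction of un-started timers is also covered.
 */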
SEC("fentry/bpf_fentry_test1")
int BPF_PROG2(test1, int, a)
{
	struct bpf_timer *arr_timer, *lru_timer;
	struct elem init = {};
	int lru_key = LRU;
	int array_key = ARRAY;

	arr_timer = bpf_map_lookup_elem(&array, &array_key);
	if (!arr_timer)
		return 0;
	bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);

	bpf_map_update_elem(&lru, &lru_key, &init, 0);
	lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
	if (!lru_timer)
		return 0;
	bpf_timer_init(lru_timer, &lru, CLOCK_MONOTONIC);

	bpf_timer_set_callback(arr_timer, timer_cb1);
	bpf_timer_start(arr_timer, 0 /* call timer_cb1 asap */, 0);

	/* init more timers to check that array destruction
	 * doesn't leak timer memory.
	 */
	array_key = 0;
	arr_timer = bpf_map_lookup_elem(&array, &array_key);
	if (!arr_timer)
		return 0;
	bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);
	return 0;
}

/* callback for prealloc and non-prealloc hashtab timers */
static int timer_cb2(void *map, int *key, struct hmap_elem *val)
{
	if (*key == HTAB)
		callback_check--;
	else
		callback2_check--;
	if (val->counter > 0 && --val->counter) {
		/* re-arm the timer again to execute after 1 usec */
		bpf_timer_start(&val->timer, 1000, 0);
	} else if (*key == HTAB) {
		struct bpf_timer *arr_timer;
		int array_key = ARRAY;

		/* cancel arr_timer otherwise bpf_fentry_test1 prog
		 * will stay alive forever.
		 */
		arr_timer = bpf_map_lookup_elem(&array, &array_key);
		if (!arr_timer)
			return 0;
		if (bpf_timer_cancel(arr_timer) != 1)
			/* bpf_timer_cancel should return 1 to indicate
			 * that arr_timer was active at this time
			 */
			err |= 8;

		/* try to cancel our own timer. It shouldn't deadlock. */
		if (bpf_timer_cancel(&val->timer) != -EDEADLK)
			err |= 16;

		/* delete this key and this timer anyway.
		 * It shouldn't deadlock either.
		 */
		bpf_map_delete_elem(map, key);

		/* in a preallocated hashmap both 'key' and 'val' could have been
		 * reused to store another map element (like in LRU above),
		 * but in a controlled test environment the check below works.
		 * It's not a use-after-free. The memory is owned by the map.
		 */
		if (bpf_timer_start(&val->timer, 1000, 0) != -EINVAL)
			err |= 32;
		ok |= 2;
	} else {
		if (*key != HTAB_MALLOC)
			err |= 64;

		/* try to cancel our own timer. It shouldn't deadlock. */
		if (bpf_timer_cancel(&val->timer) != -EDEADLK)
			err |= 128;

		/* delete this key and this timer anyway.
		 * It shouldn't deadlock either.
		 */
		bpf_map_delete_elem(map, key);

		ok |= 4;
	}
	return 0;
}

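/* Arm one timer in the prealloc and one in the non-prealloc hashtab, both on
 * CLOCK_BOOTTIME with a 1 usec expiry; timer_cb2 re-arms itself until
 * hmap_elem::counter runs out, then deletes the elements (and cancels the
 * array timer armed by test1).
 */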
int bpf_timer_test(void)
{
	struct hmap_elem *val;
	int key = HTAB, key_malloc = HTAB_MALLOC;

	val = bpf_map_lookup_elem(&hmap, &key);
	if (val) {
		if (bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME) != 0)
			err |= 512;
		bpf_timer_set_callback(&val->timer, timer_cb2);
		bpf_timer_start(&val->timer, 1000, 0);
	}
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val) {
		if (bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME) != 0)
			err |= 1024;
		bpf_timer_set_callback(&val->timer, timer_cb2);
		bpf_timer_start(&val->timer, 1000, 0);
	}
	return 0;
}

SEC("fentry/bpf_fentry_test2")
int BPF_PROG2(test2, int, a, int, b)
{
	struct hmap_elem init = {}, *val;
	int key = HTAB, key_malloc = HTAB_MALLOC;

	init.counter = 10; /* number of times to trigger timer_cb2 */
	bpf_map_update_elem(&hmap, &key, &init, 0);
	val = bpf_map_lookup_elem(&hmap, &key);
	if (val)
		bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
	/* update the same key to free the timer */
	bpf_map_update_elem(&hmap, &key, &init, 0);

	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val)
		bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
	/* update the same key to free the timer */
	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);

	/* init more timers to check that htab operations
	 * don't leak timer memory.
	 */
	key = 0;
	bpf_map_update_elem(&hmap, &key, &init, 0);
	val = bpf_map_lookup_elem(&hmap, &key);
	if (val)
		bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
	bpf_map_delete_elem(&hmap, &key);
	bpf_map_update_elem(&hmap, &key, &init, 0);
	val = bpf_map_lookup_elem(&hmap, &key);
	if (val)
		bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);

	/* and with non-prealloc htab */
	key_malloc = 0;
	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val)
		bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
	bpf_map_delete_elem(&hmap_malloc, &key_malloc);
	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val)
		bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);

	return bpf_timer_test();
}

/* callback for absolute timer */
static int timer_cb3(void *map, int *key, struct bpf_timer *timer)
{
	abs_data += 6;

	if (abs_data < 12) {
		bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
				BPF_F_TIMER_ABS);
	} else {
		/* Re-arm timer ~35 seconds in the future */
		bpf_timer_start(timer, bpf_ktime_get_boot_ns() + (1ull << 35),
				BPF_F_TIMER_ABS);
	}

	return 0;
}

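/* Arm the absolute timer 1 usec from now on CLOCK_BOOTTIME; timer_cb3 keeps
 * re-arming it with absolute expiry times and, once abs_data reaches 12,
 * pushes the next expiry ~35 seconds out.
 */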
SEC("fentry/bpf_fentry_test3")
int BPF_PROG2(test3, int, a)
{
	int key = 0;
	struct bpf_timer *timer;

	bpf_printk("test3");

	timer = bpf_map_lookup_elem(&abs_timer, &key);
	if (timer) {
		if (bpf_timer_init(timer, &abs_timer, CLOCK_BOOTTIME) != 0)
			err |= 2048;
		bpf_timer_set_callback(timer, timer_cb3);
		bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
				BPF_F_TIMER_ABS);
	}

	return 0;
}

/* callback for pinned timer */
static int timer_cb_pinned(void *map, int *key, struct bpf_timer *timer)
{
	__s32 cpu = bpf_get_smp_processor_id();

	if (cpu != pinned_cpu)
		err |= 16384;

	pinned_callback_check++;
	return 0;
}

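/* Arm a CPU-pinned timer on the current CPU: relative expiry for the "soft"
 * case, absolute CLOCK_BOOTTIME expiry otherwise. pinned_cpu records where
 * the timer was armed so timer_cb_pinned can verify it runs on the same CPU.
 */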
static void test_pinned_timer(bool soft)
{
	int key = 0;
	void *map;
	struct bpf_timer *timer;
	__u64 flags = BPF_F_TIMER_CPU_PIN;
	__u64 start_time;

	if (soft) {
		map = &soft_timer_pinned;
		start_time = 0;
	} else {
		map = &abs_timer_pinned;
		start_time = bpf_ktime_get_boot_ns();
		flags |= BPF_F_TIMER_ABS;
	}

	timer = bpf_map_lookup_elem(map, &key);
	if (timer) {
		if (bpf_timer_init(timer, map, CLOCK_BOOTTIME) != 0)
			err |= 4096;
		bpf_timer_set_callback(timer, timer_cb_pinned);
		pinned_cpu = bpf_get_smp_processor_id();
		bpf_timer_start(timer, start_time + 1000, flags);
	} else {
		err |= 8192;
	}
}

SEC("fentry/bpf_fentry_test4")
int BPF_PROG2(test4, int, a)
{
	bpf_printk("test4");
	test_pinned_timer(true);

	return 0;
}

SEC("fentry/bpf_fentry_test5")
int BPF_PROG2(test5, int, a)
{
	bpf_printk("test5");
	test_pinned_timer(false);

	return 0;
}

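/* Stress bpf_timer_init/start/cancel against a callback that keeps re-arming
 * itself. The "race" syscall program is presumably run many times in parallel
 * by the user-space side of the test; -EBUSY from bpf_timer_init just means
 * the timer was already initialized by an earlier run.
 */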
static int race_timer_callback(void *race_array, int *race_key, struct bpf_timer *timer)
{
	bpf_timer_start(timer, 1000000, 0);
	return 0;
}

SEC("syscall")
int race(void *ctx)
{
	struct bpf_timer *timer;
	int err, race_key = 0;
	struct elem init;

	__builtin_memset(&init, 0, sizeof(struct elem));
	bpf_map_update_elem(&race_array, &race_key, &init, BPF_ANY);

	timer = bpf_map_lookup_elem(&race_array, &race_key);
	if (!timer)
		return 1;

	err = bpf_timer_init(timer, &race_array, CLOCK_MONOTONIC);
	if (err && err != -EBUSY)
		return 1;

	bpf_timer_set_callback(timer, race_timer_callback);
	bpf_timer_start(timer, 0, 0);
	bpf_timer_cancel(timer);

	return 0;
}