xref: /linux/tools/testing/selftests/bpf/prog_tests/timer_start_delete_race.c (revision c17ee635fd3a482b2ad2bf5e269755c2eae5f25e)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
3 #define _GNU_SOURCE
4 #include <sched.h>
5 #include <pthread.h>
6 #include <test_progs.h>
7 #include "timer_start_delete_race.skel.h"
8 
9 /*
10  * Test for race between bpf_timer_start() and map element deletion.
11  *
12  * The race scenario:
13  * - CPU 1: bpf_timer_start() proceeds to bpf_async_process() and is about
14  *          to call hrtimer_start() but hasn't yet
15  * - CPU 2: map_delete_elem() calls __bpf_async_cancel_and_free(), since
16  *          timer is not scheduled yet hrtimer_try_to_cancel() is a nop,
17  *          then calls bpf_async_refcount_put() dropping refcnt to zero
18  *          and scheduling call_rcu_tasks_trace()
19  * - CPU 1: continues and calls hrtimer_start()
20  * - After RCU tasks trace grace period: memory is freed
21  * - Timer callback fires on freed memory: UAF!
22  *
23  * This test stresses this race by having two threads:
24  * - Thread 1: repeatedly starts timers
25  * - Thread 2: repeatedly deletes map elements
26  *
27  * KASAN should detect use-after-free.
28  */
29 
30 #define ITERATIONS 1000
31 
/*
 * State shared between the main thread and the two worker threads.
 *
 * start/stop are written by the main thread and polled by both workers,
 * and errors may be bumped by either worker concurrently.  Plain
 * "volatile" provides neither atomicity nor ordering, and a concurrent
 * non-atomic errors++ is a data race, so qualify all three with C11
 * _Atomic (seq_cst accesses via the plain =, !, and ++ operators).
 */
struct ctx {
	struct timer_start_delete_race *skel;	/* loaded skeleton, owned by main */
	_Atomic bool start;	/* set once by main to release both workers */
	_Atomic bool stop;	/* set by main to abort workers early */
	_Atomic int errors;	/* prog_test_run failures seen by workers */
};
38 
39 static void *start_timer_thread(void *arg)
40 {
41 	struct ctx *ctx = arg;
42 	cpu_set_t cpuset;
43 	int fd, i;
44 
45 	CPU_ZERO(&cpuset);
46 	CPU_SET(0, &cpuset);
47 	pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
48 
49 	while (!ctx->start && !ctx->stop)
50 		usleep(1);
51 	if (ctx->stop)
52 		return NULL;
53 
54 	fd = bpf_program__fd(ctx->skel->progs.start_timer);
55 
56 	for (i = 0; i < ITERATIONS && !ctx->stop; i++) {
57 		LIBBPF_OPTS(bpf_test_run_opts, opts);
58 		int err;
59 
60 		err = bpf_prog_test_run_opts(fd, &opts);
61 		if (err || opts.retval) {
62 			ctx->errors++;
63 			break;
64 		}
65 	}
66 
67 	return NULL;
68 }
69 
70 static void *delete_elem_thread(void *arg)
71 {
72 	struct ctx *ctx = arg;
73 	cpu_set_t cpuset;
74 	int fd, i;
75 
76 	CPU_ZERO(&cpuset);
77 	CPU_SET(1, &cpuset);
78 	pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
79 
80 	while (!ctx->start && !ctx->stop)
81 		usleep(1);
82 	if (ctx->stop)
83 		return NULL;
84 
85 	fd = bpf_program__fd(ctx->skel->progs.delete_elem);
86 
87 	for (i = 0; i < ITERATIONS && !ctx->stop; i++) {
88 		LIBBPF_OPTS(bpf_test_run_opts, opts);
89 		int err;
90 
91 		err = bpf_prog_test_run_opts(fd, &opts);
92 		if (err || opts.retval) {
93 			ctx->errors++;
94 			break;
95 		}
96 	}
97 
98 	return NULL;
99 }
100 
101 void test_timer_start_delete_race(void)
102 {
103 	struct timer_start_delete_race *skel;
104 	pthread_t threads[2];
105 	struct ctx ctx = {};
106 	int err;
107 
108 	skel = timer_start_delete_race__open_and_load();
109 	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
110 		return;
111 
112 	ctx.skel = skel;
113 
114 	err = pthread_create(&threads[0], NULL, start_timer_thread, &ctx);
115 	if (!ASSERT_OK(err, "create start_timer_thread")) {
116 		ctx.stop = true;
117 		goto cleanup;
118 	}
119 
120 	err = pthread_create(&threads[1], NULL, delete_elem_thread, &ctx);
121 	if (!ASSERT_OK(err, "create delete_elem_thread")) {
122 		ctx.stop = true;
123 		pthread_join(threads[0], NULL);
124 		goto cleanup;
125 	}
126 
127 	ctx.start = true;
128 
129 	pthread_join(threads[0], NULL);
130 	pthread_join(threads[1], NULL);
131 
132 	ASSERT_EQ(ctx.errors, 0, "thread_errors");
133 
134 	/* Either KASAN will catch UAF or kernel will crash or nothing happens */
135 cleanup:
136 	timer_start_delete_race__destroy(skel);
137 }
138