xref: /linux/tools/testing/selftests/bpf/progs/map_kptr_race.c (revision 57885276cc16a2e2b76282c808a4e84cbecb3aae)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
3 #include <vmlinux.h>
4 #include <bpf/bpf_helpers.h>
5 #include <bpf/bpf_tracing.h>
6 #include "../test_kmods/bpf_testmod_kfunc.h"
7 
/* Value type shared by every map below: a single referenced kptr slot
 * pointing at the test object exported by bpf_testmod.
 */
struct map_value {
	struct prog_test_ref_kfunc __kptr *ref_ptr;
};
11 
/* Hash map used by test_htab_leak; NO_PREALLOC so element memory is
 * freed (via RCU) after delete, which is what the kptr-release race
 * exercises.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} race_hash_map SEC(".maps");
19 
/* Per-CPU variant used by test_percpu_htab_leak: one element, but one
 * kptr slot per CPU.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} race_percpu_hash_map SEC(".maps");
27 
/* Socket local storage map used by test_sk_ls_leak (sk_storage has no
 * max_entries; storage is created per socket on demand).
 */
struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
} race_sk_ls_map SEC(".maps");
34 
/* Refcount of the shared test object as sampled by count_ref; userspace
 * reads this to detect leaked references. */
int num_of_refs;
/* Latch so test_sk_ls_leak runs its body only once. */
int sk_ls_leak_done;
/* Map id userspace wants the free-tracking hooks to watch (0 = off). */
int target_map_id;
/* Set by the fexit hooks once the watched map has actually been freed. */
int map_freed;
/* Number of possible CPUs, filled in by userspace before load. */
const volatile int nr_cpus;
40 
/* Exercise the race between htab element free and kptr_xchg on a stale
 * element pointer: plant a referenced kptr, delete the element (queuing
 * its free), then xchg a second kptr into the already-deleted element.
 * The kernel's map free path must drop both references or they leak.
 * NOTE(review): the post-delete xchg is intentional -- it IS the race
 * under test, not a bug.
 */
SEC("tc")
int test_htab_leak(struct __sk_buff *skb)
{
	struct prog_test_ref_kfunc *p, *old;
	struct map_value val = {};
	struct map_value *v;
	int key = 0;

	/* Create the element and grab a pointer into its value. */
	if (bpf_map_update_elem(&race_hash_map, &key, &val, BPF_ANY))
		return 1;

	v = bpf_map_lookup_elem(&race_hash_map, &key);
	if (!v)
		return 2;

	/* Store a referenced kptr while the element is live. */
	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!p)
		return 3;
	old = bpf_kptr_xchg(&v->ref_ptr, p);
	if (old)
		bpf_kfunc_call_test_release(old);

	/* v now points at an element scheduled for freeing. */
	bpf_map_delete_elem(&race_hash_map, &key);

	/* Store a second kptr into the deleted element; this reference
	 * must be released by the element/map free path.
	 */
	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!p)
		return 4;
	old = bpf_kptr_xchg(&v->ref_ptr, p);
	if (old)
		bpf_kfunc_call_test_release(old);

	return 0;
}
74 
75 static int fill_percpu_kptr(struct map_value *v)
76 {
77 	struct prog_test_ref_kfunc *p, *old;
78 
79 	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
80 	if (!p)
81 		return 1;
82 	old = bpf_kptr_xchg(&v->ref_ptr, p);
83 	if (old)
84 		bpf_kfunc_call_test_release(old);
85 	return 0;
86 }
87 
88 SEC("tc")
89 int test_percpu_htab_leak(struct __sk_buff *skb)
90 {
91 	struct map_value *v, *arr[16] = {};
92 	struct map_value val = {};
93 	int key = 0;
94 	int err = 0;
95 
96 	if (bpf_map_update_elem(&race_percpu_hash_map, &key, &val, BPF_ANY))
97 		return 1;
98 
99 	for (int i = 0; i < nr_cpus; i++) {
100 		v = bpf_map_lookup_percpu_elem(&race_percpu_hash_map, &key, i);
101 		if (!v)
102 			return 2;
103 		arr[i] = v;
104 	}
105 
106 	bpf_map_delete_elem(&race_percpu_hash_map, &key);
107 
108 	for (int i = 0; i < nr_cpus; i++) {
109 		v = arr[i];
110 		err = fill_percpu_kptr(v);
111 		if (err)
112 			return 3;
113 	}
114 
115 	return 0;
116 }
117 
/* Same race as test_htab_leak, but against socket local storage: plant
 * a kptr, delete the storage, then xchg a second kptr through the stale
 * pointer.  Fires once, on the first socket entering SYN_SENT.
 * NOTE(review): the post-delete xchg is the race under test, not a bug.
 */
SEC("tp_btf/inet_sock_set_state")
int BPF_PROG(test_sk_ls_leak, struct sock *sk, int oldstate, int newstate)
{
	struct prog_test_ref_kfunc *p, *old;
	struct map_value *v;

	if (newstate != BPF_TCP_SYN_SENT)
		return 0;

	/* Run the body only once across all state changes. */
	if (sk_ls_leak_done)
		return 0;

	v = bpf_sk_storage_get(&race_sk_ls_map, sk, NULL,
				BPF_SK_STORAGE_GET_F_CREATE);
	if (!v)
		return 0;

	/* Store a referenced kptr while the storage is live. */
	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!p)
		return 0;
	old = bpf_kptr_xchg(&v->ref_ptr, p);
	if (old)
		bpf_kfunc_call_test_release(old);

	/* v now points at storage queued for freeing. */
	bpf_sk_storage_delete(&race_sk_ls_map, sk);

	/* This reference must be dropped by the storage free path. */
	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!p)
		return 0;
	old = bpf_kptr_xchg(&v->ref_ptr, p);
	if (old)
		bpf_kfunc_call_test_release(old);

	sk_ls_leak_done = 1;
	return 0;
}
154 
/* Kernel address of the map matching target_map_id, captured by map_put
 * so the fexit free hooks can match on pointer identity. */
long target_map_ptr;
156 
157 SEC("fentry/bpf_map_put")
158 int BPF_PROG(map_put, struct bpf_map *map)
159 {
160 	if (target_map_id && map->id == (u32)target_map_id)
161 		target_map_ptr = (long)map;
162 	return 0;
163 }
164 
165 SEC("fexit/htab_map_free")
166 int BPF_PROG(htab_map_free, struct bpf_map *map)
167 {
168 	if (target_map_ptr && (long)map == target_map_ptr)
169 		map_freed = 1;
170 	return 0;
171 }
172 
173 SEC("fexit/bpf_sk_storage_map_free")
174 int BPF_PROG(sk_map_free, struct bpf_map *map)
175 {
176 	if (target_map_ptr && (long)map == target_map_ptr)
177 		map_freed = 1;
178 	return 0;
179 }
180 
181 SEC("syscall")
182 int count_ref(void *ctx)
183 {
184 	struct prog_test_ref_kfunc *p;
185 	unsigned long arg = 0;
186 
187 	p = bpf_kfunc_call_test_acquire(&arg);
188 	if (!p)
189 		return 1;
190 
191 	num_of_refs = p->cnt.refs.counter;
192 
193 	bpf_kfunc_call_test_release(p);
194 	return 0;
195 }
196 
/* GPL license is required for the kfuncs and tracing hooks used above. */
char _license[] SEC("license") = "GPL";
198