xref: /linux/kernel/bpf/bpf_task_storage.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Facebook
 * Copyright 2020 Google LLC.
 */

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <linux/filter.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/rcupdate_trace.h>

DEFINE_BPF_STORAGE_CACHE(task_cache);

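/* Per-cpu marker that task storage is being accessed on this CPU.
 * bpf_task_storage_trylock() fails while the counter is raised, which lets
 * the *_recur helpers below bail out instead of deadlocking on the storage
 * locks when they are reached again (e.g. recursively) on the same CPU.
 */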
static DEFINE_PER_CPU(int, bpf_task_storage_busy);

static void bpf_task_storage_lock(void)
{
	migrate_disable();
	this_cpu_inc(bpf_task_storage_busy);
}

static void bpf_task_storage_unlock(void)
{
	this_cpu_dec(bpf_task_storage_busy);
	migrate_enable();
}

static bool bpf_task_storage_trylock(void)
{
	migrate_disable();
	if (unlikely(this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
		this_cpu_dec(bpf_task_storage_busy);
		migrate_enable();
		return false;
	}
	return true;
}

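/* ->map_owner_storage_ptr callback: return the location where the owner
 * task_struct keeps its bpf_local_storage pointer.
 */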
static struct bpf_local_storage __rcu **task_storage_ptr(void *owner)
{
	struct task_struct *task = owner;

	return &task->bpf_storage;
}

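/* Find this map's element in @task's local storage.  When @cacheit_lockit
 * is true, the storage lock may be taken to cache the element in the
 * owner's cache slot for faster lookups.
 */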
static struct bpf_local_storage_data *
task_storage_lookup(struct task_struct *task, struct bpf_map *map,
		    bool cacheit_lockit)
{
	struct bpf_local_storage *task_storage;
	struct bpf_local_storage_map *smap;

	task_storage =
		rcu_dereference_check(task->bpf_storage, bpf_rcu_lock_held());
	if (!task_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(task_storage, smap, cacheit_lockit);
}

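/* Destroy all task-local storage attached to @task.  Called from the task
 * exit/free path.
 */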
void bpf_task_storage_free(struct task_struct *task)
{
	struct bpf_local_storage *local_storage;

	rcu_read_lock();

	local_storage = rcu_dereference(task->bpf_storage);
	if (!local_storage) {
		rcu_read_unlock();
		return;
	}

	bpf_task_storage_lock();
	bpf_local_storage_destroy(local_storage);
	bpf_task_storage_unlock();
	rcu_read_unlock();
}

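/* Syscall-side lookup (BPF_MAP_LOOKUP_ELEM): the map key is a pidfd
 * identifying the target task.
 */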
static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return ERR_CAST(pid);

	/* We should be in an RCU read-side critical section, so it should be
	 * safe to call pid_task().
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	sdata = task_storage_lookup(task, map, true);
	bpf_task_storage_unlock();
	put_pid(pid);
	return sdata ? sdata->data : NULL;
out:
	put_pid(pid);
	return ERR_PTR(err);
}

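/* Syscall-side update (BPF_MAP_UPDATE_ELEM): the map key is a pidfd
 * identifying the target task.
 */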
static long bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	if ((map_flags & BPF_F_LOCK) && btf_record_has_field(map->record, BPF_UPTR))
		return -EOPNOTSUPP;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	/* We should be in an RCU read-side critical section, so it should be
	 * safe to call pid_task().
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	sdata = bpf_local_storage_update(
		task, (struct bpf_local_storage_map *)map, value, map_flags,
		true, GFP_ATOMIC);
	bpf_task_storage_unlock();

	err = PTR_ERR_OR_ZERO(sdata);
out:
	put_pid(pid);
	return err;
}

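/* Unlink and delete this map's element from @task's storage.  Returns
 * -EBUSY when task storage is busy on this CPU (@nobusy is false).
 */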
static int task_storage_delete(struct task_struct *task, struct bpf_map *map,
			       bool nobusy)
{
	struct bpf_local_storage_data *sdata;

	sdata = task_storage_lookup(task, map, false);
	if (!sdata)
		return -ENOENT;

	if (!nobusy)
		return -EBUSY;

	bpf_selem_unlink(SELEM(sdata), false);

	return 0;
}

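/* Syscall-side delete (BPF_MAP_DELETE_ELEM): the map key is a pidfd
 * identifying the target task.
 */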
static long bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	/* We should be in an RCU read-side critical section, so it should be
	 * safe to call pid_task().
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	err = task_storage_delete(task, map, true);
	bpf_task_storage_unlock();
out:
	put_pid(pid);
	return err;
}

/* Called by bpf_task_storage_get*() helpers */
static void *__bpf_task_storage_get(struct bpf_map *map,
				    struct task_struct *task, void *value,
				    u64 flags, gfp_t gfp_flags, bool nobusy)
{
	struct bpf_local_storage_data *sdata;

	sdata = task_storage_lookup(task, map, nobusy);
	if (sdata)
		return sdata->data;

	/* Only allocate new storage when the task is refcounted */
	if (refcount_read(&task->usage) &&
	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy) {
		sdata = bpf_local_storage_update(
			task, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST, false, gfp_flags);
		return IS_ERR(sdata) ? NULL : sdata->data;
	}

	return NULL;
}

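/* Trylock variant of the get helper: if task storage is already busy on
 * this CPU (e.g. the helper is reached recursively), return NULL instead
 * of risking a deadlock on the storage locks.
 */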
/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_task_storage_get_recur, struct bpf_map *, map, struct task_struct *,
	   task, void *, value, u64, flags, gfp_t, gfp_flags)
{
	bool nobusy;
	void *data;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
		return (unsigned long)NULL;

	nobusy = bpf_task_storage_trylock();
	data = __bpf_task_storage_get(map, task, value, flags,
				      gfp_flags, nobusy);
	if (nobusy)
		bpf_task_storage_unlock();
	return (unsigned long)data;
}

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
	   task, void *, value, u64, flags, gfp_t, gfp_flags)
{
	void *data;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
		return (unsigned long)NULL;

	bpf_task_storage_lock();
	data = __bpf_task_storage_get(map, task, value, flags,
				      gfp_flags, true);
	bpf_task_storage_unlock();
	return (unsigned long)data;
}

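/* Trylock variant of the delete helper: if task storage is already busy on
 * this CPU, return -EBUSY instead of risking a deadlock on the storage
 * locks.
 */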
BPF_CALL_2(bpf_task_storage_delete_recur, struct bpf_map *, map, struct task_struct *,
	   task)
{
	bool nobusy;
	int ret;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!task)
		return -EINVAL;

	nobusy = bpf_task_storage_trylock();
	/* This helper must only be called from places where the lifetime of
	 * the task is guaranteed, either by being refcounted or by being
	 * protected by an RCU read-side critical section.
	 */
	ret = task_storage_delete(task, map, nobusy);
	if (nobusy)
		bpf_task_storage_unlock();
	return ret;
}

BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
	   task)
{
	int ret;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!task)
		return -EINVAL;

	bpf_task_storage_lock();
	/* This helper must only be called from places where the lifetime of
	 * the task is guaranteed, either by being refcounted or by being
	 * protected by an RCU read-side critical section.
	 */
	ret = task_storage_delete(task, map, true);
	bpf_task_storage_unlock();
	return ret;
}

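/* Iterating task storage maps with get_next_key is not supported. */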
static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -ENOTSUPP;
}

static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &task_cache, true);
}

static void task_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &task_cache, &bpf_task_storage_busy);
}

BTF_ID_LIST_GLOBAL_SINGLE(bpf_local_storage_map_btf_id, struct, bpf_local_storage_map)
const struct bpf_map_ops task_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = task_storage_map_alloc,
	.map_free = task_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_pid_task_storage_lookup_elem,
	.map_update_elem = bpf_pid_task_storage_update_elem,
	.map_delete_elem = bpf_pid_task_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_mem_usage = bpf_local_storage_map_mem_usage,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_owner_storage_ptr = task_storage_ptr,
};

const struct bpf_func_proto bpf_task_storage_get_recur_proto = {
	.func = bpf_task_storage_get_recur,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_task_storage_get_proto = {
	.func = bpf_task_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_task_storage_delete_recur_proto = {
	.func = bpf_task_storage_delete_recur,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

const struct bpf_func_proto bpf_task_storage_delete_proto = {
	.func = bpf_task_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};
377