xref: /linux/kernel/bpf/bpf_task_storage.c (revision fcb3ad4366b9c810cbb9da34c076a9a52d8aa1e0)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Facebook
 * Copyright 2020 Google LLC.
 */

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <linux/filter.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/rcupdate_trace.h>

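/*
 * task_cache provides the cache-slot index bookkeeping that the generic
 * bpf_local_storage code uses to short-circuit lookups of recently used
 * maps on a given task.
 */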
DEFINE_BPF_STORAGE_CACHE(task_cache);

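/*
 * Per-cpu recursion guard: a BPF program that runs while this cpu is
 * already inside the task-storage code (e.g. a tracing program attached
 * within this path) could deadlock on the storage locks, so such callers
 * go through the trylock variant below and bail out instead.
 */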
static DEFINE_PER_CPU(int, bpf_task_storage_busy);

static void bpf_task_storage_lock(void)
{
	migrate_disable();
	this_cpu_inc(bpf_task_storage_busy);
}

static void bpf_task_storage_unlock(void)
{
	this_cpu_dec(bpf_task_storage_busy);
	migrate_enable();
}

static bool bpf_task_storage_trylock(void)
{
	migrate_disable();
	if (unlikely(this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
		this_cpu_dec(bpf_task_storage_busy);
		migrate_enable();
		return false;
	}
	return true;
}

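/* Callback for the generic local-storage code to reach the owner's storage pointer. */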
static struct bpf_local_storage __rcu **task_storage_ptr(void *owner)
{
	struct task_struct *task = owner;

	return &task->bpf_storage;
}

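/*
 * Find @map's storage data on @task. With @cacheit_lockit set, the generic
 * lookup also caches the result in the task's storage for faster subsequent
 * lookups, which requires taking the storage lock.
 */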
static struct bpf_local_storage_data *
task_storage_lookup(struct task_struct *task, struct bpf_map *map,
		    bool cacheit_lockit)
{
	struct bpf_local_storage *task_storage;
	struct bpf_local_storage_map *smap;

	task_storage =
		rcu_dereference_check(task->bpf_storage, bpf_rcu_lock_held());
	if (!task_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(task_storage, smap, cacheit_lockit);
}

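/* Tear down all task-storage data on @task; called when the task is freed. */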
void bpf_task_storage_free(struct task_struct *task)
{
	struct bpf_local_storage *local_storage;

	rcu_read_lock();

	local_storage = rcu_dereference(task->bpf_storage);
	if (!local_storage) {
		rcu_read_unlock();
		return;
	}

	bpf_task_storage_lock();
	bpf_local_storage_destroy(local_storage);
	bpf_task_storage_unlock();
	rcu_read_unlock();
}

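/* Syscall-side lookup: the map key is a pidfd identifying the target task. */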
static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return ERR_CAST(pid);

	/* We should be in an RCU read-side critical section, so it is safe
	 * to call pid_task().
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	sdata = task_storage_lookup(task, map, true);
	bpf_task_storage_unlock();
	put_pid(pid);
	return sdata ? sdata->data : NULL;
out:
	put_pid(pid);
	return ERR_PTR(err);
}

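/* Syscall-side update: resolve the pidfd key, then update under the busy counter. */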
static long bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	/* We should be in an RCU read-side critical section, so it is safe
	 * to call pid_task().
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	sdata = bpf_local_storage_update(
		task, (struct bpf_local_storage_map *)map, value, map_flags,
		GFP_ATOMIC);
	bpf_task_storage_unlock();

	err = PTR_ERR_OR_ZERO(sdata);
out:
	put_pid(pid);
	return err;
}

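/*
 * Delete @map's storage data on @task. @nobusy reports whether the caller
 * holds the busy counter; without it, a concurrent or recursive storage
 * operation owns this cpu, so the deletion fails with -EBUSY.
 */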
static int task_storage_delete(struct task_struct *task, struct bpf_map *map,
			       bool nobusy)
{
	struct bpf_local_storage_data *sdata;

	sdata = task_storage_lookup(task, map, false);
	if (!sdata)
		return -ENOENT;

	if (!nobusy)
		return -EBUSY;

	bpf_selem_unlink(SELEM(sdata), false);

	return 0;
}

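/* Syscall-side delete: the map key is a pidfd identifying the target task. */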
static long bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	/* We should be in an RCU read-side critical section, so it is safe
	 * to call pid_task().
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	err = task_storage_delete(task, map, true);
	bpf_task_storage_unlock();
out:
	put_pid(pid);
	return err;
}

/* Called by bpf_task_storage_get*() helpers */
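/*
 * @nobusy reports whether the caller managed to take the busy counter;
 * creating new storage is attempted only in that case, and only while the
 * task is still refcounted.
 */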
static void *__bpf_task_storage_get(struct bpf_map *map,
				    struct task_struct *task, void *value,
				    u64 flags, gfp_t gfp_flags, bool nobusy)
{
	struct bpf_local_storage_data *sdata;

	sdata = task_storage_lookup(task, map, nobusy);
	if (sdata)
		return sdata->data;

	/* Only allocate new storage when the task is refcounted. */
	if (refcount_read(&task->usage) &&
	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy) {
		sdata = bpf_local_storage_update(
			task, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST, gfp_flags);
		return IS_ERR(sdata) ? NULL : sdata->data;
	}

	return NULL;
}

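/*
 * A minimal sketch of how a BPF program typically uses this helper,
 * assuming a hypothetical task-storage map "task_map" with value type
 * "struct my_data" declared in the program:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, struct my_data);
 *	} task_map SEC(".maps");
 *
 *	struct my_data *data;
 *
 *	data = bpf_task_storage_get(&task_map, task, NULL,
 *				    BPF_LOCAL_STORAGE_GET_F_CREATE);
 *
 * The _recur variant below is handed to program types that may recurse
 * into this code; it takes the busy counter with trylock and bails out
 * instead of deadlocking.
 */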
/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_task_storage_get_recur, struct bpf_map *, map, struct task_struct *,
	   task, void *, value, u64, flags, gfp_t, gfp_flags)
{
	bool nobusy;
	void *data;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
		return (unsigned long)NULL;

	nobusy = bpf_task_storage_trylock();
	data = __bpf_task_storage_get(map, task, value, flags,
				      gfp_flags, nobusy);
	if (nobusy)
		bpf_task_storage_unlock();
	return (unsigned long)data;
}

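/*
 * Non-recursive variant: the program type cannot reenter this code, so
 * plain lock/unlock of the busy counter is sufficient.
 */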
/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
	   task, void *, value, u64, flags, gfp_t, gfp_flags)
{
	void *data;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
		return (unsigned long)NULL;

	bpf_task_storage_lock();
	data = __bpf_task_storage_get(map, task, value, flags,
				      gfp_flags, true);
	bpf_task_storage_unlock();
	return (unsigned long)data;
}

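/* Recursion-safe delete: returns -EBUSY if the busy counter is contended. */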
BPF_CALL_2(bpf_task_storage_delete_recur, struct bpf_map *, map, struct task_struct *,
	   task)
{
	bool nobusy;
	int ret;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!task)
		return -EINVAL;

	nobusy = bpf_task_storage_trylock();
	/* This helper must only be called from places where the lifetime of the task
	 * is guaranteed, either because the task is refcounted or because it is
	 * protected by an RCU read-side critical section.
	 */
	ret = task_storage_delete(task, map, nobusy);
	if (nobusy)
		bpf_task_storage_unlock();
	return ret;
}

BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
	   task)
{
	int ret;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!task)
		return -EINVAL;

	bpf_task_storage_lock();
	/* This helper must only be called from places where the lifetime of the task
	 * is guaranteed, either because the task is refcounted or because it is
	 * protected by an RCU read-side critical section.
	 */
	ret = task_storage_delete(task, map, true);
	bpf_task_storage_unlock();
	return ret;
}

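/* Task storage maps cannot be iterated, so get_next_key is not supported. */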
static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -ENOTSUPP;
}

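/* The trailing bool selects the bpf-specific memory allocator for elements. */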
static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &task_cache, true);
}

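/*
 * The busy counter is passed down so that the generic free path can raise
 * it around element unlinking, keeping recursing programs on this cpu out.
 */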
static void task_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &task_cache, &bpf_task_storage_busy);
}

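/* BTF id of struct bpf_local_storage_map, shared by all local-storage map flavors. */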
BTF_ID_LIST_GLOBAL_SINGLE(bpf_local_storage_map_btf_id, struct, bpf_local_storage_map)
const struct bpf_map_ops task_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = task_storage_map_alloc,
	.map_free = task_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_pid_task_storage_lookup_elem,
	.map_update_elem = bpf_pid_task_storage_update_elem,
	.map_delete_elem = bpf_pid_task_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_mem_usage = bpf_local_storage_map_mem_usage,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_owner_storage_ptr = task_storage_ptr,
};

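/*
 * Function prototypes exposed to BPF programs. The tracing infrastructure
 * hands the _recur versions to program types that may recurse into this
 * code.
 */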
const struct bpf_func_proto bpf_task_storage_get_recur_proto = {
	.func = bpf_task_storage_get_recur,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_task_storage_get_proto = {
	.func = bpf_task_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_task_storage_delete_recur_proto = {
	.func = bpf_task_storage_delete_recur,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

const struct bpf_func_proto bpf_task_storage_delete_proto = {
	.func = bpf_task_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};