xref: /linux/kernel/bpf/bpf_task_storage.c (revision 23b0f90ba871f096474e1c27c3d14f455189d2d9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2020 Facebook
4  * Copyright 2020 Google LLC.
5  */
6 
7 #include <linux/pid.h>
8 #include <linux/sched.h>
9 #include <linux/rculist.h>
10 #include <linux/list.h>
11 #include <linux/hash.h>
12 #include <linux/types.h>
13 #include <linux/spinlock.h>
14 #include <linux/bpf.h>
15 #include <linux/bpf_local_storage.h>
16 #include <linux/filter.h>
17 #include <uapi/linux/btf.h>
18 #include <linux/btf_ids.h>
19 #include <linux/rcupdate_trace.h>
20 
/* Cache bookkeeping shared by all task-storage maps; passed to
 * bpf_local_storage_map_alloc()/free() below.
 */
DEFINE_BPF_STORAGE_CACHE(task_cache);
22 
23 static struct bpf_local_storage __rcu **task_storage_ptr(void *owner)
24 {
25 	struct task_struct *task = owner;
26 
27 	return &task->bpf_storage;
28 }
29 
30 static struct bpf_local_storage_data *
31 task_storage_lookup(struct task_struct *task, struct bpf_map *map,
32 		    bool cacheit_lockit)
33 {
34 	struct bpf_local_storage *task_storage;
35 	struct bpf_local_storage_map *smap;
36 
37 	task_storage =
38 		rcu_dereference_check(task->bpf_storage, bpf_rcu_lock_held());
39 	if (!task_storage)
40 		return NULL;
41 
42 	smap = (struct bpf_local_storage_map *)map;
43 	return bpf_local_storage_lookup(task_storage, smap, cacheit_lockit);
44 }
45 
46 void bpf_task_storage_free(struct task_struct *task)
47 {
48 	struct bpf_local_storage *local_storage;
49 
50 	rcu_read_lock();
51 
52 	local_storage = rcu_dereference(task->bpf_storage);
53 	if (!local_storage)
54 		goto out;
55 
56 	bpf_local_storage_destroy(local_storage);
57 out:
58 	rcu_read_unlock();
59 }
60 
61 static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
62 {
63 	struct bpf_local_storage_data *sdata;
64 	struct task_struct *task;
65 	unsigned int f_flags;
66 	struct pid *pid;
67 	int fd, err;
68 
69 	fd = *(int *)key;
70 	pid = pidfd_get_pid(fd, &f_flags);
71 	if (IS_ERR(pid))
72 		return ERR_CAST(pid);
73 
74 	/* We should be in an RCU read side critical section, it should be safe
75 	 * to call pid_task.
76 	 */
77 	WARN_ON_ONCE(!rcu_read_lock_held());
78 	task = pid_task(pid, PIDTYPE_PID);
79 	if (!task) {
80 		err = -ENOENT;
81 		goto out;
82 	}
83 
84 	sdata = task_storage_lookup(task, map, true);
85 	put_pid(pid);
86 	return sdata ? sdata->data : NULL;
87 out:
88 	put_pid(pid);
89 	return ERR_PTR(err);
90 }
91 
92 static long bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
93 					     void *value, u64 map_flags)
94 {
95 	struct bpf_local_storage_data *sdata;
96 	struct task_struct *task;
97 	unsigned int f_flags;
98 	struct pid *pid;
99 	int fd, err;
100 
101 	if ((map_flags & BPF_F_LOCK) && btf_record_has_field(map->record, BPF_UPTR))
102 		return -EOPNOTSUPP;
103 
104 	fd = *(int *)key;
105 	pid = pidfd_get_pid(fd, &f_flags);
106 	if (IS_ERR(pid))
107 		return PTR_ERR(pid);
108 
109 	/* We should be in an RCU read side critical section, it should be safe
110 	 * to call pid_task.
111 	 */
112 	WARN_ON_ONCE(!rcu_read_lock_held());
113 	task = pid_task(pid, PIDTYPE_PID);
114 	if (!task) {
115 		err = -ENOENT;
116 		goto out;
117 	}
118 
119 	sdata = bpf_local_storage_update(
120 		task, (struct bpf_local_storage_map *)map, value, map_flags,
121 		true, GFP_ATOMIC);
122 
123 	err = PTR_ERR_OR_ZERO(sdata);
124 out:
125 	put_pid(pid);
126 	return err;
127 }
128 
129 static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
130 {
131 	struct bpf_local_storage_data *sdata;
132 
133 	sdata = task_storage_lookup(task, map, false);
134 	if (!sdata)
135 		return -ENOENT;
136 
137 	return bpf_selem_unlink(SELEM(sdata));
138 }
139 
140 static long bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
141 {
142 	struct task_struct *task;
143 	unsigned int f_flags;
144 	struct pid *pid;
145 	int fd, err;
146 
147 	fd = *(int *)key;
148 	pid = pidfd_get_pid(fd, &f_flags);
149 	if (IS_ERR(pid))
150 		return PTR_ERR(pid);
151 
152 	/* We should be in an RCU read side critical section, it should be safe
153 	 * to call pid_task.
154 	 */
155 	WARN_ON_ONCE(!rcu_read_lock_held());
156 	task = pid_task(pid, PIDTYPE_PID);
157 	if (!task) {
158 		err = -ENOENT;
159 		goto out;
160 	}
161 
162 	err = task_storage_delete(task, map);
163 out:
164 	put_pid(pid);
165 	return err;
166 }
167 
168 /* *gfp_flags* is a hidden argument provided by the verifier */
169 BPF_CALL_5(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
170 	   task, void *, value, u64, flags, gfp_t, gfp_flags)
171 {
172 	struct bpf_local_storage_data *sdata;
173 
174 	WARN_ON_ONCE(!bpf_rcu_lock_held());
175 	if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
176 		return (unsigned long)NULL;
177 
178 	sdata = task_storage_lookup(task, map, true);
179 	if (sdata)
180 		return (unsigned long)sdata->data;
181 
182 	/* only allocate new storage, when the task is refcounted */
183 	if (refcount_read(&task->usage) &&
184 	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE)) {
185 		sdata = bpf_local_storage_update(
186 			task, (struct bpf_local_storage_map *)map, value,
187 			BPF_NOEXIST, false, gfp_flags);
188 		return IS_ERR(sdata) ? (unsigned long)NULL : (unsigned long)sdata->data;
189 	}
190 
191 	return (unsigned long)NULL;
192 }
193 
194 BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
195 	   task)
196 {
197 	WARN_ON_ONCE(!bpf_rcu_lock_held());
198 	if (!task)
199 		return -EINVAL;
200 
201 	/* This helper must only be called from places where the lifetime of the task
202 	 * is guaranteed. Either by being refcounted or by being protected
203 	 * by an RCU read-side critical section.
204 	 */
205 	return task_storage_delete(task, map);
206 }
207 
/* Key iteration is not supported for task storage maps. */
static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -ENOTSUPP;
}
212 
/* Allocate a task storage map backed by the shared task_cache.
 * NOTE(review): the trailing boolean is forwarded verbatim to
 * bpf_local_storage_map_alloc() — see that function for its meaning.
 */
static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &task_cache, true);
}
217 
/* Free a task storage map and its task_cache slot. */
static void task_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &task_cache);
}
222 
BTF_ID_LIST_GLOBAL_SINGLE(bpf_local_storage_map_btf_id, struct, bpf_local_storage_map)
/* Map operations for BPF_MAP_TYPE_TASK_STORAGE.  get_next_key is
 * deliberately unsupported; lookup/update/delete take a pidfd key.
 */
const struct bpf_map_ops task_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = task_storage_map_alloc,
	.map_free = task_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_pid_task_storage_lookup_elem,
	.map_update_elem = bpf_pid_task_storage_update_elem,
	.map_delete_elem = bpf_pid_task_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_mem_usage = bpf_local_storage_map_mem_usage,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_owner_storage_ptr = task_storage_ptr,
};
238 
/* Helper proto for bpf_task_storage_get(): arg2 is a (possibly NULL)
 * task_struct BTF pointer; returns a map value pointer or NULL.
 */
const struct bpf_func_proto bpf_task_storage_get_proto = {
	.func = bpf_task_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};
249 
/* Helper proto for bpf_task_storage_delete(): integer return,
 * arg2 is a (possibly NULL) task_struct BTF pointer.
 */
const struct bpf_func_proto bpf_task_storage_delete_proto = {
	.func = bpf_task_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};
258