xref: /linux/kernel/bpf/bpf_task_storage.c (revision e2683c8868d03382da7e1ce8453b543a043066d1)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2020 Facebook
4  * Copyright 2020 Google LLC.
5  */
6 
7 #include <linux/pid.h>
8 #include <linux/sched.h>
9 #include <linux/rculist.h>
10 #include <linux/list.h>
11 #include <linux/hash.h>
12 #include <linux/types.h>
13 #include <linux/spinlock.h>
14 #include <linux/bpf.h>
15 #include <linux/bpf_local_storage.h>
16 #include <linux/filter.h>
17 #include <uapi/linux/btf.h>
18 #include <linux/btf_ids.h>
19 #include <linux/rcupdate_trace.h>
20 
21 DEFINE_BPF_STORAGE_CACHE(task_cache);
22 
23 static struct bpf_local_storage __rcu **task_storage_ptr(void *owner)
24 {
25 	struct task_struct *task = owner;
26 
27 	return &task->bpf_storage;
28 }
29 
30 static struct bpf_local_storage_data *
31 task_storage_lookup(struct task_struct *task, struct bpf_map *map,
32 		    bool cacheit_lockit)
33 {
34 	struct bpf_local_storage *task_storage;
35 	struct bpf_local_storage_map *smap;
36 
37 	task_storage =
38 		rcu_dereference_check(task->bpf_storage, bpf_rcu_lock_held());
39 	if (!task_storage)
40 		return NULL;
41 
42 	smap = (struct bpf_local_storage_map *)map;
43 	return bpf_local_storage_lookup(task_storage, smap, cacheit_lockit);
44 }
45 
46 void bpf_task_storage_free(struct task_struct *task)
47 {
48 	struct bpf_local_storage *local_storage;
49 
50 	rcu_read_lock();
51 
52 	local_storage = rcu_dereference(task->bpf_storage);
53 	if (!local_storage)
54 		goto out;
55 
56 	bpf_local_storage_destroy(local_storage);
57 out:
58 	rcu_read_unlock();
59 }
60 
61 static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
62 {
63 	struct bpf_local_storage_data *sdata;
64 	struct task_struct *task;
65 	unsigned int f_flags;
66 	struct pid *pid;
67 	int fd, err;
68 
69 	fd = *(int *)key;
70 	pid = pidfd_get_pid(fd, &f_flags);
71 	if (IS_ERR(pid))
72 		return ERR_CAST(pid);
73 
74 	/* We should be in an RCU read side critical section, it should be safe
75 	 * to call pid_task.
76 	 */
77 	WARN_ON_ONCE(!rcu_read_lock_held());
78 	task = pid_task(pid, PIDTYPE_PID);
79 	if (!task) {
80 		err = -ENOENT;
81 		goto out;
82 	}
83 
84 	sdata = task_storage_lookup(task, map, true);
85 	put_pid(pid);
86 	return sdata ? sdata->data : NULL;
87 out:
88 	put_pid(pid);
89 	return ERR_PTR(err);
90 }
91 
92 static long bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
93 					     void *value, u64 map_flags)
94 {
95 	struct bpf_local_storage_data *sdata;
96 	struct task_struct *task;
97 	unsigned int f_flags;
98 	struct pid *pid;
99 	int fd, err;
100 
101 	if ((map_flags & BPF_F_LOCK) && btf_record_has_field(map->record, BPF_UPTR))
102 		return -EOPNOTSUPP;
103 
104 	fd = *(int *)key;
105 	pid = pidfd_get_pid(fd, &f_flags);
106 	if (IS_ERR(pid))
107 		return PTR_ERR(pid);
108 
109 	/* We should be in an RCU read side critical section, it should be safe
110 	 * to call pid_task.
111 	 */
112 	WARN_ON_ONCE(!rcu_read_lock_held());
113 	task = pid_task(pid, PIDTYPE_PID);
114 	if (!task) {
115 		err = -ENOENT;
116 		goto out;
117 	}
118 
119 	sdata = bpf_local_storage_update(
120 		task, (struct bpf_local_storage_map *)map, value, map_flags,
121 		true);
122 
123 	err = PTR_ERR_OR_ZERO(sdata);
124 out:
125 	put_pid(pid);
126 	return err;
127 }
128 
129 static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
130 {
131 	struct bpf_local_storage_data *sdata;
132 
133 	sdata = task_storage_lookup(task, map, false);
134 	if (!sdata)
135 		return -ENOENT;
136 
137 	return bpf_selem_unlink(SELEM(sdata));
138 }
139 
140 static long bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
141 {
142 	struct task_struct *task;
143 	unsigned int f_flags;
144 	struct pid *pid;
145 	int fd, err;
146 
147 	fd = *(int *)key;
148 	pid = pidfd_get_pid(fd, &f_flags);
149 	if (IS_ERR(pid))
150 		return PTR_ERR(pid);
151 
152 	/* We should be in an RCU read side critical section, it should be safe
153 	 * to call pid_task.
154 	 */
155 	WARN_ON_ONCE(!rcu_read_lock_held());
156 	task = pid_task(pid, PIDTYPE_PID);
157 	if (!task) {
158 		err = -ENOENT;
159 		goto out;
160 	}
161 
162 	err = task_storage_delete(task, map);
163 out:
164 	put_pid(pid);
165 	return err;
166 }
167 
168 BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
169 	   task, void *, value, u64, flags)
170 {
171 	struct bpf_local_storage_data *sdata;
172 
173 	WARN_ON_ONCE(!bpf_rcu_lock_held());
174 	if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
175 		return (unsigned long)NULL;
176 
177 	sdata = task_storage_lookup(task, map, true);
178 	if (sdata)
179 		return (unsigned long)sdata->data;
180 
181 	/* only allocate new storage, when the task is refcounted */
182 	if (refcount_read(&task->usage) &&
183 	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE)) {
184 		sdata = bpf_local_storage_update(
185 			task, (struct bpf_local_storage_map *)map, value,
186 			BPF_NOEXIST, false);
187 		return IS_ERR(sdata) ? (unsigned long)NULL : (unsigned long)sdata->data;
188 	}
189 
190 	return (unsigned long)NULL;
191 }
192 
/* bpf_task_storage_delete() helper: remove @task's element from @map.
 * Returns 0, -EINVAL for a NULL task, or -ENOENT if no element exists.
 */
BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
	   task)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!task)
		return -EINVAL;

	/* This helper must only be called from places where the lifetime of the task
	 * is guaranteed. Either by being refcounted or by being protected
	 * by an RCU read-side critical section.
	 */
	return task_storage_delete(task, map);
}
206 
/* Key iteration is not supported for task storage maps: the key is a
 * pidfd, which has no meaningful "next" value.
 */
static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -ENOTSUPP;
}
211 
/* Allocate a task storage map backed by the shared task_cache. */
static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &task_cache);
}
216 
/* Free a task storage map and release its task_cache slot. */
static void task_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &task_cache);
}
221 
BTF_ID_LIST_GLOBAL_SINGLE(bpf_local_storage_map_btf_id, struct, bpf_local_storage_map)
/* Map operations for BPF_MAP_TYPE_TASK_STORAGE; syscall-side element ops
 * take a pidfd key, get_next_key is unsupported.
 */
const struct bpf_map_ops task_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = task_storage_map_alloc,
	.map_free = task_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_pid_task_storage_lookup_elem,
	.map_update_elem = bpf_pid_task_storage_update_elem,
	.map_delete_elem = bpf_pid_task_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_mem_usage = bpf_local_storage_map_mem_usage,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_owner_storage_ptr = task_storage_ptr,
};
237 
/* Verifier prototype for the bpf_task_storage_get() helper: takes a map,
 * a (possibly NULL) task pointer, an optional init value and flags.
 */
const struct bpf_func_proto bpf_task_storage_get_proto = {
	.func = bpf_task_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};
248 
/* Verifier prototype for the bpf_task_storage_delete() helper: takes a
 * map and a (possibly NULL) task pointer, returns an integer errno.
 */
const struct bpf_func_proto bpf_task_storage_delete_proto = {
	.func = bpf_task_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};
257