xref: /linux/kernel/bpf/syscall.c (revision a6923c06a3b2e2c534ae28c53a7531e76cc95cfa)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/bpf-cgroup.h>
6 #include <linux/bpf_trace.h>
7 #include <linux/bpf_lirc.h>
8 #include <linux/bpf_verifier.h>
9 #include <linux/bsearch.h>
10 #include <linux/btf.h>
11 #include <linux/syscalls.h>
12 #include <linux/slab.h>
13 #include <linux/sched/signal.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mmzone.h>
16 #include <linux/anon_inodes.h>
17 #include <linux/fdtable.h>
18 #include <linux/file.h>
19 #include <linux/fs.h>
20 #include <linux/license.h>
21 #include <linux/filter.h>
22 #include <linux/kernel.h>
23 #include <linux/idr.h>
24 #include <linux/cred.h>
25 #include <linux/timekeeping.h>
26 #include <linux/ctype.h>
27 #include <linux/nospec.h>
28 #include <linux/audit.h>
29 #include <uapi/linux/btf.h>
30 #include <linux/pgtable.h>
31 #include <linux/bpf_lsm.h>
32 #include <linux/poll.h>
33 #include <linux/sort.h>
34 #include <linux/bpf-netns.h>
35 #include <linux/rcupdate_trace.h>
36 #include <linux/memcontrol.h>
37 #include <linux/trace_events.h>
38 #include <linux/tracepoint.h>
39 #include <linux/overflow.h>
40 #include <linux/cookie.h>
41 
42 #include <net/netfilter/nf_bpf_link.h>
43 #include <net/netkit.h>
44 #include <net/tcx.h>
45 
46 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
47 			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
48 			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
49 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
50 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
51 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
52 			IS_FD_HASH(map))
53 
54 #define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
55 
56 DEFINE_PER_CPU(int, bpf_prog_active);
57 DEFINE_COOKIE(bpf_map_cookie);
58 static DEFINE_IDR(prog_idr);
59 static DEFINE_SPINLOCK(prog_idr_lock);
60 static DEFINE_IDR(map_idr);
61 static DEFINE_SPINLOCK(map_idr_lock);
62 static DEFINE_IDR(link_idr);
63 static DEFINE_SPINLOCK(link_idr_lock);
64 
65 int sysctl_unprivileged_bpf_disabled __read_mostly =
66 	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
67 
68 static const struct bpf_map_ops * const bpf_map_types[] = {
69 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
70 #define BPF_MAP_TYPE(_id, _ops) \
71 	[_id] = &_ops,
72 #define BPF_LINK_TYPE(_id, _name)
73 #include <linux/bpf_types.h>
74 #undef BPF_PROG_TYPE
75 #undef BPF_MAP_TYPE
76 #undef BPF_LINK_TYPE
77 };
78 
79 /*
80  * If we're handed a bigger struct than we know of, ensure all the unknown bits
81  * are 0 - i.e. new user-space does not rely on any kernel feature extensions
82  * we don't know about yet.
83  *
84  * There is a ToCToU between this function call and the following
85  * copy_from_user() call. However, this is not a concern since this function is
86  * meant to be a future-proofing of bits.
87  */
bpf_check_uarg_tail_zero(bpfptr_t uaddr,size_t expected_size,size_t actual_size)88 int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
89 			     size_t expected_size,
90 			     size_t actual_size)
91 {
92 	int res;
93 
94 	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
95 		return -E2BIG;
96 
97 	if (actual_size <= expected_size)
98 		return 0;
99 
100 	if (uaddr.is_kernel)
101 		res = memchr_inv(uaddr.kernel + expected_size, 0,
102 				 actual_size - expected_size) == NULL;
103 	else
104 		res = check_zeroed_user(uaddr.user + expected_size,
105 					actual_size - expected_size);
106 	if (res < 0)
107 		return res;
108 	return res ? 0 : -E2BIG;
109 }
110 
111 const struct bpf_map_ops bpf_map_offload_ops = {
112 	.map_meta_equal = bpf_map_meta_equal,
113 	.map_alloc = bpf_map_offload_map_alloc,
114 	.map_free = bpf_map_offload_map_free,
115 	.map_check_btf = map_check_no_btf,
116 	.map_mem_usage = bpf_map_offload_map_mem_usage,
117 };
118 
bpf_map_write_active_inc(struct bpf_map * map)119 static void bpf_map_write_active_inc(struct bpf_map *map)
120 {
121 	atomic64_inc(&map->writecnt);
122 }
123 
bpf_map_write_active_dec(struct bpf_map * map)124 static void bpf_map_write_active_dec(struct bpf_map *map)
125 {
126 	atomic64_dec(&map->writecnt);
127 }
128 
bpf_map_write_active(const struct bpf_map * map)129 bool bpf_map_write_active(const struct bpf_map *map)
130 {
131 	return atomic64_read(&map->writecnt) != 0;
132 }
133 
bpf_map_value_size(const struct bpf_map * map)134 static u32 bpf_map_value_size(const struct bpf_map *map)
135 {
136 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
137 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
138 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
139 	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
140 		return round_up(map->value_size, 8) * num_possible_cpus();
141 	else if (IS_FD_MAP(map))
142 		return sizeof(u32);
143 	else
144 		return  map->value_size;
145 }
146 
maybe_wait_bpf_programs(struct bpf_map * map)147 static void maybe_wait_bpf_programs(struct bpf_map *map)
148 {
149 	/* Wait for any running non-sleepable BPF programs to complete so that
150 	 * userspace, when we return to it, knows that all non-sleepable
151 	 * programs that could be running use the new map value. For sleepable
152 	 * BPF programs, synchronize_rcu_tasks_trace() should be used to wait
153 	 * for the completions of these programs, but considering the waiting
154 	 * time can be very long and userspace may think it will hang forever,
155 	 * so don't handle sleepable BPF programs now.
156 	 */
157 	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
158 	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
159 		synchronize_rcu();
160 }
161 
unpin_uptr_kaddr(void * kaddr)162 static void unpin_uptr_kaddr(void *kaddr)
163 {
164 	if (kaddr)
165 		unpin_user_page(virt_to_page(kaddr));
166 }
167 
__bpf_obj_unpin_uptrs(struct btf_record * rec,u32 cnt,void * obj)168 static void __bpf_obj_unpin_uptrs(struct btf_record *rec, u32 cnt, void *obj)
169 {
170 	const struct btf_field *field;
171 	void **uptr_addr;
172 	int i;
173 
174 	for (i = 0, field = rec->fields; i < cnt; i++, field++) {
175 		if (field->type != BPF_UPTR)
176 			continue;
177 
178 		uptr_addr = obj + field->offset;
179 		unpin_uptr_kaddr(*uptr_addr);
180 	}
181 }
182 
bpf_obj_unpin_uptrs(struct btf_record * rec,void * obj)183 static void bpf_obj_unpin_uptrs(struct btf_record *rec, void *obj)
184 {
185 	if (!btf_record_has_field(rec, BPF_UPTR))
186 		return;
187 
188 	__bpf_obj_unpin_uptrs(rec, rec->cnt, obj);
189 }
190 
bpf_obj_pin_uptrs(struct btf_record * rec,void * obj)191 static int bpf_obj_pin_uptrs(struct btf_record *rec, void *obj)
192 {
193 	const struct btf_field *field;
194 	const struct btf_type *t;
195 	unsigned long start, end;
196 	struct page *page;
197 	void **uptr_addr;
198 	int i, err;
199 
200 	if (!btf_record_has_field(rec, BPF_UPTR))
201 		return 0;
202 
203 	for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) {
204 		if (field->type != BPF_UPTR)
205 			continue;
206 
207 		uptr_addr = obj + field->offset;
208 		start = *(unsigned long *)uptr_addr;
209 		if (!start)
210 			continue;
211 
212 		t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id);
213 		/* t->size was checked for zero before */
214 		if (check_add_overflow(start, t->size - 1, &end)) {
215 			err = -EFAULT;
216 			goto unpin_all;
217 		}
218 
219 		/* The uptr's struct cannot span across two pages */
220 		if ((start & PAGE_MASK) != (end & PAGE_MASK)) {
221 			err = -EOPNOTSUPP;
222 			goto unpin_all;
223 		}
224 
225 		err = pin_user_pages_fast(start, 1, FOLL_LONGTERM | FOLL_WRITE, &page);
226 		if (err != 1)
227 			goto unpin_all;
228 
229 		if (PageHighMem(page)) {
230 			err = -EOPNOTSUPP;
231 			unpin_user_page(page);
232 			goto unpin_all;
233 		}
234 
235 		*uptr_addr = page_address(page) + offset_in_page(start);
236 	}
237 
238 	return 0;
239 
240 unpin_all:
241 	__bpf_obj_unpin_uptrs(rec, i, obj);
242 	return err;
243 }
244 
bpf_map_update_value(struct bpf_map * map,struct file * map_file,void * key,void * value,__u64 flags)245 static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
246 				void *key, void *value, __u64 flags)
247 {
248 	int err;
249 
250 	/* Need to create a kthread, thus must support schedule */
251 	if (bpf_map_is_offloaded(map)) {
252 		return bpf_map_offload_update_elem(map, key, value, flags);
253 	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
254 		   map->map_type == BPF_MAP_TYPE_ARENA ||
255 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
256 		return map->ops->map_update_elem(map, key, value, flags);
257 	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
258 		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
259 		return sock_map_update_elem_sys(map, key, value, flags);
260 	} else if (IS_FD_PROG_ARRAY(map)) {
261 		return bpf_fd_array_map_update_elem(map, map_file, key, value,
262 						    flags);
263 	}
264 
265 	bpf_disable_instrumentation();
266 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
267 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
268 		err = bpf_percpu_hash_update(map, key, value, flags);
269 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
270 		err = bpf_percpu_array_update(map, key, value, flags);
271 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
272 		err = bpf_percpu_cgroup_storage_update(map, key, value,
273 						       flags);
274 	} else if (IS_FD_ARRAY(map)) {
275 		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
276 						   flags);
277 	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
278 		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
279 						  flags);
280 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
281 		/* rcu_read_lock() is not needed */
282 		err = bpf_fd_reuseport_array_update_elem(map, key, value,
283 							 flags);
284 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
285 		   map->map_type == BPF_MAP_TYPE_STACK ||
286 		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
287 		err = map->ops->map_push_elem(map, value, flags);
288 	} else {
289 		err = bpf_obj_pin_uptrs(map->record, value);
290 		if (!err) {
291 			rcu_read_lock();
292 			err = map->ops->map_update_elem(map, key, value, flags);
293 			rcu_read_unlock();
294 			if (err)
295 				bpf_obj_unpin_uptrs(map->record, value);
296 		}
297 	}
298 	bpf_enable_instrumentation();
299 
300 	return err;
301 }
302 
bpf_map_copy_value(struct bpf_map * map,void * key,void * value,__u64 flags)303 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
304 			      __u64 flags)
305 {
306 	void *ptr;
307 	int err;
308 
309 	if (bpf_map_is_offloaded(map))
310 		return bpf_map_offload_lookup_elem(map, key, value);
311 
312 	bpf_disable_instrumentation();
313 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
314 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
315 		err = bpf_percpu_hash_copy(map, key, value);
316 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
317 		err = bpf_percpu_array_copy(map, key, value);
318 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
319 		err = bpf_percpu_cgroup_storage_copy(map, key, value);
320 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
321 		err = bpf_stackmap_copy(map, key, value);
322 	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
323 		err = bpf_fd_array_map_lookup_elem(map, key, value);
324 	} else if (IS_FD_HASH(map)) {
325 		err = bpf_fd_htab_map_lookup_elem(map, key, value);
326 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
327 		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
328 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
329 		   map->map_type == BPF_MAP_TYPE_STACK ||
330 		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
331 		err = map->ops->map_peek_elem(map, value);
332 	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
333 		/* struct_ops map requires directly updating "value" */
334 		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
335 	} else {
336 		rcu_read_lock();
337 		if (map->ops->map_lookup_elem_sys_only)
338 			ptr = map->ops->map_lookup_elem_sys_only(map, key);
339 		else
340 			ptr = map->ops->map_lookup_elem(map, key);
341 		if (IS_ERR(ptr)) {
342 			err = PTR_ERR(ptr);
343 		} else if (!ptr) {
344 			err = -ENOENT;
345 		} else {
346 			err = 0;
347 			if (flags & BPF_F_LOCK)
348 				/* lock 'ptr' and copy everything but lock */
349 				copy_map_value_locked(map, value, ptr, true);
350 			else
351 				copy_map_value(map, value, ptr);
352 			/* mask lock and timer, since value wasn't zero inited */
353 			check_and_init_map_value(map, value);
354 		}
355 		rcu_read_unlock();
356 	}
357 
358 	bpf_enable_instrumentation();
359 
360 	return err;
361 }
362 
363 /* Please, do not use this function outside from the map creation path
364  * (e.g. in map update path) without taking care of setting the active
365  * memory cgroup (see at bpf_map_kmalloc_node() for example).
366  */
__bpf_map_area_alloc(u64 size,int numa_node,bool mmapable)367 static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
368 {
369 	/* We really just want to fail instead of triggering OOM killer
370 	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
371 	 * which is used for lower order allocation requests.
372 	 *
373 	 * It has been observed that higher order allocation requests done by
374 	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
375 	 * to reclaim memory from the page cache, thus we set
376 	 * __GFP_RETRY_MAYFAIL to avoid such situations.
377 	 */
378 
379 	gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
380 	unsigned int flags = 0;
381 	unsigned long align = 1;
382 	void *area;
383 
384 	if (size >= SIZE_MAX)
385 		return NULL;
386 
387 	/* kmalloc()'ed memory can't be mmap()'ed */
388 	if (mmapable) {
389 		BUG_ON(!PAGE_ALIGNED(size));
390 		align = SHMLBA;
391 		flags = VM_USERMAP;
392 	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
393 		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
394 				    numa_node);
395 		if (area != NULL)
396 			return area;
397 	}
398 
399 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
400 			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
401 			flags, numa_node, __builtin_return_address(0));
402 }
403 
bpf_map_area_alloc(u64 size,int numa_node)404 void *bpf_map_area_alloc(u64 size, int numa_node)
405 {
406 	return __bpf_map_area_alloc(size, numa_node, false);
407 }
408 
bpf_map_area_mmapable_alloc(u64 size,int numa_node)409 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
410 {
411 	return __bpf_map_area_alloc(size, numa_node, true);
412 }
413 
bpf_map_area_free(void * area)414 void bpf_map_area_free(void *area)
415 {
416 	kvfree(area);
417 }
418 
bpf_map_flags_retain_permanent(u32 flags)419 static u32 bpf_map_flags_retain_permanent(u32 flags)
420 {
421 	/* Some map creation flags are not tied to the map object but
422 	 * rather to the map fd instead, so they have no meaning upon
423 	 * map object inspection since multiple file descriptors with
424 	 * different (access) properties can exist here. Thus, given
425 	 * this has zero meaning for the map itself, lets clear these
426 	 * from here.
427 	 */
428 	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
429 }
430 
bpf_map_init_from_attr(struct bpf_map * map,union bpf_attr * attr)431 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
432 {
433 	map->map_type = attr->map_type;
434 	map->key_size = attr->key_size;
435 	map->value_size = attr->value_size;
436 	map->max_entries = attr->max_entries;
437 	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
438 	map->numa_node = bpf_map_attr_numa_node(attr);
439 	map->map_extra = attr->map_extra;
440 }
441 
bpf_map_alloc_id(struct bpf_map * map)442 static int bpf_map_alloc_id(struct bpf_map *map)
443 {
444 	int id;
445 
446 	idr_preload(GFP_KERNEL);
447 	spin_lock_bh(&map_idr_lock);
448 	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
449 	if (id > 0)
450 		map->id = id;
451 	spin_unlock_bh(&map_idr_lock);
452 	idr_preload_end();
453 
454 	if (WARN_ON_ONCE(!id))
455 		return -ENOSPC;
456 
457 	return id > 0 ? 0 : id;
458 }
459 
bpf_map_free_id(struct bpf_map * map)460 void bpf_map_free_id(struct bpf_map *map)
461 {
462 	unsigned long flags;
463 
464 	/* Offloaded maps are removed from the IDR store when their device
465 	 * disappears - even if someone holds an fd to them they are unusable,
466 	 * the memory is gone, all ops will fail; they are simply waiting for
467 	 * refcnt to drop to be freed.
468 	 */
469 	if (!map->id)
470 		return;
471 
472 	spin_lock_irqsave(&map_idr_lock, flags);
473 
474 	idr_remove(&map_idr, map->id);
475 	map->id = 0;
476 
477 	spin_unlock_irqrestore(&map_idr_lock, flags);
478 }
479 
480 #ifdef CONFIG_MEMCG
bpf_map_save_memcg(struct bpf_map * map)481 static void bpf_map_save_memcg(struct bpf_map *map)
482 {
483 	/* Currently if a map is created by a process belonging to the root
484 	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
485 	 * So we have to check map->objcg for being NULL each time it's
486 	 * being used.
487 	 */
488 	if (memcg_bpf_enabled())
489 		map->objcg = get_obj_cgroup_from_current();
490 }
491 
bpf_map_release_memcg(struct bpf_map * map)492 static void bpf_map_release_memcg(struct bpf_map *map)
493 {
494 	if (map->objcg)
495 		obj_cgroup_put(map->objcg);
496 }
497 
bpf_map_get_memcg(const struct bpf_map * map)498 static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
499 {
500 	if (map->objcg)
501 		return get_mem_cgroup_from_objcg(map->objcg);
502 
503 	return root_mem_cgroup;
504 }
505 
bpf_map_kmalloc_node(const struct bpf_map * map,size_t size,gfp_t flags,int node)506 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
507 			   int node)
508 {
509 	struct mem_cgroup *memcg, *old_memcg;
510 	void *ptr;
511 
512 	memcg = bpf_map_get_memcg(map);
513 	old_memcg = set_active_memcg(memcg);
514 	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
515 	set_active_memcg(old_memcg);
516 	mem_cgroup_put(memcg);
517 
518 	return ptr;
519 }
520 
bpf_map_kzalloc(const struct bpf_map * map,size_t size,gfp_t flags)521 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
522 {
523 	struct mem_cgroup *memcg, *old_memcg;
524 	void *ptr;
525 
526 	memcg = bpf_map_get_memcg(map);
527 	old_memcg = set_active_memcg(memcg);
528 	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
529 	set_active_memcg(old_memcg);
530 	mem_cgroup_put(memcg);
531 
532 	return ptr;
533 }
534 
bpf_map_kvcalloc(struct bpf_map * map,size_t n,size_t size,gfp_t flags)535 void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
536 		       gfp_t flags)
537 {
538 	struct mem_cgroup *memcg, *old_memcg;
539 	void *ptr;
540 
541 	memcg = bpf_map_get_memcg(map);
542 	old_memcg = set_active_memcg(memcg);
543 	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
544 	set_active_memcg(old_memcg);
545 	mem_cgroup_put(memcg);
546 
547 	return ptr;
548 }
549 
bpf_map_alloc_percpu(const struct bpf_map * map,size_t size,size_t align,gfp_t flags)550 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
551 				    size_t align, gfp_t flags)
552 {
553 	struct mem_cgroup *memcg, *old_memcg;
554 	void __percpu *ptr;
555 
556 	memcg = bpf_map_get_memcg(map);
557 	old_memcg = set_active_memcg(memcg);
558 	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
559 	set_active_memcg(old_memcg);
560 	mem_cgroup_put(memcg);
561 
562 	return ptr;
563 }
564 
565 #else
bpf_map_save_memcg(struct bpf_map * map)566 static void bpf_map_save_memcg(struct bpf_map *map)
567 {
568 }
569 
bpf_map_release_memcg(struct bpf_map * map)570 static void bpf_map_release_memcg(struct bpf_map *map)
571 {
572 }
573 #endif
574 
can_alloc_pages(void)575 static bool can_alloc_pages(void)
576 {
577 	return preempt_count() == 0 && !irqs_disabled() &&
578 		!IS_ENABLED(CONFIG_PREEMPT_RT);
579 }
580 
__bpf_alloc_page(int nid)581 static struct page *__bpf_alloc_page(int nid)
582 {
583 	if (!can_alloc_pages())
584 		return alloc_pages_nolock(nid, 0);
585 
586 	return alloc_pages_node(nid,
587 				GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT
588 				| __GFP_NOWARN,
589 				0);
590 }
591 
bpf_map_alloc_pages(const struct bpf_map * map,int nid,unsigned long nr_pages,struct page ** pages)592 int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
593 			unsigned long nr_pages, struct page **pages)
594 {
595 	unsigned long i, j;
596 	struct page *pg;
597 	int ret = 0;
598 #ifdef CONFIG_MEMCG
599 	struct mem_cgroup *memcg, *old_memcg;
600 
601 	memcg = bpf_map_get_memcg(map);
602 	old_memcg = set_active_memcg(memcg);
603 #endif
604 	for (i = 0; i < nr_pages; i++) {
605 		pg = __bpf_alloc_page(nid);
606 
607 		if (pg) {
608 			pages[i] = pg;
609 			continue;
610 		}
611 		for (j = 0; j < i; j++)
612 			free_pages_nolock(pages[j], 0);
613 		ret = -ENOMEM;
614 		break;
615 	}
616 
617 #ifdef CONFIG_MEMCG
618 	set_active_memcg(old_memcg);
619 	mem_cgroup_put(memcg);
620 #endif
621 	return ret;
622 }
623 
624 
btf_field_cmp(const void * a,const void * b)625 static int btf_field_cmp(const void *a, const void *b)
626 {
627 	const struct btf_field *f1 = a, *f2 = b;
628 
629 	if (f1->offset < f2->offset)
630 		return -1;
631 	else if (f1->offset > f2->offset)
632 		return 1;
633 	return 0;
634 }
635 
btf_record_find(const struct btf_record * rec,u32 offset,u32 field_mask)636 struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
637 				  u32 field_mask)
638 {
639 	struct btf_field *field;
640 
641 	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
642 		return NULL;
643 	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
644 	if (!field || !(field->type & field_mask))
645 		return NULL;
646 	return field;
647 }
648 
btf_record_free(struct btf_record * rec)649 void btf_record_free(struct btf_record *rec)
650 {
651 	int i;
652 
653 	if (IS_ERR_OR_NULL(rec))
654 		return;
655 	for (i = 0; i < rec->cnt; i++) {
656 		switch (rec->fields[i].type) {
657 		case BPF_KPTR_UNREF:
658 		case BPF_KPTR_REF:
659 		case BPF_KPTR_PERCPU:
660 		case BPF_UPTR:
661 			if (rec->fields[i].kptr.module)
662 				module_put(rec->fields[i].kptr.module);
663 			if (btf_is_kernel(rec->fields[i].kptr.btf))
664 				btf_put(rec->fields[i].kptr.btf);
665 			break;
666 		case BPF_LIST_HEAD:
667 		case BPF_LIST_NODE:
668 		case BPF_RB_ROOT:
669 		case BPF_RB_NODE:
670 		case BPF_SPIN_LOCK:
671 		case BPF_RES_SPIN_LOCK:
672 		case BPF_TIMER:
673 		case BPF_REFCOUNT:
674 		case BPF_WORKQUEUE:
675 			/* Nothing to release */
676 			break;
677 		default:
678 			WARN_ON_ONCE(1);
679 			continue;
680 		}
681 	}
682 	kfree(rec);
683 }
684 
bpf_map_free_record(struct bpf_map * map)685 void bpf_map_free_record(struct bpf_map *map)
686 {
687 	btf_record_free(map->record);
688 	map->record = NULL;
689 }
690 
btf_record_dup(const struct btf_record * rec)691 struct btf_record *btf_record_dup(const struct btf_record *rec)
692 {
693 	const struct btf_field *fields;
694 	struct btf_record *new_rec;
695 	int ret, size, i;
696 
697 	if (IS_ERR_OR_NULL(rec))
698 		return NULL;
699 	size = struct_size(rec, fields, rec->cnt);
700 	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
701 	if (!new_rec)
702 		return ERR_PTR(-ENOMEM);
703 	/* Do a deep copy of the btf_record */
704 	fields = rec->fields;
705 	new_rec->cnt = 0;
706 	for (i = 0; i < rec->cnt; i++) {
707 		switch (fields[i].type) {
708 		case BPF_KPTR_UNREF:
709 		case BPF_KPTR_REF:
710 		case BPF_KPTR_PERCPU:
711 		case BPF_UPTR:
712 			if (btf_is_kernel(fields[i].kptr.btf))
713 				btf_get(fields[i].kptr.btf);
714 			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
715 				ret = -ENXIO;
716 				goto free;
717 			}
718 			break;
719 		case BPF_LIST_HEAD:
720 		case BPF_LIST_NODE:
721 		case BPF_RB_ROOT:
722 		case BPF_RB_NODE:
723 		case BPF_SPIN_LOCK:
724 		case BPF_RES_SPIN_LOCK:
725 		case BPF_TIMER:
726 		case BPF_REFCOUNT:
727 		case BPF_WORKQUEUE:
728 			/* Nothing to acquire */
729 			break;
730 		default:
731 			ret = -EFAULT;
732 			WARN_ON_ONCE(1);
733 			goto free;
734 		}
735 		new_rec->cnt++;
736 	}
737 	return new_rec;
738 free:
739 	btf_record_free(new_rec);
740 	return ERR_PTR(ret);
741 }
742 
btf_record_equal(const struct btf_record * rec_a,const struct btf_record * rec_b)743 bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
744 {
745 	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
746 	int size;
747 
748 	if (!a_has_fields && !b_has_fields)
749 		return true;
750 	if (a_has_fields != b_has_fields)
751 		return false;
752 	if (rec_a->cnt != rec_b->cnt)
753 		return false;
754 	size = struct_size(rec_a, fields, rec_a->cnt);
755 	/* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
756 	 * members are zeroed out. So memcmp is safe to do without worrying
757 	 * about padding/unused fields.
758 	 *
759 	 * While spin_lock, timer, and kptr have no relation to map BTF,
760 	 * list_head metadata is specific to map BTF, the btf and value_rec
761 	 * members in particular. btf is the map BTF, while value_rec points to
762 	 * btf_record in that map BTF.
763 	 *
764 	 * So while by default, we don't rely on the map BTF (which the records
765 	 * were parsed from) matching for both records, which is not backwards
766 	 * compatible, in case list_head is part of it, we implicitly rely on
767 	 * that by way of depending on memcmp succeeding for it.
768 	 */
769 	return !memcmp(rec_a, rec_b, size);
770 }
771 
bpf_obj_free_timer(const struct btf_record * rec,void * obj)772 void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
773 {
774 	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
775 		return;
776 	bpf_timer_cancel_and_free(obj + rec->timer_off);
777 }
778 
bpf_obj_free_workqueue(const struct btf_record * rec,void * obj)779 void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj)
780 {
781 	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_WORKQUEUE)))
782 		return;
783 	bpf_wq_cancel_and_free(obj + rec->wq_off);
784 }
785 
bpf_obj_free_fields(const struct btf_record * rec,void * obj)786 void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
787 {
788 	const struct btf_field *fields;
789 	int i;
790 
791 	if (IS_ERR_OR_NULL(rec))
792 		return;
793 	fields = rec->fields;
794 	for (i = 0; i < rec->cnt; i++) {
795 		struct btf_struct_meta *pointee_struct_meta;
796 		const struct btf_field *field = &fields[i];
797 		void *field_ptr = obj + field->offset;
798 		void *xchgd_field;
799 
800 		switch (fields[i].type) {
801 		case BPF_SPIN_LOCK:
802 		case BPF_RES_SPIN_LOCK:
803 			break;
804 		case BPF_TIMER:
805 			bpf_timer_cancel_and_free(field_ptr);
806 			break;
807 		case BPF_WORKQUEUE:
808 			bpf_wq_cancel_and_free(field_ptr);
809 			break;
810 		case BPF_KPTR_UNREF:
811 			WRITE_ONCE(*(u64 *)field_ptr, 0);
812 			break;
813 		case BPF_KPTR_REF:
814 		case BPF_KPTR_PERCPU:
815 			xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
816 			if (!xchgd_field)
817 				break;
818 
819 			if (!btf_is_kernel(field->kptr.btf)) {
820 				pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
821 									   field->kptr.btf_id);
822 				__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
823 								 pointee_struct_meta->record : NULL,
824 								 fields[i].type == BPF_KPTR_PERCPU);
825 			} else {
826 				field->kptr.dtor(xchgd_field);
827 			}
828 			break;
829 		case BPF_UPTR:
830 			/* The caller ensured that no one is using the uptr */
831 			unpin_uptr_kaddr(*(void **)field_ptr);
832 			break;
833 		case BPF_LIST_HEAD:
834 			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
835 				continue;
836 			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
837 			break;
838 		case BPF_RB_ROOT:
839 			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
840 				continue;
841 			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
842 			break;
843 		case BPF_LIST_NODE:
844 		case BPF_RB_NODE:
845 		case BPF_REFCOUNT:
846 			break;
847 		default:
848 			WARN_ON_ONCE(1);
849 			continue;
850 		}
851 	}
852 }
853 
bpf_map_free(struct bpf_map * map)854 static void bpf_map_free(struct bpf_map *map)
855 {
856 	struct btf_record *rec = map->record;
857 	struct btf *btf = map->btf;
858 
859 	/* implementation dependent freeing. Disabling migration to simplify
860 	 * the free of values or special fields allocated from bpf memory
861 	 * allocator.
862 	 */
863 	migrate_disable();
864 	map->ops->map_free(map);
865 	migrate_enable();
866 
867 	/* Delay freeing of btf_record for maps, as map_free
868 	 * callback usually needs access to them. It is better to do it here
869 	 * than require each callback to do the free itself manually.
870 	 *
871 	 * Note that the btf_record stashed in map->inner_map_meta->record was
872 	 * already freed using the map_free callback for map in map case which
873 	 * eventually calls bpf_map_free_meta, since inner_map_meta is only a
874 	 * template bpf_map struct used during verification.
875 	 */
876 	btf_record_free(rec);
877 	/* Delay freeing of btf for maps, as map_free callback may need
878 	 * struct_meta info which will be freed with btf_put().
879 	 */
880 	btf_put(btf);
881 }
882 
883 /* called from workqueue */
bpf_map_free_deferred(struct work_struct * work)884 static void bpf_map_free_deferred(struct work_struct *work)
885 {
886 	struct bpf_map *map = container_of(work, struct bpf_map, work);
887 
888 	security_bpf_map_free(map);
889 	bpf_map_release_memcg(map);
890 	bpf_map_owner_free(map);
891 	bpf_map_free(map);
892 }
893 
bpf_map_put_uref(struct bpf_map * map)894 static void bpf_map_put_uref(struct bpf_map *map)
895 {
896 	if (atomic64_dec_and_test(&map->usercnt)) {
897 		if (map->ops->map_release_uref)
898 			map->ops->map_release_uref(map);
899 	}
900 }
901 
bpf_map_free_in_work(struct bpf_map * map)902 static void bpf_map_free_in_work(struct bpf_map *map)
903 {
904 	INIT_WORK(&map->work, bpf_map_free_deferred);
905 	/* Avoid spawning kworkers, since they all might contend
906 	 * for the same mutex like slab_mutex.
907 	 */
908 	queue_work(system_unbound_wq, &map->work);
909 }
910 
bpf_map_free_rcu_gp(struct rcu_head * rcu)911 static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
912 {
913 	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
914 }
915 
bpf_map_free_mult_rcu_gp(struct rcu_head * rcu)916 static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
917 {
918 	if (rcu_trace_implies_rcu_gp())
919 		bpf_map_free_rcu_gp(rcu);
920 	else
921 		call_rcu(rcu, bpf_map_free_rcu_gp);
922 }
923 
924 /* decrement map refcnt and schedule it for freeing via workqueue
925  * (underlying map implementation ops->map_free() might sleep)
926  */
bpf_map_put(struct bpf_map * map)927 void bpf_map_put(struct bpf_map *map)
928 {
929 	if (atomic64_dec_and_test(&map->refcnt)) {
930 		/* bpf_map_free_id() must be called first */
931 		bpf_map_free_id(map);
932 
933 		WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
934 		if (READ_ONCE(map->free_after_mult_rcu_gp))
935 			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
936 		else if (READ_ONCE(map->free_after_rcu_gp))
937 			call_rcu(&map->rcu, bpf_map_free_rcu_gp);
938 		else
939 			bpf_map_free_in_work(map);
940 	}
941 }
942 EXPORT_SYMBOL_GPL(bpf_map_put);
943 
bpf_map_put_with_uref(struct bpf_map * map)944 void bpf_map_put_with_uref(struct bpf_map *map)
945 {
946 	bpf_map_put_uref(map);
947 	bpf_map_put(map);
948 }
949 
bpf_map_release(struct inode * inode,struct file * filp)950 static int bpf_map_release(struct inode *inode, struct file *filp)
951 {
952 	struct bpf_map *map = filp->private_data;
953 
954 	if (map->ops->map_release)
955 		map->ops->map_release(map, filp);
956 
957 	bpf_map_put_with_uref(map);
958 	return 0;
959 }
960 
map_get_sys_perms(struct bpf_map * map,struct fd f)961 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
962 {
963 	fmode_t mode = fd_file(f)->f_mode;
964 
965 	/* Our file permissions may have been overridden by global
966 	 * map permissions facing syscall side.
967 	 */
968 	if (READ_ONCE(map->frozen))
969 		mode &= ~FMODE_CAN_WRITE;
970 	return mode;
971 }
972 
973 #ifdef CONFIG_PROC_FS
974 /* Show the memory usage of a bpf map */
bpf_map_memory_usage(const struct bpf_map * map)975 static u64 bpf_map_memory_usage(const struct bpf_map *map)
976 {
977 	return map->ops->map_mem_usage(map);
978 }
979 
bpf_map_show_fdinfo(struct seq_file * m,struct file * filp)980 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
981 {
982 	struct bpf_map *map = filp->private_data;
983 	u32 type = 0, jited = 0;
984 
985 	spin_lock(&map->owner_lock);
986 	if (map->owner) {
987 		type  = map->owner->type;
988 		jited = map->owner->jited;
989 	}
990 	spin_unlock(&map->owner_lock);
991 
992 	seq_printf(m,
993 		   "map_type:\t%u\n"
994 		   "key_size:\t%u\n"
995 		   "value_size:\t%u\n"
996 		   "max_entries:\t%u\n"
997 		   "map_flags:\t%#x\n"
998 		   "map_extra:\t%#llx\n"
999 		   "memlock:\t%llu\n"
1000 		   "map_id:\t%u\n"
1001 		   "frozen:\t%u\n",
1002 		   map->map_type,
1003 		   map->key_size,
1004 		   map->value_size,
1005 		   map->max_entries,
1006 		   map->map_flags,
1007 		   (unsigned long long)map->map_extra,
1008 		   bpf_map_memory_usage(map),
1009 		   map->id,
1010 		   READ_ONCE(map->frozen));
1011 	if (type) {
1012 		seq_printf(m, "owner_prog_type:\t%u\n", type);
1013 		seq_printf(m, "owner_jited:\t%u\n", jited);
1014 	}
1015 }
1016 #endif
1017 
bpf_dummy_read(struct file * filp,char __user * buf,size_t siz,loff_t * ppos)1018 static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
1019 			      loff_t *ppos)
1020 {
1021 	/* We need this handler such that alloc_file() enables
1022 	 * f_mode with FMODE_CAN_READ.
1023 	 */
1024 	return -EINVAL;
1025 }
1026 
bpf_dummy_write(struct file * filp,const char __user * buf,size_t siz,loff_t * ppos)1027 static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
1028 			       size_t siz, loff_t *ppos)
1029 {
1030 	/* We need this handler such that alloc_file() enables
1031 	 * f_mode with FMODE_CAN_WRITE.
1032 	 */
1033 	return -EINVAL;
1034 }
1035 
1036 /* called for any extra memory-mapped regions (except initial) */
bpf_map_mmap_open(struct vm_area_struct * vma)1037 static void bpf_map_mmap_open(struct vm_area_struct *vma)
1038 {
1039 	struct bpf_map *map = vma->vm_file->private_data;
1040 
1041 	if (vma->vm_flags & VM_MAYWRITE)
1042 		bpf_map_write_active_inc(map);
1043 }
1044 
1045 /* called for all unmapped memory region (including initial) */
bpf_map_mmap_close(struct vm_area_struct * vma)1046 static void bpf_map_mmap_close(struct vm_area_struct *vma)
1047 {
1048 	struct bpf_map *map = vma->vm_file->private_data;
1049 
1050 	if (vma->vm_flags & VM_MAYWRITE)
1051 		bpf_map_write_active_dec(map);
1052 }
1053 
1054 static const struct vm_operations_struct bpf_map_default_vmops = {
1055 	.open		= bpf_map_mmap_open,
1056 	.close		= bpf_map_mmap_close,
1057 };
1058 
bpf_map_mmap(struct file * filp,struct vm_area_struct * vma)1059 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
1060 {
1061 	struct bpf_map *map = filp->private_data;
1062 	int err = 0;
1063 
1064 	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
1065 		return -ENOTSUPP;
1066 
1067 	if (!(vma->vm_flags & VM_SHARED))
1068 		return -EINVAL;
1069 
1070 	mutex_lock(&map->freeze_mutex);
1071 
1072 	if (vma->vm_flags & VM_WRITE) {
1073 		if (map->frozen) {
1074 			err = -EPERM;
1075 			goto out;
1076 		}
1077 		/* map is meant to be read-only, so do not allow mapping as
1078 		 * writable, because it's possible to leak a writable page
1079 		 * reference and allows user-space to still modify it after
1080 		 * freezing, while verifier will assume contents do not change
1081 		 */
1082 		if (map->map_flags & BPF_F_RDONLY_PROG) {
1083 			err = -EACCES;
1084 			goto out;
1085 		}
1086 		bpf_map_write_active_inc(map);
1087 	}
1088 out:
1089 	mutex_unlock(&map->freeze_mutex);
1090 	if (err)
1091 		return err;
1092 
1093 	/* set default open/close callbacks */
1094 	vma->vm_ops = &bpf_map_default_vmops;
1095 	vma->vm_private_data = map;
1096 	vm_flags_clear(vma, VM_MAYEXEC);
1097 	/* If mapping is read-only, then disallow potentially re-mapping with
1098 	 * PROT_WRITE by dropping VM_MAYWRITE flag. This VM_MAYWRITE clearing
1099 	 * means that as far as BPF map's memory-mapped VMAs are concerned,
1100 	 * VM_WRITE and VM_MAYWRITE and equivalent, if one of them is set,
1101 	 * both should be set, so we can forget about VM_MAYWRITE and always
1102 	 * check just VM_WRITE
1103 	 */
1104 	if (!(vma->vm_flags & VM_WRITE))
1105 		vm_flags_clear(vma, VM_MAYWRITE);
1106 
1107 	err = map->ops->map_mmap(map, vma);
1108 	if (err) {
1109 		if (vma->vm_flags & VM_WRITE)
1110 			bpf_map_write_active_dec(map);
1111 	}
1112 
1113 	return err;
1114 }
1115 
bpf_map_poll(struct file * filp,struct poll_table_struct * pts)1116 static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
1117 {
1118 	struct bpf_map *map = filp->private_data;
1119 
1120 	if (map->ops->map_poll)
1121 		return map->ops->map_poll(map, filp, pts);
1122 
1123 	return EPOLLERR;
1124 }
1125 
bpf_get_unmapped_area(struct file * filp,unsigned long addr,unsigned long len,unsigned long pgoff,unsigned long flags)1126 static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr,
1127 					   unsigned long len, unsigned long pgoff,
1128 					   unsigned long flags)
1129 {
1130 	struct bpf_map *map = filp->private_data;
1131 
1132 	if (map->ops->map_get_unmapped_area)
1133 		return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags);
1134 #ifdef CONFIG_MMU
1135 	return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
1136 #else
1137 	return addr;
1138 #endif
1139 }
1140 
1141 const struct file_operations bpf_map_fops = {
1142 #ifdef CONFIG_PROC_FS
1143 	.show_fdinfo	= bpf_map_show_fdinfo,
1144 #endif
1145 	.release	= bpf_map_release,
1146 	.read		= bpf_dummy_read,
1147 	.write		= bpf_dummy_write,
1148 	.mmap		= bpf_map_mmap,
1149 	.poll		= bpf_map_poll,
1150 	.get_unmapped_area = bpf_get_unmapped_area,
1151 };
1152 
bpf_map_new_fd(struct bpf_map * map,int flags)1153 int bpf_map_new_fd(struct bpf_map *map, int flags)
1154 {
1155 	int ret;
1156 
1157 	ret = security_bpf_map(map, OPEN_FMODE(flags));
1158 	if (ret < 0)
1159 		return ret;
1160 
1161 	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
1162 				flags | O_CLOEXEC);
1163 }
1164 
bpf_get_file_flag(int flags)1165 int bpf_get_file_flag(int flags)
1166 {
1167 	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
1168 		return -EINVAL;
1169 	if (flags & BPF_F_RDONLY)
1170 		return O_RDONLY;
1171 	if (flags & BPF_F_WRONLY)
1172 		return O_WRONLY;
1173 	return O_RDWR;
1174 }
1175 
1176 /* helper macro to check that unused fields 'union bpf_attr' are zero */
1177 #define CHECK_ATTR(CMD) \
1178 	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
1179 		   sizeof(attr->CMD##_LAST_FIELD), 0, \
1180 		   sizeof(*attr) - \
1181 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
1182 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
1183 
1184 /* dst and src must have at least "size" number of bytes.
1185  * Return strlen on success and < 0 on error.
1186  */
bpf_obj_name_cpy(char * dst,const char * src,unsigned int size)1187 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
1188 {
1189 	const char *end = src + size;
1190 	const char *orig_src = src;
1191 
1192 	memset(dst, 0, size);
1193 	/* Copy all isalnum(), '_' and '.' chars. */
1194 	while (src < end && *src) {
1195 		if (!isalnum(*src) &&
1196 		    *src != '_' && *src != '.')
1197 			return -EINVAL;
1198 		*dst++ = *src++;
1199 	}
1200 
1201 	/* No '\0' found in "size" number of bytes */
1202 	if (src == end)
1203 		return -EINVAL;
1204 
1205 	return src - orig_src;
1206 }
1207 
map_check_no_btf(const struct bpf_map * map,const struct btf * btf,const struct btf_type * key_type,const struct btf_type * value_type)1208 int map_check_no_btf(const struct bpf_map *map,
1209 		     const struct btf *btf,
1210 		     const struct btf_type *key_type,
1211 		     const struct btf_type *value_type)
1212 {
1213 	return -ENOTSUPP;
1214 }
1215 
map_check_btf(struct bpf_map * map,struct bpf_token * token,const struct btf * btf,u32 btf_key_id,u32 btf_value_id)1216 static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
1217 			 const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
1218 {
1219 	const struct btf_type *key_type, *value_type;
1220 	u32 key_size, value_size;
1221 	int ret = 0;
1222 
1223 	/* Some maps allow key to be unspecified. */
1224 	if (btf_key_id) {
1225 		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
1226 		if (!key_type || key_size != map->key_size)
1227 			return -EINVAL;
1228 	} else {
1229 		key_type = btf_type_by_id(btf, 0);
1230 		if (!map->ops->map_check_btf)
1231 			return -EINVAL;
1232 	}
1233 
1234 	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
1235 	if (!value_type || value_size != map->value_size)
1236 		return -EINVAL;
1237 
1238 	map->record = btf_parse_fields(btf, value_type,
1239 				       BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
1240 				       BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE | BPF_UPTR,
1241 				       map->value_size);
1242 	if (!IS_ERR_OR_NULL(map->record)) {
1243 		int i;
1244 
1245 		if (!bpf_token_capable(token, CAP_BPF)) {
1246 			ret = -EPERM;
1247 			goto free_map_tab;
1248 		}
1249 		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
1250 			ret = -EACCES;
1251 			goto free_map_tab;
1252 		}
1253 		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
1254 			switch (map->record->field_mask & (1 << i)) {
1255 			case 0:
1256 				continue;
1257 			case BPF_SPIN_LOCK:
1258 			case BPF_RES_SPIN_LOCK:
1259 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1260 				    map->map_type != BPF_MAP_TYPE_ARRAY &&
1261 				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
1262 				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1263 				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1264 				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
1265 				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
1266 					ret = -EOPNOTSUPP;
1267 					goto free_map_tab;
1268 				}
1269 				break;
1270 			case BPF_TIMER:
1271 			case BPF_WORKQUEUE:
1272 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1273 				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1274 				    map->map_type != BPF_MAP_TYPE_ARRAY) {
1275 					ret = -EOPNOTSUPP;
1276 					goto free_map_tab;
1277 				}
1278 				break;
1279 			case BPF_KPTR_UNREF:
1280 			case BPF_KPTR_REF:
1281 			case BPF_KPTR_PERCPU:
1282 			case BPF_REFCOUNT:
1283 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1284 				    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
1285 				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1286 				    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
1287 				    map->map_type != BPF_MAP_TYPE_ARRAY &&
1288 				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
1289 				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1290 				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1291 				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
1292 				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
1293 					ret = -EOPNOTSUPP;
1294 					goto free_map_tab;
1295 				}
1296 				break;
1297 			case BPF_UPTR:
1298 				if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) {
1299 					ret = -EOPNOTSUPP;
1300 					goto free_map_tab;
1301 				}
1302 				break;
1303 			case BPF_LIST_HEAD:
1304 			case BPF_RB_ROOT:
1305 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1306 				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1307 				    map->map_type != BPF_MAP_TYPE_ARRAY) {
1308 					ret = -EOPNOTSUPP;
1309 					goto free_map_tab;
1310 				}
1311 				break;
1312 			default:
1313 				/* Fail if map_type checks are missing for a field type */
1314 				ret = -EOPNOTSUPP;
1315 				goto free_map_tab;
1316 			}
1317 		}
1318 	}
1319 
1320 	ret = btf_check_and_fixup_fields(btf, map->record);
1321 	if (ret < 0)
1322 		goto free_map_tab;
1323 
1324 	if (map->ops->map_check_btf) {
1325 		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
1326 		if (ret < 0)
1327 			goto free_map_tab;
1328 	}
1329 
1330 	return ret;
1331 free_map_tab:
1332 	bpf_map_free_record(map);
1333 	return ret;
1334 }
1335 
bpf_net_capable(void)1336 static bool bpf_net_capable(void)
1337 {
1338 	return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
1339 }
1340 
1341 #define BPF_MAP_CREATE_LAST_FIELD map_token_fd
1342 /* called via syscall */
map_create(union bpf_attr * attr,bool kernel)1343 static int map_create(union bpf_attr *attr, bool kernel)
1344 {
1345 	const struct bpf_map_ops *ops;
1346 	struct bpf_token *token = NULL;
1347 	int numa_node = bpf_map_attr_numa_node(attr);
1348 	u32 map_type = attr->map_type;
1349 	struct bpf_map *map;
1350 	bool token_flag;
1351 	int f_flags;
1352 	int err;
1353 
1354 	err = CHECK_ATTR(BPF_MAP_CREATE);
1355 	if (err)
1356 		return -EINVAL;
1357 
1358 	/* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it
1359 	 * to avoid per-map type checks tripping on unknown flag
1360 	 */
1361 	token_flag = attr->map_flags & BPF_F_TOKEN_FD;
1362 	attr->map_flags &= ~BPF_F_TOKEN_FD;
1363 
1364 	if (attr->btf_vmlinux_value_type_id) {
1365 		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
1366 		    attr->btf_key_type_id || attr->btf_value_type_id)
1367 			return -EINVAL;
1368 	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
1369 		return -EINVAL;
1370 	}
1371 
1372 	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
1373 	    attr->map_type != BPF_MAP_TYPE_ARENA &&
1374 	    attr->map_extra != 0)
1375 		return -EINVAL;
1376 
1377 	f_flags = bpf_get_file_flag(attr->map_flags);
1378 	if (f_flags < 0)
1379 		return f_flags;
1380 
1381 	if (numa_node != NUMA_NO_NODE &&
1382 	    ((unsigned int)numa_node >= nr_node_ids ||
1383 	     !node_online(numa_node)))
1384 		return -EINVAL;
1385 
1386 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
1387 	map_type = attr->map_type;
1388 	if (map_type >= ARRAY_SIZE(bpf_map_types))
1389 		return -EINVAL;
1390 	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
1391 	ops = bpf_map_types[map_type];
1392 	if (!ops)
1393 		return -EINVAL;
1394 
1395 	if (ops->map_alloc_check) {
1396 		err = ops->map_alloc_check(attr);
1397 		if (err)
1398 			return err;
1399 	}
1400 	if (attr->map_ifindex)
1401 		ops = &bpf_map_offload_ops;
1402 	if (!ops->map_mem_usage)
1403 		return -EINVAL;
1404 
1405 	if (token_flag) {
1406 		token = bpf_token_get_from_fd(attr->map_token_fd);
1407 		if (IS_ERR(token))
1408 			return PTR_ERR(token);
1409 
1410 		/* if current token doesn't grant map creation permissions,
1411 		 * then we can't use this token, so ignore it and rely on
1412 		 * system-wide capabilities checks
1413 		 */
1414 		if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
1415 		    !bpf_token_allow_map_type(token, attr->map_type)) {
1416 			bpf_token_put(token);
1417 			token = NULL;
1418 		}
1419 	}
1420 
1421 	err = -EPERM;
1422 
1423 	/* Intent here is for unprivileged_bpf_disabled to block BPF map
1424 	 * creation for unprivileged users; other actions depend
1425 	 * on fd availability and access to bpffs, so are dependent on
1426 	 * object creation success. Even with unprivileged BPF disabled,
1427 	 * capability checks are still carried out.
1428 	 */
1429 	if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
1430 		goto put_token;
1431 
1432 	/* check privileged map type permissions */
1433 	switch (map_type) {
1434 	case BPF_MAP_TYPE_ARRAY:
1435 	case BPF_MAP_TYPE_PERCPU_ARRAY:
1436 	case BPF_MAP_TYPE_PROG_ARRAY:
1437 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1438 	case BPF_MAP_TYPE_CGROUP_ARRAY:
1439 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
1440 	case BPF_MAP_TYPE_HASH:
1441 	case BPF_MAP_TYPE_PERCPU_HASH:
1442 	case BPF_MAP_TYPE_HASH_OF_MAPS:
1443 	case BPF_MAP_TYPE_RINGBUF:
1444 	case BPF_MAP_TYPE_USER_RINGBUF:
1445 	case BPF_MAP_TYPE_CGROUP_STORAGE:
1446 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
1447 		/* unprivileged */
1448 		break;
1449 	case BPF_MAP_TYPE_SK_STORAGE:
1450 	case BPF_MAP_TYPE_INODE_STORAGE:
1451 	case BPF_MAP_TYPE_TASK_STORAGE:
1452 	case BPF_MAP_TYPE_CGRP_STORAGE:
1453 	case BPF_MAP_TYPE_BLOOM_FILTER:
1454 	case BPF_MAP_TYPE_LPM_TRIE:
1455 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
1456 	case BPF_MAP_TYPE_STACK_TRACE:
1457 	case BPF_MAP_TYPE_QUEUE:
1458 	case BPF_MAP_TYPE_STACK:
1459 	case BPF_MAP_TYPE_LRU_HASH:
1460 	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
1461 	case BPF_MAP_TYPE_STRUCT_OPS:
1462 	case BPF_MAP_TYPE_CPUMAP:
1463 	case BPF_MAP_TYPE_ARENA:
1464 		if (!bpf_token_capable(token, CAP_BPF))
1465 			goto put_token;
1466 		break;
1467 	case BPF_MAP_TYPE_SOCKMAP:
1468 	case BPF_MAP_TYPE_SOCKHASH:
1469 	case BPF_MAP_TYPE_DEVMAP:
1470 	case BPF_MAP_TYPE_DEVMAP_HASH:
1471 	case BPF_MAP_TYPE_XSKMAP:
1472 		if (!bpf_token_capable(token, CAP_NET_ADMIN))
1473 			goto put_token;
1474 		break;
1475 	default:
1476 		WARN(1, "unsupported map type %d", map_type);
1477 		goto put_token;
1478 	}
1479 
1480 	map = ops->map_alloc(attr);
1481 	if (IS_ERR(map)) {
1482 		err = PTR_ERR(map);
1483 		goto put_token;
1484 	}
1485 	map->ops = ops;
1486 	map->map_type = map_type;
1487 
1488 	err = bpf_obj_name_cpy(map->name, attr->map_name,
1489 			       sizeof(attr->map_name));
1490 	if (err < 0)
1491 		goto free_map;
1492 
1493 	preempt_disable();
1494 	map->cookie = gen_cookie_next(&bpf_map_cookie);
1495 	preempt_enable();
1496 
1497 	atomic64_set(&map->refcnt, 1);
1498 	atomic64_set(&map->usercnt, 1);
1499 	mutex_init(&map->freeze_mutex);
1500 	spin_lock_init(&map->owner_lock);
1501 
1502 	if (attr->btf_key_type_id || attr->btf_value_type_id ||
1503 	    /* Even the map's value is a kernel's struct,
1504 	     * the bpf_prog.o must have BTF to begin with
1505 	     * to figure out the corresponding kernel's
1506 	     * counter part.  Thus, attr->btf_fd has
1507 	     * to be valid also.
1508 	     */
1509 	    attr->btf_vmlinux_value_type_id) {
1510 		struct btf *btf;
1511 
1512 		btf = btf_get_by_fd(attr->btf_fd);
1513 		if (IS_ERR(btf)) {
1514 			err = PTR_ERR(btf);
1515 			goto free_map;
1516 		}
1517 		if (btf_is_kernel(btf)) {
1518 			btf_put(btf);
1519 			err = -EACCES;
1520 			goto free_map;
1521 		}
1522 		map->btf = btf;
1523 
1524 		if (attr->btf_value_type_id) {
1525 			err = map_check_btf(map, token, btf, attr->btf_key_type_id,
1526 					    attr->btf_value_type_id);
1527 			if (err)
1528 				goto free_map;
1529 		}
1530 
1531 		map->btf_key_type_id = attr->btf_key_type_id;
1532 		map->btf_value_type_id = attr->btf_value_type_id;
1533 		map->btf_vmlinux_value_type_id =
1534 			attr->btf_vmlinux_value_type_id;
1535 	}
1536 
1537 	err = security_bpf_map_create(map, attr, token, kernel);
1538 	if (err)
1539 		goto free_map_sec;
1540 
1541 	err = bpf_map_alloc_id(map);
1542 	if (err)
1543 		goto free_map_sec;
1544 
1545 	bpf_map_save_memcg(map);
1546 	bpf_token_put(token);
1547 
1548 	err = bpf_map_new_fd(map, f_flags);
1549 	if (err < 0) {
1550 		/* failed to allocate fd.
1551 		 * bpf_map_put_with_uref() is needed because the above
1552 		 * bpf_map_alloc_id() has published the map
1553 		 * to the userspace and the userspace may
1554 		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
1555 		 */
1556 		bpf_map_put_with_uref(map);
1557 		return err;
1558 	}
1559 
1560 	return err;
1561 
1562 free_map_sec:
1563 	security_bpf_map_free(map);
1564 free_map:
1565 	bpf_map_free(map);
1566 put_token:
1567 	bpf_token_put(token);
1568 	return err;
1569 }
1570 
bpf_map_inc(struct bpf_map * map)1571 void bpf_map_inc(struct bpf_map *map)
1572 {
1573 	atomic64_inc(&map->refcnt);
1574 }
1575 EXPORT_SYMBOL_GPL(bpf_map_inc);
1576 
bpf_map_inc_with_uref(struct bpf_map * map)1577 void bpf_map_inc_with_uref(struct bpf_map *map)
1578 {
1579 	atomic64_inc(&map->refcnt);
1580 	atomic64_inc(&map->usercnt);
1581 }
1582 EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
1583 
bpf_map_get(u32 ufd)1584 struct bpf_map *bpf_map_get(u32 ufd)
1585 {
1586 	CLASS(fd, f)(ufd);
1587 	struct bpf_map *map = __bpf_map_get(f);
1588 
1589 	if (!IS_ERR(map))
1590 		bpf_map_inc(map);
1591 
1592 	return map;
1593 }
1594 EXPORT_SYMBOL_NS(bpf_map_get, "BPF_INTERNAL");
1595 
bpf_map_get_with_uref(u32 ufd)1596 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
1597 {
1598 	CLASS(fd, f)(ufd);
1599 	struct bpf_map *map = __bpf_map_get(f);
1600 
1601 	if (!IS_ERR(map))
1602 		bpf_map_inc_with_uref(map);
1603 
1604 	return map;
1605 }
1606 
1607 /* map_idr_lock should have been held or the map should have been
1608  * protected by rcu read lock.
1609  */
__bpf_map_inc_not_zero(struct bpf_map * map,bool uref)1610 struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
1611 {
1612 	int refold;
1613 
1614 	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
1615 	if (!refold)
1616 		return ERR_PTR(-ENOENT);
1617 	if (uref)
1618 		atomic64_inc(&map->usercnt);
1619 
1620 	return map;
1621 }
1622 
bpf_map_inc_not_zero(struct bpf_map * map)1623 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
1624 {
1625 	lockdep_assert(rcu_read_lock_held());
1626 	return __bpf_map_inc_not_zero(map, false);
1627 }
1628 EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
1629 
bpf_stackmap_copy(struct bpf_map * map,void * key,void * value)1630 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
1631 {
1632 	return -ENOTSUPP;
1633 }
1634 
__bpf_copy_key(void __user * ukey,u64 key_size)1635 static void *__bpf_copy_key(void __user *ukey, u64 key_size)
1636 {
1637 	if (key_size)
1638 		return vmemdup_user(ukey, key_size);
1639 
1640 	if (ukey)
1641 		return ERR_PTR(-EINVAL);
1642 
1643 	return NULL;
1644 }
1645 
___bpf_copy_key(bpfptr_t ukey,u64 key_size)1646 static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
1647 {
1648 	if (key_size)
1649 		return kvmemdup_bpfptr(ukey, key_size);
1650 
1651 	if (!bpfptr_is_null(ukey))
1652 		return ERR_PTR(-EINVAL);
1653 
1654 	return NULL;
1655 }
1656 
1657 /* last field in 'union bpf_attr' used by this command */
1658 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
1659 
map_lookup_elem(union bpf_attr * attr)1660 static int map_lookup_elem(union bpf_attr *attr)
1661 {
1662 	void __user *ukey = u64_to_user_ptr(attr->key);
1663 	void __user *uvalue = u64_to_user_ptr(attr->value);
1664 	struct bpf_map *map;
1665 	void *key, *value;
1666 	u32 value_size;
1667 	int err;
1668 
1669 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
1670 		return -EINVAL;
1671 
1672 	if (attr->flags & ~BPF_F_LOCK)
1673 		return -EINVAL;
1674 
1675 	CLASS(fd, f)(attr->map_fd);
1676 	map = __bpf_map_get(f);
1677 	if (IS_ERR(map))
1678 		return PTR_ERR(map);
1679 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
1680 		return -EPERM;
1681 
1682 	if ((attr->flags & BPF_F_LOCK) &&
1683 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
1684 		return -EINVAL;
1685 
1686 	key = __bpf_copy_key(ukey, map->key_size);
1687 	if (IS_ERR(key))
1688 		return PTR_ERR(key);
1689 
1690 	value_size = bpf_map_value_size(map);
1691 
1692 	err = -ENOMEM;
1693 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1694 	if (!value)
1695 		goto free_key;
1696 
1697 	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
1698 		if (copy_from_user(value, uvalue, value_size))
1699 			err = -EFAULT;
1700 		else
1701 			err = bpf_map_copy_value(map, key, value, attr->flags);
1702 		goto free_value;
1703 	}
1704 
1705 	err = bpf_map_copy_value(map, key, value, attr->flags);
1706 	if (err)
1707 		goto free_value;
1708 
1709 	err = -EFAULT;
1710 	if (copy_to_user(uvalue, value, value_size) != 0)
1711 		goto free_value;
1712 
1713 	err = 0;
1714 
1715 free_value:
1716 	kvfree(value);
1717 free_key:
1718 	kvfree(key);
1719 	return err;
1720 }
1721 
1722 
1723 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
1724 
map_update_elem(union bpf_attr * attr,bpfptr_t uattr)1725 static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
1726 {
1727 	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1728 	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
1729 	struct bpf_map *map;
1730 	void *key, *value;
1731 	u32 value_size;
1732 	int err;
1733 
1734 	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
1735 		return -EINVAL;
1736 
1737 	CLASS(fd, f)(attr->map_fd);
1738 	map = __bpf_map_get(f);
1739 	if (IS_ERR(map))
1740 		return PTR_ERR(map);
1741 	bpf_map_write_active_inc(map);
1742 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1743 		err = -EPERM;
1744 		goto err_put;
1745 	}
1746 
1747 	if ((attr->flags & BPF_F_LOCK) &&
1748 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1749 		err = -EINVAL;
1750 		goto err_put;
1751 	}
1752 
1753 	key = ___bpf_copy_key(ukey, map->key_size);
1754 	if (IS_ERR(key)) {
1755 		err = PTR_ERR(key);
1756 		goto err_put;
1757 	}
1758 
1759 	value_size = bpf_map_value_size(map);
1760 	value = kvmemdup_bpfptr(uvalue, value_size);
1761 	if (IS_ERR(value)) {
1762 		err = PTR_ERR(value);
1763 		goto free_key;
1764 	}
1765 
1766 	err = bpf_map_update_value(map, fd_file(f), key, value, attr->flags);
1767 	if (!err)
1768 		maybe_wait_bpf_programs(map);
1769 
1770 	kvfree(value);
1771 free_key:
1772 	kvfree(key);
1773 err_put:
1774 	bpf_map_write_active_dec(map);
1775 	return err;
1776 }
1777 
1778 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1779 
map_delete_elem(union bpf_attr * attr,bpfptr_t uattr)1780 static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
1781 {
1782 	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1783 	struct bpf_map *map;
1784 	void *key;
1785 	int err;
1786 
1787 	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1788 		return -EINVAL;
1789 
1790 	CLASS(fd, f)(attr->map_fd);
1791 	map = __bpf_map_get(f);
1792 	if (IS_ERR(map))
1793 		return PTR_ERR(map);
1794 	bpf_map_write_active_inc(map);
1795 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1796 		err = -EPERM;
1797 		goto err_put;
1798 	}
1799 
1800 	key = ___bpf_copy_key(ukey, map->key_size);
1801 	if (IS_ERR(key)) {
1802 		err = PTR_ERR(key);
1803 		goto err_put;
1804 	}
1805 
1806 	if (bpf_map_is_offloaded(map)) {
1807 		err = bpf_map_offload_delete_elem(map, key);
1808 		goto out;
1809 	} else if (IS_FD_PROG_ARRAY(map) ||
1810 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1811 		/* These maps require sleepable context */
1812 		err = map->ops->map_delete_elem(map, key);
1813 		goto out;
1814 	}
1815 
1816 	bpf_disable_instrumentation();
1817 	rcu_read_lock();
1818 	err = map->ops->map_delete_elem(map, key);
1819 	rcu_read_unlock();
1820 	bpf_enable_instrumentation();
1821 	if (!err)
1822 		maybe_wait_bpf_programs(map);
1823 out:
1824 	kvfree(key);
1825 err_put:
1826 	bpf_map_write_active_dec(map);
1827 	return err;
1828 }
1829 
1830 /* last field in 'union bpf_attr' used by this command */
1831 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1832 
map_get_next_key(union bpf_attr * attr)1833 static int map_get_next_key(union bpf_attr *attr)
1834 {
1835 	void __user *ukey = u64_to_user_ptr(attr->key);
1836 	void __user *unext_key = u64_to_user_ptr(attr->next_key);
1837 	struct bpf_map *map;
1838 	void *key, *next_key;
1839 	int err;
1840 
1841 	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1842 		return -EINVAL;
1843 
1844 	CLASS(fd, f)(attr->map_fd);
1845 	map = __bpf_map_get(f);
1846 	if (IS_ERR(map))
1847 		return PTR_ERR(map);
1848 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
1849 		return -EPERM;
1850 
1851 	if (ukey) {
1852 		key = __bpf_copy_key(ukey, map->key_size);
1853 		if (IS_ERR(key))
1854 			return PTR_ERR(key);
1855 	} else {
1856 		key = NULL;
1857 	}
1858 
1859 	err = -ENOMEM;
1860 	next_key = kvmalloc(map->key_size, GFP_USER);
1861 	if (!next_key)
1862 		goto free_key;
1863 
1864 	if (bpf_map_is_offloaded(map)) {
1865 		err = bpf_map_offload_get_next_key(map, key, next_key);
1866 		goto out;
1867 	}
1868 
1869 	rcu_read_lock();
1870 	err = map->ops->map_get_next_key(map, key, next_key);
1871 	rcu_read_unlock();
1872 out:
1873 	if (err)
1874 		goto free_next_key;
1875 
1876 	err = -EFAULT;
1877 	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1878 		goto free_next_key;
1879 
1880 	err = 0;
1881 
1882 free_next_key:
1883 	kvfree(next_key);
1884 free_key:
1885 	kvfree(key);
1886 	return err;
1887 }
1888 
generic_map_delete_batch(struct bpf_map * map,const union bpf_attr * attr,union bpf_attr __user * uattr)1889 int generic_map_delete_batch(struct bpf_map *map,
1890 			     const union bpf_attr *attr,
1891 			     union bpf_attr __user *uattr)
1892 {
1893 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1894 	u32 cp, max_count;
1895 	int err = 0;
1896 	void *key;
1897 
1898 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1899 		return -EINVAL;
1900 
1901 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1902 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1903 		return -EINVAL;
1904 	}
1905 
1906 	max_count = attr->batch.count;
1907 	if (!max_count)
1908 		return 0;
1909 
1910 	if (put_user(0, &uattr->batch.count))
1911 		return -EFAULT;
1912 
1913 	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1914 	if (!key)
1915 		return -ENOMEM;
1916 
1917 	for (cp = 0; cp < max_count; cp++) {
1918 		err = -EFAULT;
1919 		if (copy_from_user(key, keys + cp * map->key_size,
1920 				   map->key_size))
1921 			break;
1922 
1923 		if (bpf_map_is_offloaded(map)) {
1924 			err = bpf_map_offload_delete_elem(map, key);
1925 			break;
1926 		}
1927 
1928 		bpf_disable_instrumentation();
1929 		rcu_read_lock();
1930 		err = map->ops->map_delete_elem(map, key);
1931 		rcu_read_unlock();
1932 		bpf_enable_instrumentation();
1933 		if (err)
1934 			break;
1935 		cond_resched();
1936 	}
1937 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1938 		err = -EFAULT;
1939 
1940 	kvfree(key);
1941 
1942 	return err;
1943 }
1944 
generic_map_update_batch(struct bpf_map * map,struct file * map_file,const union bpf_attr * attr,union bpf_attr __user * uattr)1945 int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
1946 			     const union bpf_attr *attr,
1947 			     union bpf_attr __user *uattr)
1948 {
1949 	void __user *values = u64_to_user_ptr(attr->batch.values);
1950 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1951 	u32 value_size, cp, max_count;
1952 	void *key, *value;
1953 	int err = 0;
1954 
1955 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1956 		return -EINVAL;
1957 
1958 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1959 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1960 		return -EINVAL;
1961 	}
1962 
1963 	value_size = bpf_map_value_size(map);
1964 
1965 	max_count = attr->batch.count;
1966 	if (!max_count)
1967 		return 0;
1968 
1969 	if (put_user(0, &uattr->batch.count))
1970 		return -EFAULT;
1971 
1972 	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1973 	if (!key)
1974 		return -ENOMEM;
1975 
1976 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1977 	if (!value) {
1978 		kvfree(key);
1979 		return -ENOMEM;
1980 	}
1981 
1982 	for (cp = 0; cp < max_count; cp++) {
1983 		err = -EFAULT;
1984 		if (copy_from_user(key, keys + cp * map->key_size,
1985 		    map->key_size) ||
1986 		    copy_from_user(value, values + cp * value_size, value_size))
1987 			break;
1988 
1989 		err = bpf_map_update_value(map, map_file, key, value,
1990 					   attr->batch.elem_flags);
1991 
1992 		if (err)
1993 			break;
1994 		cond_resched();
1995 	}
1996 
1997 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1998 		err = -EFAULT;
1999 
2000 	kvfree(value);
2001 	kvfree(key);
2002 
2003 	return err;
2004 }
2005 
generic_map_lookup_batch(struct bpf_map * map,const union bpf_attr * attr,union bpf_attr __user * uattr)2006 int generic_map_lookup_batch(struct bpf_map *map,
2007 				    const union bpf_attr *attr,
2008 				    union bpf_attr __user *uattr)
2009 {
2010 	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
2011 	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
2012 	void __user *values = u64_to_user_ptr(attr->batch.values);
2013 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
2014 	void *buf, *buf_prevkey, *prev_key, *key, *value;
2015 	u32 value_size, cp, max_count;
2016 	int err;
2017 
2018 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
2019 		return -EINVAL;
2020 
2021 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
2022 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
2023 		return -EINVAL;
2024 
2025 	value_size = bpf_map_value_size(map);
2026 
2027 	max_count = attr->batch.count;
2028 	if (!max_count)
2029 		return 0;
2030 
2031 	if (put_user(0, &uattr->batch.count))
2032 		return -EFAULT;
2033 
2034 	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
2035 	if (!buf_prevkey)
2036 		return -ENOMEM;
2037 
2038 	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
2039 	if (!buf) {
2040 		kvfree(buf_prevkey);
2041 		return -ENOMEM;
2042 	}
2043 
2044 	err = -EFAULT;
2045 	prev_key = NULL;
2046 	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
2047 		goto free_buf;
2048 	key = buf;
2049 	value = key + map->key_size;
2050 	if (ubatch)
2051 		prev_key = buf_prevkey;
2052 
2053 	for (cp = 0; cp < max_count;) {
2054 		rcu_read_lock();
2055 		err = map->ops->map_get_next_key(map, prev_key, key);
2056 		rcu_read_unlock();
2057 		if (err)
2058 			break;
2059 		err = bpf_map_copy_value(map, key, value,
2060 					 attr->batch.elem_flags);
2061 
2062 		if (err == -ENOENT)
2063 			goto next_key;
2064 
2065 		if (err)
2066 			goto free_buf;
2067 
2068 		if (copy_to_user(keys + cp * map->key_size, key,
2069 				 map->key_size)) {
2070 			err = -EFAULT;
2071 			goto free_buf;
2072 		}
2073 		if (copy_to_user(values + cp * value_size, value, value_size)) {
2074 			err = -EFAULT;
2075 			goto free_buf;
2076 		}
2077 
2078 		cp++;
2079 next_key:
2080 		if (!prev_key)
2081 			prev_key = buf_prevkey;
2082 
2083 		swap(prev_key, key);
2084 		cond_resched();
2085 	}
2086 
2087 	if (err == -EFAULT)
2088 		goto free_buf;
2089 
2090 	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
2091 		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
2092 		err = -EFAULT;
2093 
2094 free_buf:
2095 	kvfree(buf_prevkey);
2096 	kvfree(buf);
2097 	return err;
2098 }
2099 
2100 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags
2101 
map_lookup_and_delete_elem(union bpf_attr * attr)2102 static int map_lookup_and_delete_elem(union bpf_attr *attr)
2103 {
2104 	void __user *ukey = u64_to_user_ptr(attr->key);
2105 	void __user *uvalue = u64_to_user_ptr(attr->value);
2106 	struct bpf_map *map;
2107 	void *key, *value;
2108 	u32 value_size;
2109 	int err;
2110 
2111 	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
2112 		return -EINVAL;
2113 
2114 	if (attr->flags & ~BPF_F_LOCK)
2115 		return -EINVAL;
2116 
2117 	CLASS(fd, f)(attr->map_fd);
2118 	map = __bpf_map_get(f);
2119 	if (IS_ERR(map))
2120 		return PTR_ERR(map);
2121 	bpf_map_write_active_inc(map);
2122 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
2123 	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
2124 		err = -EPERM;
2125 		goto err_put;
2126 	}
2127 
2128 	if (attr->flags &&
2129 	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
2130 	     map->map_type == BPF_MAP_TYPE_STACK)) {
2131 		err = -EINVAL;
2132 		goto err_put;
2133 	}
2134 
2135 	if ((attr->flags & BPF_F_LOCK) &&
2136 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
2137 		err = -EINVAL;
2138 		goto err_put;
2139 	}
2140 
2141 	key = __bpf_copy_key(ukey, map->key_size);
2142 	if (IS_ERR(key)) {
2143 		err = PTR_ERR(key);
2144 		goto err_put;
2145 	}
2146 
2147 	value_size = bpf_map_value_size(map);
2148 
2149 	err = -ENOMEM;
2150 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
2151 	if (!value)
2152 		goto free_key;
2153 
2154 	err = -ENOTSUPP;
2155 	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
2156 	    map->map_type == BPF_MAP_TYPE_STACK) {
2157 		err = map->ops->map_pop_elem(map, value);
2158 	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
2159 		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
2160 		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
2161 		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2162 		if (!bpf_map_is_offloaded(map)) {
2163 			bpf_disable_instrumentation();
2164 			rcu_read_lock();
2165 			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
2166 			rcu_read_unlock();
2167 			bpf_enable_instrumentation();
2168 		}
2169 	}
2170 
2171 	if (err)
2172 		goto free_value;
2173 
2174 	if (copy_to_user(uvalue, value, value_size) != 0) {
2175 		err = -EFAULT;
2176 		goto free_value;
2177 	}
2178 
2179 	err = 0;
2180 
2181 free_value:
2182 	kvfree(value);
2183 free_key:
2184 	kvfree(key);
2185 err_put:
2186 	bpf_map_write_active_dec(map);
2187 	return err;
2188 }
2189 
2190 #define BPF_MAP_FREEZE_LAST_FIELD map_fd
2191 
map_freeze(const union bpf_attr * attr)2192 static int map_freeze(const union bpf_attr *attr)
2193 {
2194 	int err = 0;
2195 	struct bpf_map *map;
2196 
2197 	if (CHECK_ATTR(BPF_MAP_FREEZE))
2198 		return -EINVAL;
2199 
2200 	CLASS(fd, f)(attr->map_fd);
2201 	map = __bpf_map_get(f);
2202 	if (IS_ERR(map))
2203 		return PTR_ERR(map);
2204 
2205 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record))
2206 		return -ENOTSUPP;
2207 
2208 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE))
2209 		return -EPERM;
2210 
2211 	mutex_lock(&map->freeze_mutex);
2212 	if (bpf_map_write_active(map)) {
2213 		err = -EBUSY;
2214 		goto err_put;
2215 	}
2216 	if (READ_ONCE(map->frozen)) {
2217 		err = -EBUSY;
2218 		goto err_put;
2219 	}
2220 
2221 	WRITE_ONCE(map->frozen, true);
2222 err_put:
2223 	mutex_unlock(&map->freeze_mutex);
2224 	return err;
2225 }
2226 
2227 static const struct bpf_prog_ops * const bpf_prog_types[] = {
2228 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
2229 	[_id] = & _name ## _prog_ops,
2230 #define BPF_MAP_TYPE(_id, _ops)
2231 #define BPF_LINK_TYPE(_id, _name)
2232 #include <linux/bpf_types.h>
2233 #undef BPF_PROG_TYPE
2234 #undef BPF_MAP_TYPE
2235 #undef BPF_LINK_TYPE
2236 };
2237 
find_prog_type(enum bpf_prog_type type,struct bpf_prog * prog)2238 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
2239 {
2240 	const struct bpf_prog_ops *ops;
2241 
2242 	if (type >= ARRAY_SIZE(bpf_prog_types))
2243 		return -EINVAL;
2244 	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
2245 	ops = bpf_prog_types[type];
2246 	if (!ops)
2247 		return -EINVAL;
2248 
2249 	if (!bpf_prog_is_offloaded(prog->aux))
2250 		prog->aux->ops = ops;
2251 	else
2252 		prog->aux->ops = &bpf_offload_prog_ops;
2253 	prog->type = type;
2254 	return 0;
2255 }
2256 
2257 enum bpf_audit {
2258 	BPF_AUDIT_LOAD,
2259 	BPF_AUDIT_UNLOAD,
2260 	BPF_AUDIT_MAX,
2261 };
2262 
2263 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
2264 	[BPF_AUDIT_LOAD]   = "LOAD",
2265 	[BPF_AUDIT_UNLOAD] = "UNLOAD",
2266 };
2267 
bpf_audit_prog(const struct bpf_prog * prog,unsigned int op)2268 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
2269 {
2270 	struct audit_context *ctx = NULL;
2271 	struct audit_buffer *ab;
2272 
2273 	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
2274 		return;
2275 	if (audit_enabled == AUDIT_OFF)
2276 		return;
2277 	if (!in_irq() && !irqs_disabled())
2278 		ctx = audit_context();
2279 	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
2280 	if (unlikely(!ab))
2281 		return;
2282 	audit_log_format(ab, "prog-id=%u op=%s",
2283 			 prog->aux->id, bpf_audit_str[op]);
2284 	audit_log_end(ab);
2285 }
2286 
bpf_prog_alloc_id(struct bpf_prog * prog)2287 static int bpf_prog_alloc_id(struct bpf_prog *prog)
2288 {
2289 	int id;
2290 
2291 	idr_preload(GFP_KERNEL);
2292 	spin_lock_bh(&prog_idr_lock);
2293 	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
2294 	if (id > 0)
2295 		prog->aux->id = id;
2296 	spin_unlock_bh(&prog_idr_lock);
2297 	idr_preload_end();
2298 
2299 	/* id is in [1, INT_MAX) */
2300 	if (WARN_ON_ONCE(!id))
2301 		return -ENOSPC;
2302 
2303 	return id > 0 ? 0 : id;
2304 }
2305 
bpf_prog_free_id(struct bpf_prog * prog)2306 void bpf_prog_free_id(struct bpf_prog *prog)
2307 {
2308 	unsigned long flags;
2309 
2310 	/* cBPF to eBPF migrations are currently not in the idr store.
2311 	 * Offloaded programs are removed from the store when their device
2312 	 * disappears - even if someone grabs an fd to them they are unusable,
2313 	 * simply waiting for refcnt to drop to be freed.
2314 	 */
2315 	if (!prog->aux->id)
2316 		return;
2317 
2318 	spin_lock_irqsave(&prog_idr_lock, flags);
2319 	idr_remove(&prog_idr, prog->aux->id);
2320 	prog->aux->id = 0;
2321 	spin_unlock_irqrestore(&prog_idr_lock, flags);
2322 }
2323 
__bpf_prog_put_rcu(struct rcu_head * rcu)2324 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
2325 {
2326 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
2327 
2328 	kvfree(aux->func_info);
2329 	kfree(aux->func_info_aux);
2330 	free_uid(aux->user);
2331 	security_bpf_prog_free(aux->prog);
2332 	bpf_prog_free(aux->prog);
2333 }
2334 
__bpf_prog_put_noref(struct bpf_prog * prog,bool deferred)2335 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
2336 {
2337 	bpf_prog_kallsyms_del_all(prog);
2338 	btf_put(prog->aux->btf);
2339 	module_put(prog->aux->mod);
2340 	kvfree(prog->aux->jited_linfo);
2341 	kvfree(prog->aux->linfo);
2342 	kfree(prog->aux->kfunc_tab);
2343 	kfree(prog->aux->ctx_arg_info);
2344 	if (prog->aux->attach_btf)
2345 		btf_put(prog->aux->attach_btf);
2346 
2347 	if (deferred) {
2348 		if (prog->sleepable)
2349 			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
2350 		else
2351 			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
2352 	} else {
2353 		__bpf_prog_put_rcu(&prog->aux->rcu);
2354 	}
2355 }
2356 
bpf_prog_put_deferred(struct work_struct * work)2357 static void bpf_prog_put_deferred(struct work_struct *work)
2358 {
2359 	struct bpf_prog_aux *aux;
2360 	struct bpf_prog *prog;
2361 
2362 	aux = container_of(work, struct bpf_prog_aux, work);
2363 	prog = aux->prog;
2364 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
2365 	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
2366 	bpf_prog_free_id(prog);
2367 	__bpf_prog_put_noref(prog, true);
2368 }
2369 
__bpf_prog_put(struct bpf_prog * prog)2370 static void __bpf_prog_put(struct bpf_prog *prog)
2371 {
2372 	struct bpf_prog_aux *aux = prog->aux;
2373 
2374 	if (atomic64_dec_and_test(&aux->refcnt)) {
2375 		if (in_irq() || irqs_disabled()) {
2376 			INIT_WORK(&aux->work, bpf_prog_put_deferred);
2377 			schedule_work(&aux->work);
2378 		} else {
2379 			bpf_prog_put_deferred(&aux->work);
2380 		}
2381 	}
2382 }
2383 
bpf_prog_put(struct bpf_prog * prog)2384 void bpf_prog_put(struct bpf_prog *prog)
2385 {
2386 	__bpf_prog_put(prog);
2387 }
2388 EXPORT_SYMBOL_GPL(bpf_prog_put);
2389 
bpf_prog_release(struct inode * inode,struct file * filp)2390 static int bpf_prog_release(struct inode *inode, struct file *filp)
2391 {
2392 	struct bpf_prog *prog = filp->private_data;
2393 
2394 	bpf_prog_put(prog);
2395 	return 0;
2396 }
2397 
2398 struct bpf_prog_kstats {
2399 	u64 nsecs;
2400 	u64 cnt;
2401 	u64 misses;
2402 };
2403 
bpf_prog_inc_misses_counter(struct bpf_prog * prog)2404 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
2405 {
2406 	struct bpf_prog_stats *stats;
2407 	unsigned int flags;
2408 
2409 	stats = this_cpu_ptr(prog->stats);
2410 	flags = u64_stats_update_begin_irqsave(&stats->syncp);
2411 	u64_stats_inc(&stats->misses);
2412 	u64_stats_update_end_irqrestore(&stats->syncp, flags);
2413 }
2414 
bpf_prog_get_stats(const struct bpf_prog * prog,struct bpf_prog_kstats * stats)2415 static void bpf_prog_get_stats(const struct bpf_prog *prog,
2416 			       struct bpf_prog_kstats *stats)
2417 {
2418 	u64 nsecs = 0, cnt = 0, misses = 0;
2419 	int cpu;
2420 
2421 	for_each_possible_cpu(cpu) {
2422 		const struct bpf_prog_stats *st;
2423 		unsigned int start;
2424 		u64 tnsecs, tcnt, tmisses;
2425 
2426 		st = per_cpu_ptr(prog->stats, cpu);
2427 		do {
2428 			start = u64_stats_fetch_begin(&st->syncp);
2429 			tnsecs = u64_stats_read(&st->nsecs);
2430 			tcnt = u64_stats_read(&st->cnt);
2431 			tmisses = u64_stats_read(&st->misses);
2432 		} while (u64_stats_fetch_retry(&st->syncp, start));
2433 		nsecs += tnsecs;
2434 		cnt += tcnt;
2435 		misses += tmisses;
2436 	}
2437 	stats->nsecs = nsecs;
2438 	stats->cnt = cnt;
2439 	stats->misses = misses;
2440 }
2441 
2442 #ifdef CONFIG_PROC_FS
bpf_prog_show_fdinfo(struct seq_file * m,struct file * filp)2443 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
2444 {
2445 	const struct bpf_prog *prog = filp->private_data;
2446 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2447 	struct bpf_prog_kstats stats;
2448 
2449 	bpf_prog_get_stats(prog, &stats);
2450 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2451 	seq_printf(m,
2452 		   "prog_type:\t%u\n"
2453 		   "prog_jited:\t%u\n"
2454 		   "prog_tag:\t%s\n"
2455 		   "memlock:\t%llu\n"
2456 		   "prog_id:\t%u\n"
2457 		   "run_time_ns:\t%llu\n"
2458 		   "run_cnt:\t%llu\n"
2459 		   "recursion_misses:\t%llu\n"
2460 		   "verified_insns:\t%u\n",
2461 		   prog->type,
2462 		   prog->jited,
2463 		   prog_tag,
2464 		   prog->pages * 1ULL << PAGE_SHIFT,
2465 		   prog->aux->id,
2466 		   stats.nsecs,
2467 		   stats.cnt,
2468 		   stats.misses,
2469 		   prog->aux->verified_insns);
2470 }
2471 #endif
2472 
2473 const struct file_operations bpf_prog_fops = {
2474 #ifdef CONFIG_PROC_FS
2475 	.show_fdinfo	= bpf_prog_show_fdinfo,
2476 #endif
2477 	.release	= bpf_prog_release,
2478 	.read		= bpf_dummy_read,
2479 	.write		= bpf_dummy_write,
2480 };
2481 
bpf_prog_new_fd(struct bpf_prog * prog)2482 int bpf_prog_new_fd(struct bpf_prog *prog)
2483 {
2484 	int ret;
2485 
2486 	ret = security_bpf_prog(prog);
2487 	if (ret < 0)
2488 		return ret;
2489 
2490 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
2491 				O_RDWR | O_CLOEXEC);
2492 }
2493 
bpf_prog_add(struct bpf_prog * prog,int i)2494 void bpf_prog_add(struct bpf_prog *prog, int i)
2495 {
2496 	atomic64_add(i, &prog->aux->refcnt);
2497 }
2498 EXPORT_SYMBOL_GPL(bpf_prog_add);
2499 
bpf_prog_sub(struct bpf_prog * prog,int i)2500 void bpf_prog_sub(struct bpf_prog *prog, int i)
2501 {
2502 	/* Only to be used for undoing previous bpf_prog_add() in some
2503 	 * error path. We still know that another entity in our call
2504 	 * path holds a reference to the program, thus atomic_sub() can
2505 	 * be safely used in such cases!
2506 	 */
2507 	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
2508 }
2509 EXPORT_SYMBOL_GPL(bpf_prog_sub);
2510 
bpf_prog_inc(struct bpf_prog * prog)2511 void bpf_prog_inc(struct bpf_prog *prog)
2512 {
2513 	atomic64_inc(&prog->aux->refcnt);
2514 }
2515 EXPORT_SYMBOL_GPL(bpf_prog_inc);
2516 
2517 /* prog_idr_lock should have been held */
bpf_prog_inc_not_zero(struct bpf_prog * prog)2518 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
2519 {
2520 	int refold;
2521 
2522 	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
2523 
2524 	if (!refold)
2525 		return ERR_PTR(-ENOENT);
2526 
2527 	return prog;
2528 }
2529 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
2530 
bpf_prog_get_ok(struct bpf_prog * prog,enum bpf_prog_type * attach_type,bool attach_drv)2531 bool bpf_prog_get_ok(struct bpf_prog *prog,
2532 			    enum bpf_prog_type *attach_type, bool attach_drv)
2533 {
2534 	/* not an attachment, just a refcount inc, always allow */
2535 	if (!attach_type)
2536 		return true;
2537 
2538 	if (prog->type != *attach_type)
2539 		return false;
2540 	if (bpf_prog_is_offloaded(prog->aux) && !attach_drv)
2541 		return false;
2542 
2543 	return true;
2544 }
2545 
__bpf_prog_get(u32 ufd,enum bpf_prog_type * attach_type,bool attach_drv)2546 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
2547 				       bool attach_drv)
2548 {
2549 	CLASS(fd, f)(ufd);
2550 	struct bpf_prog *prog;
2551 
2552 	if (fd_empty(f))
2553 		return ERR_PTR(-EBADF);
2554 	if (fd_file(f)->f_op != &bpf_prog_fops)
2555 		return ERR_PTR(-EINVAL);
2556 
2557 	prog = fd_file(f)->private_data;
2558 	if (!bpf_prog_get_ok(prog, attach_type, attach_drv))
2559 		return ERR_PTR(-EINVAL);
2560 
2561 	bpf_prog_inc(prog);
2562 	return prog;
2563 }
2564 
bpf_prog_get(u32 ufd)2565 struct bpf_prog *bpf_prog_get(u32 ufd)
2566 {
2567 	return __bpf_prog_get(ufd, NULL, false);
2568 }
2569 
bpf_prog_get_type_dev(u32 ufd,enum bpf_prog_type type,bool attach_drv)2570 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2571 				       bool attach_drv)
2572 {
2573 	return __bpf_prog_get(ufd, &type, attach_drv);
2574 }
2575 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
2576 
2577 /* Initially all BPF programs could be loaded w/o specifying
2578  * expected_attach_type. Later for some of them specifying expected_attach_type
2579  * at load time became required so that program could be validated properly.
2580  * Programs of types that are allowed to be loaded both w/ and w/o (for
2581  * backward compatibility) expected_attach_type, should have the default attach
2582  * type assigned to expected_attach_type for the latter case, so that it can be
2583  * validated later at attach time.
2584  *
2585  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
2586  * prog type requires it but has some attach types that have to be backward
2587  * compatible.
2588  */
bpf_prog_load_fixup_attach_type(union bpf_attr * attr)2589 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
2590 {
2591 	switch (attr->prog_type) {
2592 	case BPF_PROG_TYPE_CGROUP_SOCK:
2593 		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
2594 		 * exist so checking for non-zero is the way to go here.
2595 		 */
2596 		if (!attr->expected_attach_type)
2597 			attr->expected_attach_type =
2598 				BPF_CGROUP_INET_SOCK_CREATE;
2599 		break;
2600 	case BPF_PROG_TYPE_SK_REUSEPORT:
2601 		if (!attr->expected_attach_type)
2602 			attr->expected_attach_type =
2603 				BPF_SK_REUSEPORT_SELECT;
2604 		break;
2605 	}
2606 }
2607 
2608 static int
bpf_prog_load_check_attach(enum bpf_prog_type prog_type,enum bpf_attach_type expected_attach_type,struct btf * attach_btf,u32 btf_id,struct bpf_prog * dst_prog)2609 bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
2610 			   enum bpf_attach_type expected_attach_type,
2611 			   struct btf *attach_btf, u32 btf_id,
2612 			   struct bpf_prog *dst_prog)
2613 {
2614 	if (btf_id) {
2615 		if (btf_id > BTF_MAX_TYPE)
2616 			return -EINVAL;
2617 
2618 		if (!attach_btf && !dst_prog)
2619 			return -EINVAL;
2620 
2621 		switch (prog_type) {
2622 		case BPF_PROG_TYPE_TRACING:
2623 		case BPF_PROG_TYPE_LSM:
2624 		case BPF_PROG_TYPE_STRUCT_OPS:
2625 		case BPF_PROG_TYPE_EXT:
2626 			break;
2627 		default:
2628 			return -EINVAL;
2629 		}
2630 	}
2631 
2632 	if (attach_btf && (!btf_id || dst_prog))
2633 		return -EINVAL;
2634 
2635 	if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
2636 	    prog_type != BPF_PROG_TYPE_EXT)
2637 		return -EINVAL;
2638 
2639 	switch (prog_type) {
2640 	case BPF_PROG_TYPE_CGROUP_SOCK:
2641 		switch (expected_attach_type) {
2642 		case BPF_CGROUP_INET_SOCK_CREATE:
2643 		case BPF_CGROUP_INET_SOCK_RELEASE:
2644 		case BPF_CGROUP_INET4_POST_BIND:
2645 		case BPF_CGROUP_INET6_POST_BIND:
2646 			return 0;
2647 		default:
2648 			return -EINVAL;
2649 		}
2650 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2651 		switch (expected_attach_type) {
2652 		case BPF_CGROUP_INET4_BIND:
2653 		case BPF_CGROUP_INET6_BIND:
2654 		case BPF_CGROUP_INET4_CONNECT:
2655 		case BPF_CGROUP_INET6_CONNECT:
2656 		case BPF_CGROUP_UNIX_CONNECT:
2657 		case BPF_CGROUP_INET4_GETPEERNAME:
2658 		case BPF_CGROUP_INET6_GETPEERNAME:
2659 		case BPF_CGROUP_UNIX_GETPEERNAME:
2660 		case BPF_CGROUP_INET4_GETSOCKNAME:
2661 		case BPF_CGROUP_INET6_GETSOCKNAME:
2662 		case BPF_CGROUP_UNIX_GETSOCKNAME:
2663 		case BPF_CGROUP_UDP4_SENDMSG:
2664 		case BPF_CGROUP_UDP6_SENDMSG:
2665 		case BPF_CGROUP_UNIX_SENDMSG:
2666 		case BPF_CGROUP_UDP4_RECVMSG:
2667 		case BPF_CGROUP_UDP6_RECVMSG:
2668 		case BPF_CGROUP_UNIX_RECVMSG:
2669 			return 0;
2670 		default:
2671 			return -EINVAL;
2672 		}
2673 	case BPF_PROG_TYPE_CGROUP_SKB:
2674 		switch (expected_attach_type) {
2675 		case BPF_CGROUP_INET_INGRESS:
2676 		case BPF_CGROUP_INET_EGRESS:
2677 			return 0;
2678 		default:
2679 			return -EINVAL;
2680 		}
2681 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2682 		switch (expected_attach_type) {
2683 		case BPF_CGROUP_SETSOCKOPT:
2684 		case BPF_CGROUP_GETSOCKOPT:
2685 			return 0;
2686 		default:
2687 			return -EINVAL;
2688 		}
2689 	case BPF_PROG_TYPE_SK_LOOKUP:
2690 		if (expected_attach_type == BPF_SK_LOOKUP)
2691 			return 0;
2692 		return -EINVAL;
2693 	case BPF_PROG_TYPE_SK_REUSEPORT:
2694 		switch (expected_attach_type) {
2695 		case BPF_SK_REUSEPORT_SELECT:
2696 		case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
2697 			return 0;
2698 		default:
2699 			return -EINVAL;
2700 		}
2701 	case BPF_PROG_TYPE_NETFILTER:
2702 		if (expected_attach_type == BPF_NETFILTER)
2703 			return 0;
2704 		return -EINVAL;
2705 	case BPF_PROG_TYPE_SYSCALL:
2706 	case BPF_PROG_TYPE_EXT:
2707 		if (expected_attach_type)
2708 			return -EINVAL;
2709 		fallthrough;
2710 	default:
2711 		return 0;
2712 	}
2713 }
2714 
is_net_admin_prog_type(enum bpf_prog_type prog_type)2715 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2716 {
2717 	switch (prog_type) {
2718 	case BPF_PROG_TYPE_SCHED_CLS:
2719 	case BPF_PROG_TYPE_SCHED_ACT:
2720 	case BPF_PROG_TYPE_XDP:
2721 	case BPF_PROG_TYPE_LWT_IN:
2722 	case BPF_PROG_TYPE_LWT_OUT:
2723 	case BPF_PROG_TYPE_LWT_XMIT:
2724 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2725 	case BPF_PROG_TYPE_SK_SKB:
2726 	case BPF_PROG_TYPE_SK_MSG:
2727 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2728 	case BPF_PROG_TYPE_CGROUP_DEVICE:
2729 	case BPF_PROG_TYPE_CGROUP_SOCK:
2730 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2731 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2732 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2733 	case BPF_PROG_TYPE_SOCK_OPS:
2734 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2735 	case BPF_PROG_TYPE_NETFILTER:
2736 		return true;
2737 	case BPF_PROG_TYPE_CGROUP_SKB:
2738 		/* always unpriv */
2739 	case BPF_PROG_TYPE_SK_REUSEPORT:
2740 		/* equivalent to SOCKET_FILTER. need CAP_BPF only */
2741 	default:
2742 		return false;
2743 	}
2744 }
2745 
is_perfmon_prog_type(enum bpf_prog_type prog_type)2746 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2747 {
2748 	switch (prog_type) {
2749 	case BPF_PROG_TYPE_KPROBE:
2750 	case BPF_PROG_TYPE_TRACEPOINT:
2751 	case BPF_PROG_TYPE_PERF_EVENT:
2752 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2753 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2754 	case BPF_PROG_TYPE_TRACING:
2755 	case BPF_PROG_TYPE_LSM:
2756 	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2757 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2758 		return true;
2759 	default:
2760 		return false;
2761 	}
2762 }
2763 
2764 /* last field in 'union bpf_attr' used by this command */
2765 #define BPF_PROG_LOAD_LAST_FIELD fd_array_cnt
2766 
bpf_prog_load(union bpf_attr * attr,bpfptr_t uattr,u32 uattr_size)2767 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
2768 {
2769 	enum bpf_prog_type type = attr->prog_type;
2770 	struct bpf_prog *prog, *dst_prog = NULL;
2771 	struct btf *attach_btf = NULL;
2772 	struct bpf_token *token = NULL;
2773 	bool bpf_cap;
2774 	int err;
2775 	char license[128];
2776 
2777 	if (CHECK_ATTR(BPF_PROG_LOAD))
2778 		return -EINVAL;
2779 
2780 	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2781 				 BPF_F_ANY_ALIGNMENT |
2782 				 BPF_F_TEST_STATE_FREQ |
2783 				 BPF_F_SLEEPABLE |
2784 				 BPF_F_TEST_RND_HI32 |
2785 				 BPF_F_XDP_HAS_FRAGS |
2786 				 BPF_F_XDP_DEV_BOUND_ONLY |
2787 				 BPF_F_TEST_REG_INVARIANTS |
2788 				 BPF_F_TOKEN_FD))
2789 		return -EINVAL;
2790 
2791 	bpf_prog_load_fixup_attach_type(attr);
2792 
2793 	if (attr->prog_flags & BPF_F_TOKEN_FD) {
2794 		token = bpf_token_get_from_fd(attr->prog_token_fd);
2795 		if (IS_ERR(token))
2796 			return PTR_ERR(token);
2797 		/* if current token doesn't grant prog loading permissions,
2798 		 * then we can't use this token, so ignore it and rely on
2799 		 * system-wide capabilities checks
2800 		 */
2801 		if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) ||
2802 		    !bpf_token_allow_prog_type(token, attr->prog_type,
2803 					       attr->expected_attach_type)) {
2804 			bpf_token_put(token);
2805 			token = NULL;
2806 		}
2807 	}
2808 
2809 	bpf_cap = bpf_token_capable(token, CAP_BPF);
2810 	err = -EPERM;
2811 
2812 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2813 	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2814 	    !bpf_cap)
2815 		goto put_token;
2816 
2817 	/* Intent here is for unprivileged_bpf_disabled to block BPF program
2818 	 * creation for unprivileged users; other actions depend
2819 	 * on fd availability and access to bpffs, so are dependent on
2820 	 * object creation success. Even with unprivileged BPF disabled,
2821 	 * capability checks are still carried out for these
2822 	 * and other operations.
2823 	 */
2824 	if (sysctl_unprivileged_bpf_disabled && !bpf_cap)
2825 		goto put_token;
2826 
2827 	if (attr->insn_cnt == 0 ||
2828 	    attr->insn_cnt > (bpf_cap ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) {
2829 		err = -E2BIG;
2830 		goto put_token;
2831 	}
2832 	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2833 	    type != BPF_PROG_TYPE_CGROUP_SKB &&
2834 	    !bpf_cap)
2835 		goto put_token;
2836 
2837 	if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN))
2838 		goto put_token;
2839 	if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON))
2840 		goto put_token;
2841 
2842 	/* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
2843 	 * or btf, we need to check which one it is
2844 	 */
2845 	if (attr->attach_prog_fd) {
2846 		dst_prog = bpf_prog_get(attr->attach_prog_fd);
2847 		if (IS_ERR(dst_prog)) {
2848 			dst_prog = NULL;
2849 			attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2850 			if (IS_ERR(attach_btf)) {
2851 				err = -EINVAL;
2852 				goto put_token;
2853 			}
2854 			if (!btf_is_kernel(attach_btf)) {
2855 				/* attaching through specifying bpf_prog's BTF
2856 				 * objects directly might be supported eventually
2857 				 */
2858 				btf_put(attach_btf);
2859 				err = -ENOTSUPP;
2860 				goto put_token;
2861 			}
2862 		}
2863 	} else if (attr->attach_btf_id) {
2864 		/* fall back to vmlinux BTF, if BTF type ID is specified */
2865 		attach_btf = bpf_get_btf_vmlinux();
2866 		if (IS_ERR(attach_btf)) {
2867 			err = PTR_ERR(attach_btf);
2868 			goto put_token;
2869 		}
2870 		if (!attach_btf) {
2871 			err = -EINVAL;
2872 			goto put_token;
2873 		}
2874 		btf_get(attach_btf);
2875 	}
2876 
2877 	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2878 				       attach_btf, attr->attach_btf_id,
2879 				       dst_prog)) {
2880 		if (dst_prog)
2881 			bpf_prog_put(dst_prog);
2882 		if (attach_btf)
2883 			btf_put(attach_btf);
2884 		err = -EINVAL;
2885 		goto put_token;
2886 	}
2887 
2888 	/* plain bpf_prog allocation */
2889 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2890 	if (!prog) {
2891 		if (dst_prog)
2892 			bpf_prog_put(dst_prog);
2893 		if (attach_btf)
2894 			btf_put(attach_btf);
2895 		err = -EINVAL;
2896 		goto put_token;
2897 	}
2898 
2899 	prog->expected_attach_type = attr->expected_attach_type;
2900 	prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE);
2901 	prog->aux->attach_btf = attach_btf;
2902 	prog->aux->attach_btf_id = attr->attach_btf_id;
2903 	prog->aux->dst_prog = dst_prog;
2904 	prog->aux->dev_bound = !!attr->prog_ifindex;
2905 	prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
2906 
2907 	/* move token into prog->aux, reuse taken refcnt */
2908 	prog->aux->token = token;
2909 	token = NULL;
2910 
2911 	prog->aux->user = get_current_user();
2912 	prog->len = attr->insn_cnt;
2913 
2914 	err = -EFAULT;
2915 	if (copy_from_bpfptr(prog->insns,
2916 			     make_bpfptr(attr->insns, uattr.is_kernel),
2917 			     bpf_prog_insn_size(prog)) != 0)
2918 		goto free_prog;
2919 	/* copy eBPF program license from user space */
2920 	if (strncpy_from_bpfptr(license,
2921 				make_bpfptr(attr->license, uattr.is_kernel),
2922 				sizeof(license) - 1) < 0)
2923 		goto free_prog;
2924 	license[sizeof(license) - 1] = 0;
2925 
2926 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2927 	prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0;
2928 
2929 	prog->orig_prog = NULL;
2930 	prog->jited = 0;
2931 
2932 	atomic64_set(&prog->aux->refcnt, 1);
2933 
2934 	if (bpf_prog_is_dev_bound(prog->aux)) {
2935 		err = bpf_prog_dev_bound_init(prog, attr);
2936 		if (err)
2937 			goto free_prog;
2938 	}
2939 
2940 	if (type == BPF_PROG_TYPE_EXT && dst_prog &&
2941 	    bpf_prog_is_dev_bound(dst_prog->aux)) {
2942 		err = bpf_prog_dev_bound_inherit(prog, dst_prog);
2943 		if (err)
2944 			goto free_prog;
2945 	}
2946 
2947 	/*
2948 	 * Bookkeeping for managing the program attachment chain.
2949 	 *
2950 	 * It might be tempting to set attach_tracing_prog flag at the attachment
2951 	 * time, but this will not prevent from loading bunch of tracing prog
2952 	 * first, then attach them one to another.
2953 	 *
2954 	 * The flag attach_tracing_prog is set for the whole program lifecycle, and
2955 	 * doesn't have to be cleared in bpf_tracing_link_release, since tracing
2956 	 * programs cannot change attachment target.
2957 	 */
2958 	if (type == BPF_PROG_TYPE_TRACING && dst_prog &&
2959 	    dst_prog->type == BPF_PROG_TYPE_TRACING) {
2960 		prog->aux->attach_tracing_prog = true;
2961 	}
2962 
2963 	/* find program type: socket_filter vs tracing_filter */
2964 	err = find_prog_type(type, prog);
2965 	if (err < 0)
2966 		goto free_prog;
2967 
2968 	prog->aux->load_time = ktime_get_boottime_ns();
2969 	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2970 			       sizeof(attr->prog_name));
2971 	if (err < 0)
2972 		goto free_prog;
2973 
2974 	err = security_bpf_prog_load(prog, attr, token, uattr.is_kernel);
2975 	if (err)
2976 		goto free_prog_sec;
2977 
2978 	/* run eBPF verifier */
2979 	err = bpf_check(&prog, attr, uattr, uattr_size);
2980 	if (err < 0)
2981 		goto free_used_maps;
2982 
2983 	prog = bpf_prog_select_runtime(prog, &err);
2984 	if (err < 0)
2985 		goto free_used_maps;
2986 
2987 	err = bpf_prog_alloc_id(prog);
2988 	if (err)
2989 		goto free_used_maps;
2990 
2991 	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
2992 	 * effectively publicly exposed. However, retrieving via
2993 	 * bpf_prog_get_fd_by_id() will take another reference,
2994 	 * therefore it cannot be gone underneath us.
2995 	 *
2996 	 * Only for the time /after/ successful bpf_prog_new_fd()
2997 	 * and before returning to userspace, we might just hold
2998 	 * one reference and any parallel close on that fd could
2999 	 * rip everything out. Hence, below notifications must
3000 	 * happen before bpf_prog_new_fd().
3001 	 *
3002 	 * Also, any failure handling from this point onwards must
3003 	 * be using bpf_prog_put() given the program is exposed.
3004 	 */
3005 	bpf_prog_kallsyms_add(prog);
3006 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
3007 	bpf_audit_prog(prog, BPF_AUDIT_LOAD);
3008 
3009 	err = bpf_prog_new_fd(prog);
3010 	if (err < 0)
3011 		bpf_prog_put(prog);
3012 	return err;
3013 
3014 free_used_maps:
3015 	/* In case we have subprogs, we need to wait for a grace
3016 	 * period before we can tear down JIT memory since symbols
3017 	 * are already exposed under kallsyms.
3018 	 */
3019 	__bpf_prog_put_noref(prog, prog->aux->real_func_cnt);
3020 	return err;
3021 
3022 free_prog_sec:
3023 	security_bpf_prog_free(prog);
3024 free_prog:
3025 	free_uid(prog->aux->user);
3026 	if (prog->aux->attach_btf)
3027 		btf_put(prog->aux->attach_btf);
3028 	bpf_prog_free(prog);
3029 put_token:
3030 	bpf_token_put(token);
3031 	return err;
3032 }
3033 
3034 #define BPF_OBJ_LAST_FIELD path_fd
3035 
bpf_obj_pin(const union bpf_attr * attr)3036 static int bpf_obj_pin(const union bpf_attr *attr)
3037 {
3038 	int path_fd;
3039 
3040 	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD)
3041 		return -EINVAL;
3042 
3043 	/* path_fd has to be accompanied by BPF_F_PATH_FD flag */
3044 	if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
3045 		return -EINVAL;
3046 
3047 	path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
3048 	return bpf_obj_pin_user(attr->bpf_fd, path_fd,
3049 				u64_to_user_ptr(attr->pathname));
3050 }
3051 
bpf_obj_get(const union bpf_attr * attr)3052 static int bpf_obj_get(const union bpf_attr *attr)
3053 {
3054 	int path_fd;
3055 
3056 	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
3057 	    attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD))
3058 		return -EINVAL;
3059 
3060 	/* path_fd has to be accompanied by BPF_F_PATH_FD flag */
3061 	if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
3062 		return -EINVAL;
3063 
3064 	path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
3065 	return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname),
3066 				attr->file_flags);
3067 }
3068 
3069 /* bpf_link_init_sleepable() allows to specify whether BPF link itself has
3070  * "sleepable" semantics, which normally would mean that BPF link's attach
3071  * hook can dereference link or link's underlying program for some time after
3072  * detachment due to RCU Tasks Trace-based lifetime protection scheme.
3073  * BPF program itself can be non-sleepable, yet, because it's transitively
3074  * reachable through BPF link, its freeing has to be delayed until after RCU
3075  * Tasks Trace GP.
3076  */
bpf_link_init_sleepable(struct bpf_link * link,enum bpf_link_type type,const struct bpf_link_ops * ops,struct bpf_prog * prog,enum bpf_attach_type attach_type,bool sleepable)3077 void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
3078 			     const struct bpf_link_ops *ops, struct bpf_prog *prog,
3079 			     enum bpf_attach_type attach_type, bool sleepable)
3080 {
3081 	WARN_ON(ops->dealloc && ops->dealloc_deferred);
3082 	atomic64_set(&link->refcnt, 1);
3083 	link->type = type;
3084 	link->sleepable = sleepable;
3085 	link->id = 0;
3086 	link->ops = ops;
3087 	link->prog = prog;
3088 	link->attach_type = attach_type;
3089 }
3090 
bpf_link_init(struct bpf_link * link,enum bpf_link_type type,const struct bpf_link_ops * ops,struct bpf_prog * prog,enum bpf_attach_type attach_type)3091 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
3092 		   const struct bpf_link_ops *ops, struct bpf_prog *prog,
3093 		   enum bpf_attach_type attach_type)
3094 {
3095 	bpf_link_init_sleepable(link, type, ops, prog, attach_type, false);
3096 }
3097 
bpf_link_free_id(int id)3098 static void bpf_link_free_id(int id)
3099 {
3100 	if (!id)
3101 		return;
3102 
3103 	spin_lock_bh(&link_idr_lock);
3104 	idr_remove(&link_idr, id);
3105 	spin_unlock_bh(&link_idr_lock);
3106 }
3107 
3108 /* Clean up bpf_link and corresponding anon_inode file and FD. After
3109  * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
3110  * anon_inode's release() call. This helper marks bpf_link as
3111  * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
3112  * is not decremented, it's the responsibility of a calling code that failed
3113  * to complete bpf_link initialization.
3114  * This helper eventually calls link's dealloc callback, but does not call
3115  * link's release callback.
3116  */
bpf_link_cleanup(struct bpf_link_primer * primer)3117 void bpf_link_cleanup(struct bpf_link_primer *primer)
3118 {
3119 	primer->link->prog = NULL;
3120 	bpf_link_free_id(primer->id);
3121 	fput(primer->file);
3122 	put_unused_fd(primer->fd);
3123 }
3124 
bpf_link_inc(struct bpf_link * link)3125 void bpf_link_inc(struct bpf_link *link)
3126 {
3127 	atomic64_inc(&link->refcnt);
3128 }
3129 
bpf_link_dealloc(struct bpf_link * link)3130 static void bpf_link_dealloc(struct bpf_link *link)
3131 {
3132 	/* now that we know that bpf_link itself can't be reached, put underlying BPF program */
3133 	if (link->prog)
3134 		bpf_prog_put(link->prog);
3135 
3136 	/* free bpf_link and its containing memory */
3137 	if (link->ops->dealloc_deferred)
3138 		link->ops->dealloc_deferred(link);
3139 	else
3140 		link->ops->dealloc(link);
3141 }
3142 
bpf_link_defer_dealloc_rcu_gp(struct rcu_head * rcu)3143 static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
3144 {
3145 	struct bpf_link *link = container_of(rcu, struct bpf_link, rcu);
3146 
3147 	bpf_link_dealloc(link);
3148 }
3149 
bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head * rcu)3150 static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
3151 {
3152 	if (rcu_trace_implies_rcu_gp())
3153 		bpf_link_defer_dealloc_rcu_gp(rcu);
3154 	else
3155 		call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp);
3156 }
3157 
3158 /* bpf_link_free is guaranteed to be called from process context */
bpf_link_free(struct bpf_link * link)3159 static void bpf_link_free(struct bpf_link *link)
3160 {
3161 	const struct bpf_link_ops *ops = link->ops;
3162 
3163 	bpf_link_free_id(link->id);
3164 	/* detach BPF program, clean up used resources */
3165 	if (link->prog)
3166 		ops->release(link);
3167 	if (ops->dealloc_deferred) {
3168 		/* Schedule BPF link deallocation, which will only then
3169 		 * trigger putting BPF program refcount.
3170 		 * If underlying BPF program is sleepable or BPF link's target
3171 		 * attach hookpoint is sleepable or otherwise requires RCU GPs
3172 		 * to ensure link and its underlying BPF program is not
3173 		 * reachable anymore, we need to first wait for RCU tasks
3174 		 * trace sync, and then go through "classic" RCU grace period
3175 		 */
3176 		if (link->sleepable || (link->prog && link->prog->sleepable))
3177 			call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
3178 		else
3179 			call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
3180 	} else if (ops->dealloc) {
3181 		bpf_link_dealloc(link);
3182 	}
3183 }
3184 
bpf_link_put_deferred(struct work_struct * work)3185 static void bpf_link_put_deferred(struct work_struct *work)
3186 {
3187 	struct bpf_link *link = container_of(work, struct bpf_link, work);
3188 
3189 	bpf_link_free(link);
3190 }
3191 
3192 /* bpf_link_put might be called from atomic context. It needs to be called
3193  * from sleepable context in order to acquire sleeping locks during the process.
3194  */
bpf_link_put(struct bpf_link * link)3195 void bpf_link_put(struct bpf_link *link)
3196 {
3197 	if (!atomic64_dec_and_test(&link->refcnt))
3198 		return;
3199 
3200 	INIT_WORK(&link->work, bpf_link_put_deferred);
3201 	schedule_work(&link->work);
3202 }
3203 EXPORT_SYMBOL(bpf_link_put);
3204 
bpf_link_put_direct(struct bpf_link * link)3205 static void bpf_link_put_direct(struct bpf_link *link)
3206 {
3207 	if (!atomic64_dec_and_test(&link->refcnt))
3208 		return;
3209 	bpf_link_free(link);
3210 }
3211 
bpf_link_release(struct inode * inode,struct file * filp)3212 static int bpf_link_release(struct inode *inode, struct file *filp)
3213 {
3214 	struct bpf_link *link = filp->private_data;
3215 
3216 	bpf_link_put_direct(link);
3217 	return 0;
3218 }
3219 
3220 #ifdef CONFIG_PROC_FS
3221 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
3222 #define BPF_MAP_TYPE(_id, _ops)
3223 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
3224 static const char *bpf_link_type_strs[] = {
3225 	[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
3226 #include <linux/bpf_types.h>
3227 };
3228 #undef BPF_PROG_TYPE
3229 #undef BPF_MAP_TYPE
3230 #undef BPF_LINK_TYPE
3231 
bpf_link_show_fdinfo(struct seq_file * m,struct file * filp)3232 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
3233 {
3234 	const struct bpf_link *link = filp->private_data;
3235 	const struct bpf_prog *prog = link->prog;
3236 	enum bpf_link_type type = link->type;
3237 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
3238 
3239 	if (type < ARRAY_SIZE(bpf_link_type_strs) && bpf_link_type_strs[type]) {
3240 		if (link->type == BPF_LINK_TYPE_KPROBE_MULTI)
3241 			seq_printf(m, "link_type:\t%s\n", link->flags == BPF_F_KPROBE_MULTI_RETURN ?
3242 				   "kretprobe_multi" : "kprobe_multi");
3243 		else if (link->type == BPF_LINK_TYPE_UPROBE_MULTI)
3244 			seq_printf(m, "link_type:\t%s\n", link->flags == BPF_F_UPROBE_MULTI_RETURN ?
3245 				   "uretprobe_multi" : "uprobe_multi");
3246 		else
3247 			seq_printf(m, "link_type:\t%s\n", bpf_link_type_strs[type]);
3248 	} else {
3249 		WARN_ONCE(1, "missing BPF_LINK_TYPE(...) for link type %u\n", type);
3250 		seq_printf(m, "link_type:\t<%u>\n", type);
3251 	}
3252 	seq_printf(m, "link_id:\t%u\n", link->id);
3253 
3254 	if (prog) {
3255 		bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
3256 		seq_printf(m,
3257 			   "prog_tag:\t%s\n"
3258 			   "prog_id:\t%u\n",
3259 			   prog_tag,
3260 			   prog->aux->id);
3261 	}
3262 	if (link->ops->show_fdinfo)
3263 		link->ops->show_fdinfo(link, m);
3264 }
3265 #endif
3266 
bpf_link_poll(struct file * file,struct poll_table_struct * pts)3267 static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts)
3268 {
3269 	struct bpf_link *link = file->private_data;
3270 
3271 	return link->ops->poll(file, pts);
3272 }
3273 
3274 static const struct file_operations bpf_link_fops = {
3275 #ifdef CONFIG_PROC_FS
3276 	.show_fdinfo	= bpf_link_show_fdinfo,
3277 #endif
3278 	.release	= bpf_link_release,
3279 	.read		= bpf_dummy_read,
3280 	.write		= bpf_dummy_write,
3281 };
3282 
3283 static const struct file_operations bpf_link_fops_poll = {
3284 #ifdef CONFIG_PROC_FS
3285 	.show_fdinfo	= bpf_link_show_fdinfo,
3286 #endif
3287 	.release	= bpf_link_release,
3288 	.read		= bpf_dummy_read,
3289 	.write		= bpf_dummy_write,
3290 	.poll		= bpf_link_poll,
3291 };
3292 
bpf_link_alloc_id(struct bpf_link * link)3293 static int bpf_link_alloc_id(struct bpf_link *link)
3294 {
3295 	int id;
3296 
3297 	idr_preload(GFP_KERNEL);
3298 	spin_lock_bh(&link_idr_lock);
3299 	id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
3300 	spin_unlock_bh(&link_idr_lock);
3301 	idr_preload_end();
3302 
3303 	return id;
3304 }
3305 
3306 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
3307  * reserving unused FD and allocating ID from link_idr. This is to be paired
3308  * with bpf_link_settle() to install FD and ID and expose bpf_link to
3309  * user-space, if bpf_link is successfully attached. If not, bpf_link and
3310  * pre-allocated resources are to be freed with bpf_cleanup() call. All the
3311  * transient state is passed around in struct bpf_link_primer.
3312  * This is preferred way to create and initialize bpf_link, especially when
3313  * there are complicated and expensive operations in between creating bpf_link
3314  * itself and attaching it to BPF hook. By using bpf_link_prime() and
3315  * bpf_link_settle() kernel code using bpf_link doesn't have to perform
3316  * expensive (and potentially failing) roll back operations in a rare case
3317  * that file, FD, or ID can't be allocated.
3318  */
bpf_link_prime(struct bpf_link * link,struct bpf_link_primer * primer)3319 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
3320 {
3321 	struct file *file;
3322 	int fd, id;
3323 
3324 	fd = get_unused_fd_flags(O_CLOEXEC);
3325 	if (fd < 0)
3326 		return fd;
3327 
3328 
3329 	id = bpf_link_alloc_id(link);
3330 	if (id < 0) {
3331 		put_unused_fd(fd);
3332 		return id;
3333 	}
3334 
3335 	file = anon_inode_getfile("bpf_link",
3336 				  link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
3337 				  link, O_CLOEXEC);
3338 	if (IS_ERR(file)) {
3339 		bpf_link_free_id(id);
3340 		put_unused_fd(fd);
3341 		return PTR_ERR(file);
3342 	}
3343 
3344 	primer->link = link;
3345 	primer->file = file;
3346 	primer->fd = fd;
3347 	primer->id = id;
3348 	return 0;
3349 }
3350 
bpf_link_settle(struct bpf_link_primer * primer)3351 int bpf_link_settle(struct bpf_link_primer *primer)
3352 {
3353 	/* make bpf_link fetchable by ID */
3354 	spin_lock_bh(&link_idr_lock);
3355 	primer->link->id = primer->id;
3356 	spin_unlock_bh(&link_idr_lock);
3357 	/* make bpf_link fetchable by FD */
3358 	fd_install(primer->fd, primer->file);
3359 	/* pass through installed FD */
3360 	return primer->fd;
3361 }
3362 
bpf_link_new_fd(struct bpf_link * link)3363 int bpf_link_new_fd(struct bpf_link *link)
3364 {
3365 	return anon_inode_getfd("bpf-link",
3366 				link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
3367 				link, O_CLOEXEC);
3368 }
3369 
bpf_link_get_from_fd(u32 ufd)3370 struct bpf_link *bpf_link_get_from_fd(u32 ufd)
3371 {
3372 	CLASS(fd, f)(ufd);
3373 	struct bpf_link *link;
3374 
3375 	if (fd_empty(f))
3376 		return ERR_PTR(-EBADF);
3377 	if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll)
3378 		return ERR_PTR(-EINVAL);
3379 
3380 	link = fd_file(f)->private_data;
3381 	bpf_link_inc(link);
3382 	return link;
3383 }
3384 EXPORT_SYMBOL_NS(bpf_link_get_from_fd, "BPF_INTERNAL");
3385 
bpf_tracing_link_release(struct bpf_link * link)3386 static void bpf_tracing_link_release(struct bpf_link *link)
3387 {
3388 	struct bpf_tracing_link *tr_link =
3389 		container_of(link, struct bpf_tracing_link, link.link);
3390 
3391 	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
3392 						tr_link->trampoline,
3393 						tr_link->tgt_prog));
3394 
3395 	bpf_trampoline_put(tr_link->trampoline);
3396 
3397 	/* tgt_prog is NULL if target is a kernel function */
3398 	if (tr_link->tgt_prog)
3399 		bpf_prog_put(tr_link->tgt_prog);
3400 }
3401 
bpf_tracing_link_dealloc(struct bpf_link * link)3402 static void bpf_tracing_link_dealloc(struct bpf_link *link)
3403 {
3404 	struct bpf_tracing_link *tr_link =
3405 		container_of(link, struct bpf_tracing_link, link.link);
3406 
3407 	kfree(tr_link);
3408 }
3409 
bpf_tracing_link_show_fdinfo(const struct bpf_link * link,struct seq_file * seq)3410 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
3411 					 struct seq_file *seq)
3412 {
3413 	struct bpf_tracing_link *tr_link =
3414 		container_of(link, struct bpf_tracing_link, link.link);
3415 	u32 target_btf_id, target_obj_id;
3416 
3417 	bpf_trampoline_unpack_key(tr_link->trampoline->key,
3418 				  &target_obj_id, &target_btf_id);
3419 	seq_printf(seq,
3420 		   "attach_type:\t%d\n"
3421 		   "target_obj_id:\t%u\n"
3422 		   "target_btf_id:\t%u\n"
3423 		   "cookie:\t%llu\n",
3424 		   link->attach_type,
3425 		   target_obj_id,
3426 		   target_btf_id,
3427 		   tr_link->link.cookie);
3428 }
3429 
bpf_tracing_link_fill_link_info(const struct bpf_link * link,struct bpf_link_info * info)3430 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
3431 					   struct bpf_link_info *info)
3432 {
3433 	struct bpf_tracing_link *tr_link =
3434 		container_of(link, struct bpf_tracing_link, link.link);
3435 
3436 	info->tracing.attach_type = link->attach_type;
3437 	info->tracing.cookie = tr_link->link.cookie;
3438 	bpf_trampoline_unpack_key(tr_link->trampoline->key,
3439 				  &info->tracing.target_obj_id,
3440 				  &info->tracing.target_btf_id);
3441 
3442 	return 0;
3443 }
3444 
3445 static const struct bpf_link_ops bpf_tracing_link_lops = {
3446 	.release = bpf_tracing_link_release,
3447 	.dealloc = bpf_tracing_link_dealloc,
3448 	.show_fdinfo = bpf_tracing_link_show_fdinfo,
3449 	.fill_link_info = bpf_tracing_link_fill_link_info,
3450 };
3451 
bpf_tracing_prog_attach(struct bpf_prog * prog,int tgt_prog_fd,u32 btf_id,u64 bpf_cookie,enum bpf_attach_type attach_type)3452 static int bpf_tracing_prog_attach(struct bpf_prog *prog,
3453 				   int tgt_prog_fd,
3454 				   u32 btf_id,
3455 				   u64 bpf_cookie,
3456 				   enum bpf_attach_type attach_type)
3457 {
3458 	struct bpf_link_primer link_primer;
3459 	struct bpf_prog *tgt_prog = NULL;
3460 	struct bpf_trampoline *tr = NULL;
3461 	struct bpf_tracing_link *link;
3462 	u64 key = 0;
3463 	int err;
3464 
3465 	switch (prog->type) {
3466 	case BPF_PROG_TYPE_TRACING:
3467 		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
3468 		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
3469 		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
3470 			err = -EINVAL;
3471 			goto out_put_prog;
3472 		}
3473 		break;
3474 	case BPF_PROG_TYPE_EXT:
3475 		if (prog->expected_attach_type != 0) {
3476 			err = -EINVAL;
3477 			goto out_put_prog;
3478 		}
3479 		break;
3480 	case BPF_PROG_TYPE_LSM:
3481 		if (prog->expected_attach_type != BPF_LSM_MAC) {
3482 			err = -EINVAL;
3483 			goto out_put_prog;
3484 		}
3485 		break;
3486 	default:
3487 		err = -EINVAL;
3488 		goto out_put_prog;
3489 	}
3490 
3491 	if (!!tgt_prog_fd != !!btf_id) {
3492 		err = -EINVAL;
3493 		goto out_put_prog;
3494 	}
3495 
3496 	if (tgt_prog_fd) {
3497 		/*
3498 		 * For now we only allow new targets for BPF_PROG_TYPE_EXT. If this
3499 		 * part would be changed to implement the same for
3500 		 * BPF_PROG_TYPE_TRACING, do not forget to update the way how
3501 		 * attach_tracing_prog flag is set.
3502 		 */
3503 		if (prog->type != BPF_PROG_TYPE_EXT) {
3504 			err = -EINVAL;
3505 			goto out_put_prog;
3506 		}
3507 
3508 		tgt_prog = bpf_prog_get(tgt_prog_fd);
3509 		if (IS_ERR(tgt_prog)) {
3510 			err = PTR_ERR(tgt_prog);
3511 			tgt_prog = NULL;
3512 			goto out_put_prog;
3513 		}
3514 
3515 		key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
3516 	}
3517 
3518 	link = kzalloc(sizeof(*link), GFP_USER);
3519 	if (!link) {
3520 		err = -ENOMEM;
3521 		goto out_put_prog;
3522 	}
3523 	bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,
3524 		      &bpf_tracing_link_lops, prog, attach_type);
3525 
3526 	link->link.cookie = bpf_cookie;
3527 
3528 	mutex_lock(&prog->aux->dst_mutex);
3529 
3530 	/* There are a few possible cases here:
3531 	 *
3532 	 * - if prog->aux->dst_trampoline is set, the program was just loaded
3533 	 *   and not yet attached to anything, so we can use the values stored
3534 	 *   in prog->aux
3535 	 *
3536 	 * - if prog->aux->dst_trampoline is NULL, the program has already been
3537 	 *   attached to a target and its initial target was cleared (below)
3538 	 *
3539 	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
3540 	 *   target_btf_id using the link_create API.
3541 	 *
3542 	 * - if tgt_prog == NULL when this function was called using the old
3543 	 *   raw_tracepoint_open API, and we need a target from prog->aux
3544 	 *
3545 	 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
3546 	 *   was detached and is going for re-attachment.
3547 	 *
3548 	 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf
3549 	 *   are NULL, then program was already attached and user did not provide
3550 	 *   tgt_prog_fd so we have no way to find out or create trampoline
3551 	 */
3552 	if (!prog->aux->dst_trampoline && !tgt_prog) {
3553 		/*
3554 		 * Allow re-attach for TRACING and LSM programs. If it's
3555 		 * currently linked, bpf_trampoline_link_prog will fail.
3556 		 * EXT programs need to specify tgt_prog_fd, so they
3557 		 * re-attach in separate code path.
3558 		 */
3559 		if (prog->type != BPF_PROG_TYPE_TRACING &&
3560 		    prog->type != BPF_PROG_TYPE_LSM) {
3561 			err = -EINVAL;
3562 			goto out_unlock;
3563 		}
3564 		/* We can allow re-attach only if we have valid attach_btf. */
3565 		if (!prog->aux->attach_btf) {
3566 			err = -EINVAL;
3567 			goto out_unlock;
3568 		}
3569 		btf_id = prog->aux->attach_btf_id;
3570 		key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
3571 	}
3572 
3573 	if (!prog->aux->dst_trampoline ||
3574 	    (key && key != prog->aux->dst_trampoline->key)) {
3575 		/* If there is no saved target, or the specified target is
3576 		 * different from the destination specified at load time, we
3577 		 * need a new trampoline and a check for compatibility
3578 		 */
3579 		struct bpf_attach_target_info tgt_info = {};
3580 
3581 		err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
3582 					      &tgt_info);
3583 		if (err)
3584 			goto out_unlock;
3585 
3586 		if (tgt_info.tgt_mod) {
3587 			module_put(prog->aux->mod);
3588 			prog->aux->mod = tgt_info.tgt_mod;
3589 		}
3590 
3591 		tr = bpf_trampoline_get(key, &tgt_info);
3592 		if (!tr) {
3593 			err = -ENOMEM;
3594 			goto out_unlock;
3595 		}
3596 	} else {
3597 		/* The caller didn't specify a target, or the target was the
3598 		 * same as the destination supplied during program load. This
3599 		 * means we can reuse the trampoline and reference from program
3600 		 * load time, and there is no need to allocate a new one. This
3601 		 * can only happen once for any program, as the saved values in
3602 		 * prog->aux are cleared below.
3603 		 */
3604 		tr = prog->aux->dst_trampoline;
3605 		tgt_prog = prog->aux->dst_prog;
3606 	}
3607 
3608 	err = bpf_link_prime(&link->link.link, &link_primer);
3609 	if (err)
3610 		goto out_unlock;
3611 
3612 	err = bpf_trampoline_link_prog(&link->link, tr, tgt_prog);
3613 	if (err) {
3614 		bpf_link_cleanup(&link_primer);
3615 		link = NULL;
3616 		goto out_unlock;
3617 	}
3618 
3619 	link->tgt_prog = tgt_prog;
3620 	link->trampoline = tr;
3621 
3622 	/* Always clear the trampoline and target prog from prog->aux to make
3623 	 * sure the original attach destination is not kept alive after a
3624 	 * program is (re-)attached to another target.
3625 	 */
3626 	if (prog->aux->dst_prog &&
3627 	    (tgt_prog_fd || tr != prog->aux->dst_trampoline))
3628 		/* got extra prog ref from syscall, or attaching to different prog */
3629 		bpf_prog_put(prog->aux->dst_prog);
3630 	if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
3631 		/* we allocated a new trampoline, so free the old one */
3632 		bpf_trampoline_put(prog->aux->dst_trampoline);
3633 
3634 	prog->aux->dst_prog = NULL;
3635 	prog->aux->dst_trampoline = NULL;
3636 	mutex_unlock(&prog->aux->dst_mutex);
3637 
3638 	return bpf_link_settle(&link_primer);
3639 out_unlock:
3640 	if (tr && tr != prog->aux->dst_trampoline)
3641 		bpf_trampoline_put(tr);
3642 	mutex_unlock(&prog->aux->dst_mutex);
3643 	kfree(link);
3644 out_put_prog:
3645 	if (tgt_prog_fd && tgt_prog)
3646 		bpf_prog_put(tgt_prog);
3647 	return err;
3648 }
3649 
bpf_raw_tp_link_release(struct bpf_link * link)3650 static void bpf_raw_tp_link_release(struct bpf_link *link)
3651 {
3652 	struct bpf_raw_tp_link *raw_tp =
3653 		container_of(link, struct bpf_raw_tp_link, link);
3654 
3655 	bpf_probe_unregister(raw_tp->btp, raw_tp);
3656 	bpf_put_raw_tracepoint(raw_tp->btp);
3657 }
3658 
bpf_raw_tp_link_dealloc(struct bpf_link * link)3659 static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
3660 {
3661 	struct bpf_raw_tp_link *raw_tp =
3662 		container_of(link, struct bpf_raw_tp_link, link);
3663 
3664 	kfree(raw_tp);
3665 }
3666 
bpf_raw_tp_link_show_fdinfo(const struct bpf_link * link,struct seq_file * seq)3667 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
3668 					struct seq_file *seq)
3669 {
3670 	struct bpf_raw_tp_link *raw_tp_link =
3671 		container_of(link, struct bpf_raw_tp_link, link);
3672 
3673 	seq_printf(seq,
3674 		   "tp_name:\t%s\n"
3675 		   "cookie:\t%llu\n",
3676 		   raw_tp_link->btp->tp->name,
3677 		   raw_tp_link->cookie);
3678 }
3679 
bpf_copy_to_user(char __user * ubuf,const char * buf,u32 ulen,u32 len)3680 static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen,
3681 			    u32 len)
3682 {
3683 	if (ulen >= len + 1) {
3684 		if (copy_to_user(ubuf, buf, len + 1))
3685 			return -EFAULT;
3686 	} else {
3687 		char zero = '\0';
3688 
3689 		if (copy_to_user(ubuf, buf, ulen - 1))
3690 			return -EFAULT;
3691 		if (put_user(zero, ubuf + ulen - 1))
3692 			return -EFAULT;
3693 		return -ENOSPC;
3694 	}
3695 
3696 	return 0;
3697 }
3698 
bpf_raw_tp_link_fill_link_info(const struct bpf_link * link,struct bpf_link_info * info)3699 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
3700 					  struct bpf_link_info *info)
3701 {
3702 	struct bpf_raw_tp_link *raw_tp_link =
3703 		container_of(link, struct bpf_raw_tp_link, link);
3704 	char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
3705 	const char *tp_name = raw_tp_link->btp->tp->name;
3706 	u32 ulen = info->raw_tracepoint.tp_name_len;
3707 	size_t tp_len = strlen(tp_name);
3708 
3709 	if (!ulen ^ !ubuf)
3710 		return -EINVAL;
3711 
3712 	info->raw_tracepoint.tp_name_len = tp_len + 1;
3713 	info->raw_tracepoint.cookie = raw_tp_link->cookie;
3714 
3715 	if (!ubuf)
3716 		return 0;
3717 
3718 	return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len);
3719 }
3720 
3721 static const struct bpf_link_ops bpf_raw_tp_link_lops = {
3722 	.release = bpf_raw_tp_link_release,
3723 	.dealloc_deferred = bpf_raw_tp_link_dealloc,
3724 	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
3725 	.fill_link_info = bpf_raw_tp_link_fill_link_info,
3726 };
3727 
3728 #ifdef CONFIG_PERF_EVENTS
3729 struct bpf_perf_link {
3730 	struct bpf_link link;
3731 	struct file *perf_file;
3732 };
3733 
bpf_perf_link_release(struct bpf_link * link)3734 static void bpf_perf_link_release(struct bpf_link *link)
3735 {
3736 	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3737 	struct perf_event *event = perf_link->perf_file->private_data;
3738 
3739 	perf_event_free_bpf_prog(event);
3740 	fput(perf_link->perf_file);
3741 }
3742 
bpf_perf_link_dealloc(struct bpf_link * link)3743 static void bpf_perf_link_dealloc(struct bpf_link *link)
3744 {
3745 	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3746 
3747 	kfree(perf_link);
3748 }
3749 
bpf_perf_link_fill_common(const struct perf_event * event,char __user * uname,u32 * ulenp,u64 * probe_offset,u64 * probe_addr,u32 * fd_type,unsigned long * missed)3750 static int bpf_perf_link_fill_common(const struct perf_event *event,
3751 				     char __user *uname, u32 *ulenp,
3752 				     u64 *probe_offset, u64 *probe_addr,
3753 				     u32 *fd_type, unsigned long *missed)
3754 {
3755 	const char *buf;
3756 	u32 prog_id, ulen;
3757 	size_t len;
3758 	int err;
3759 
3760 	ulen = *ulenp;
3761 	if (!ulen ^ !uname)
3762 		return -EINVAL;
3763 
3764 	err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
3765 				      probe_offset, probe_addr, missed);
3766 	if (err)
3767 		return err;
3768 
3769 	if (buf) {
3770 		len = strlen(buf);
3771 		*ulenp = len + 1;
3772 	} else {
3773 		*ulenp = 1;
3774 	}
3775 	if (!uname)
3776 		return 0;
3777 
3778 	if (buf) {
3779 		err = bpf_copy_to_user(uname, buf, ulen, len);
3780 		if (err)
3781 			return err;
3782 	} else {
3783 		char zero = '\0';
3784 
3785 		if (put_user(zero, uname))
3786 			return -EFAULT;
3787 	}
3788 	return 0;
3789 }
3790 
3791 #ifdef CONFIG_KPROBE_EVENTS
bpf_perf_link_fill_kprobe(const struct perf_event * event,struct bpf_link_info * info)3792 static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
3793 				     struct bpf_link_info *info)
3794 {
3795 	unsigned long missed;
3796 	char __user *uname;
3797 	u64 addr, offset;
3798 	u32 ulen, type;
3799 	int err;
3800 
3801 	uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
3802 	ulen = info->perf_event.kprobe.name_len;
3803 	err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
3804 					&type, &missed);
3805 	if (err)
3806 		return err;
3807 	if (type == BPF_FD_TYPE_KRETPROBE)
3808 		info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
3809 	else
3810 		info->perf_event.type = BPF_PERF_EVENT_KPROBE;
3811 	info->perf_event.kprobe.name_len = ulen;
3812 	info->perf_event.kprobe.offset = offset;
3813 	info->perf_event.kprobe.missed = missed;
3814 	if (!kallsyms_show_value(current_cred()))
3815 		addr = 0;
3816 	info->perf_event.kprobe.addr = addr;
3817 	info->perf_event.kprobe.cookie = event->bpf_cookie;
3818 	return 0;
3819 }
3820 
bpf_perf_link_fdinfo_kprobe(const struct perf_event * event,struct seq_file * seq)3821 static void bpf_perf_link_fdinfo_kprobe(const struct perf_event *event,
3822 					struct seq_file *seq)
3823 {
3824 	const char *name;
3825 	int err;
3826 	u32 prog_id, type;
3827 	u64 offset, addr;
3828 	unsigned long missed;
3829 
3830 	err = bpf_get_perf_event_info(event, &prog_id, &type, &name,
3831 				      &offset, &addr, &missed);
3832 	if (err)
3833 		return;
3834 
3835 	seq_printf(seq,
3836 		   "name:\t%s\n"
3837 		   "offset:\t%#llx\n"
3838 		   "missed:\t%lu\n"
3839 		   "addr:\t%#llx\n"
3840 		   "event_type:\t%s\n"
3841 		   "cookie:\t%llu\n",
3842 		   name, offset, missed, addr,
3843 		   type == BPF_FD_TYPE_KRETPROBE ?  "kretprobe" : "kprobe",
3844 		   event->bpf_cookie);
3845 }
3846 #endif
3847 
3848 #ifdef CONFIG_UPROBE_EVENTS
bpf_perf_link_fill_uprobe(const struct perf_event * event,struct bpf_link_info * info)3849 static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
3850 				     struct bpf_link_info *info)
3851 {
3852 	u64 ref_ctr_offset, offset;
3853 	char __user *uname;
3854 	u32 ulen, type;
3855 	int err;
3856 
3857 	uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
3858 	ulen = info->perf_event.uprobe.name_len;
3859 	err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &ref_ctr_offset,
3860 					&type, NULL);
3861 	if (err)
3862 		return err;
3863 
3864 	if (type == BPF_FD_TYPE_URETPROBE)
3865 		info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
3866 	else
3867 		info->perf_event.type = BPF_PERF_EVENT_UPROBE;
3868 	info->perf_event.uprobe.name_len = ulen;
3869 	info->perf_event.uprobe.offset = offset;
3870 	info->perf_event.uprobe.cookie = event->bpf_cookie;
3871 	info->perf_event.uprobe.ref_ctr_offset = ref_ctr_offset;
3872 	return 0;
3873 }
3874 
bpf_perf_link_fdinfo_uprobe(const struct perf_event * event,struct seq_file * seq)3875 static void bpf_perf_link_fdinfo_uprobe(const struct perf_event *event,
3876 					struct seq_file *seq)
3877 {
3878 	const char *name;
3879 	int err;
3880 	u32 prog_id, type;
3881 	u64 offset, ref_ctr_offset;
3882 	unsigned long missed;
3883 
3884 	err = bpf_get_perf_event_info(event, &prog_id, &type, &name,
3885 				      &offset, &ref_ctr_offset, &missed);
3886 	if (err)
3887 		return;
3888 
3889 	seq_printf(seq,
3890 		   "name:\t%s\n"
3891 		   "offset:\t%#llx\n"
3892 		   "ref_ctr_offset:\t%#llx\n"
3893 		   "event_type:\t%s\n"
3894 		   "cookie:\t%llu\n",
3895 		   name, offset, ref_ctr_offset,
3896 		   type == BPF_FD_TYPE_URETPROBE ?  "uretprobe" : "uprobe",
3897 		   event->bpf_cookie);
3898 }
3899 #endif
3900 
bpf_perf_link_fill_probe(const struct perf_event * event,struct bpf_link_info * info)3901 static int bpf_perf_link_fill_probe(const struct perf_event *event,
3902 				    struct bpf_link_info *info)
3903 {
3904 #ifdef CONFIG_KPROBE_EVENTS
3905 	if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE)
3906 		return bpf_perf_link_fill_kprobe(event, info);
3907 #endif
3908 #ifdef CONFIG_UPROBE_EVENTS
3909 	if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE)
3910 		return bpf_perf_link_fill_uprobe(event, info);
3911 #endif
3912 	return -EOPNOTSUPP;
3913 }
3914 
bpf_perf_link_fill_tracepoint(const struct perf_event * event,struct bpf_link_info * info)3915 static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
3916 					 struct bpf_link_info *info)
3917 {
3918 	char __user *uname;
3919 	u32 ulen;
3920 	int err;
3921 
3922 	uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
3923 	ulen = info->perf_event.tracepoint.name_len;
3924 	err = bpf_perf_link_fill_common(event, uname, &ulen, NULL, NULL, NULL, NULL);
3925 	if (err)
3926 		return err;
3927 
3928 	info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
3929 	info->perf_event.tracepoint.name_len = ulen;
3930 	info->perf_event.tracepoint.cookie = event->bpf_cookie;
3931 	return 0;
3932 }
3933 
bpf_perf_link_fill_perf_event(const struct perf_event * event,struct bpf_link_info * info)3934 static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
3935 					 struct bpf_link_info *info)
3936 {
3937 	info->perf_event.event.type = event->attr.type;
3938 	info->perf_event.event.config = event->attr.config;
3939 	info->perf_event.event.cookie = event->bpf_cookie;
3940 	info->perf_event.type = BPF_PERF_EVENT_EVENT;
3941 	return 0;
3942 }
3943 
bpf_perf_link_fill_link_info(const struct bpf_link * link,struct bpf_link_info * info)3944 static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
3945 					struct bpf_link_info *info)
3946 {
3947 	struct bpf_perf_link *perf_link;
3948 	const struct perf_event *event;
3949 
3950 	perf_link = container_of(link, struct bpf_perf_link, link);
3951 	event = perf_get_event(perf_link->perf_file);
3952 	if (IS_ERR(event))
3953 		return PTR_ERR(event);
3954 
3955 	switch (event->prog->type) {
3956 	case BPF_PROG_TYPE_PERF_EVENT:
3957 		return bpf_perf_link_fill_perf_event(event, info);
3958 	case BPF_PROG_TYPE_TRACEPOINT:
3959 		return bpf_perf_link_fill_tracepoint(event, info);
3960 	case BPF_PROG_TYPE_KPROBE:
3961 		return bpf_perf_link_fill_probe(event, info);
3962 	default:
3963 		return -EOPNOTSUPP;
3964 	}
3965 }
3966 
bpf_perf_event_link_show_fdinfo(const struct perf_event * event,struct seq_file * seq)3967 static void bpf_perf_event_link_show_fdinfo(const struct perf_event *event,
3968 					    struct seq_file *seq)
3969 {
3970 	seq_printf(seq,
3971 		   "type:\t%u\n"
3972 		   "config:\t%llu\n"
3973 		   "event_type:\t%s\n"
3974 		   "cookie:\t%llu\n",
3975 		   event->attr.type, event->attr.config,
3976 		   "event", event->bpf_cookie);
3977 }
3978 
bpf_tracepoint_link_show_fdinfo(const struct perf_event * event,struct seq_file * seq)3979 static void bpf_tracepoint_link_show_fdinfo(const struct perf_event *event,
3980 					    struct seq_file *seq)
3981 {
3982 	int err;
3983 	const char *name;
3984 	u32 prog_id;
3985 
3986 	err = bpf_get_perf_event_info(event, &prog_id, NULL, &name, NULL,
3987 				      NULL, NULL);
3988 	if (err)
3989 		return;
3990 
3991 	seq_printf(seq,
3992 		   "tp_name:\t%s\n"
3993 		   "event_type:\t%s\n"
3994 		   "cookie:\t%llu\n",
3995 		   name, "tracepoint", event->bpf_cookie);
3996 }
3997 
bpf_probe_link_show_fdinfo(const struct perf_event * event,struct seq_file * seq)3998 static void bpf_probe_link_show_fdinfo(const struct perf_event *event,
3999 				       struct seq_file *seq)
4000 {
4001 #ifdef CONFIG_KPROBE_EVENTS
4002 	if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE)
4003 		return bpf_perf_link_fdinfo_kprobe(event, seq);
4004 #endif
4005 
4006 #ifdef CONFIG_UPROBE_EVENTS
4007 	if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE)
4008 		return bpf_perf_link_fdinfo_uprobe(event, seq);
4009 #endif
4010 }
4011 
bpf_perf_link_show_fdinfo(const struct bpf_link * link,struct seq_file * seq)4012 static void bpf_perf_link_show_fdinfo(const struct bpf_link *link,
4013 				      struct seq_file *seq)
4014 {
4015 	struct bpf_perf_link *perf_link;
4016 	const struct perf_event *event;
4017 
4018 	perf_link = container_of(link, struct bpf_perf_link, link);
4019 	event = perf_get_event(perf_link->perf_file);
4020 	if (IS_ERR(event))
4021 		return;
4022 
4023 	switch (event->prog->type) {
4024 	case BPF_PROG_TYPE_PERF_EVENT:
4025 		return bpf_perf_event_link_show_fdinfo(event, seq);
4026 	case BPF_PROG_TYPE_TRACEPOINT:
4027 		return bpf_tracepoint_link_show_fdinfo(event, seq);
4028 	case BPF_PROG_TYPE_KPROBE:
4029 		return bpf_probe_link_show_fdinfo(event, seq);
4030 	default:
4031 		return;
4032 	}
4033 }
4034 
4035 static const struct bpf_link_ops bpf_perf_link_lops = {
4036 	.release = bpf_perf_link_release,
4037 	.dealloc = bpf_perf_link_dealloc,
4038 	.fill_link_info = bpf_perf_link_fill_link_info,
4039 	.show_fdinfo = bpf_perf_link_show_fdinfo,
4040 };
4041 
bpf_perf_link_attach(const union bpf_attr * attr,struct bpf_prog * prog)4042 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
4043 {
4044 	struct bpf_link_primer link_primer;
4045 	struct bpf_perf_link *link;
4046 	struct perf_event *event;
4047 	struct file *perf_file;
4048 	int err;
4049 
4050 	if (attr->link_create.flags)
4051 		return -EINVAL;
4052 
4053 	perf_file = perf_event_get(attr->link_create.target_fd);
4054 	if (IS_ERR(perf_file))
4055 		return PTR_ERR(perf_file);
4056 
4057 	link = kzalloc(sizeof(*link), GFP_USER);
4058 	if (!link) {
4059 		err = -ENOMEM;
4060 		goto out_put_file;
4061 	}
4062 	bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog,
4063 		      attr->link_create.attach_type);
4064 	link->perf_file = perf_file;
4065 
4066 	err = bpf_link_prime(&link->link, &link_primer);
4067 	if (err) {
4068 		kfree(link);
4069 		goto out_put_file;
4070 	}
4071 
4072 	event = perf_file->private_data;
4073 	err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
4074 	if (err) {
4075 		bpf_link_cleanup(&link_primer);
4076 		goto out_put_file;
4077 	}
4078 	/* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
4079 	bpf_prog_inc(prog);
4080 
4081 	return bpf_link_settle(&link_primer);
4082 
4083 out_put_file:
4084 	fput(perf_file);
4085 	return err;
4086 }
4087 #else
bpf_perf_link_attach(const union bpf_attr * attr,struct bpf_prog * prog)4088 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
4089 {
4090 	return -EOPNOTSUPP;
4091 }
4092 #endif /* CONFIG_PERF_EVENTS */
4093 
bpf_raw_tp_link_attach(struct bpf_prog * prog,const char __user * user_tp_name,u64 cookie,enum bpf_attach_type attach_type)4094 static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
4095 				  const char __user *user_tp_name, u64 cookie,
4096 				  enum bpf_attach_type attach_type)
4097 {
4098 	struct bpf_link_primer link_primer;
4099 	struct bpf_raw_tp_link *link;
4100 	struct bpf_raw_event_map *btp;
4101 	const char *tp_name;
4102 	char buf[128];
4103 	int err;
4104 
4105 	switch (prog->type) {
4106 	case BPF_PROG_TYPE_TRACING:
4107 	case BPF_PROG_TYPE_EXT:
4108 	case BPF_PROG_TYPE_LSM:
4109 		if (user_tp_name)
4110 			/* The attach point for this category of programs
4111 			 * should be specified via btf_id during program load.
4112 			 */
4113 			return -EINVAL;
4114 		if (prog->type == BPF_PROG_TYPE_TRACING &&
4115 		    prog->expected_attach_type == BPF_TRACE_RAW_TP) {
4116 			tp_name = prog->aux->attach_func_name;
4117 			break;
4118 		}
4119 		return bpf_tracing_prog_attach(prog, 0, 0, 0, attach_type);
4120 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
4121 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
4122 		if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
4123 			return -EFAULT;
4124 		buf[sizeof(buf) - 1] = 0;
4125 		tp_name = buf;
4126 		break;
4127 	default:
4128 		return -EINVAL;
4129 	}
4130 
4131 	btp = bpf_get_raw_tracepoint(tp_name);
4132 	if (!btp)
4133 		return -ENOENT;
4134 
4135 	link = kzalloc(sizeof(*link), GFP_USER);
4136 	if (!link) {
4137 		err = -ENOMEM;
4138 		goto out_put_btp;
4139 	}
4140 	bpf_link_init_sleepable(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
4141 				&bpf_raw_tp_link_lops, prog, attach_type,
4142 				tracepoint_is_faultable(btp->tp));
4143 	link->btp = btp;
4144 	link->cookie = cookie;
4145 
4146 	err = bpf_link_prime(&link->link, &link_primer);
4147 	if (err) {
4148 		kfree(link);
4149 		goto out_put_btp;
4150 	}
4151 
4152 	err = bpf_probe_register(link->btp, link);
4153 	if (err) {
4154 		bpf_link_cleanup(&link_primer);
4155 		goto out_put_btp;
4156 	}
4157 
4158 	return bpf_link_settle(&link_primer);
4159 
4160 out_put_btp:
4161 	bpf_put_raw_tracepoint(btp);
4162 	return err;
4163 }
4164 
4165 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.cookie
4166 
bpf_raw_tracepoint_open(const union bpf_attr * attr)4167 static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
4168 {
4169 	struct bpf_prog *prog;
4170 	void __user *tp_name;
4171 	__u64 cookie;
4172 	int fd;
4173 
4174 	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
4175 		return -EINVAL;
4176 
4177 	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
4178 	if (IS_ERR(prog))
4179 		return PTR_ERR(prog);
4180 
4181 	tp_name = u64_to_user_ptr(attr->raw_tracepoint.name);
4182 	cookie = attr->raw_tracepoint.cookie;
4183 	fd = bpf_raw_tp_link_attach(prog, tp_name, cookie, prog->expected_attach_type);
4184 	if (fd < 0)
4185 		bpf_prog_put(prog);
4186 	return fd;
4187 }
4188 
4189 static enum bpf_prog_type
attach_type_to_prog_type(enum bpf_attach_type attach_type)4190 attach_type_to_prog_type(enum bpf_attach_type attach_type)
4191 {
4192 	switch (attach_type) {
4193 	case BPF_CGROUP_INET_INGRESS:
4194 	case BPF_CGROUP_INET_EGRESS:
4195 		return BPF_PROG_TYPE_CGROUP_SKB;
4196 	case BPF_CGROUP_INET_SOCK_CREATE:
4197 	case BPF_CGROUP_INET_SOCK_RELEASE:
4198 	case BPF_CGROUP_INET4_POST_BIND:
4199 	case BPF_CGROUP_INET6_POST_BIND:
4200 		return BPF_PROG_TYPE_CGROUP_SOCK;
4201 	case BPF_CGROUP_INET4_BIND:
4202 	case BPF_CGROUP_INET6_BIND:
4203 	case BPF_CGROUP_INET4_CONNECT:
4204 	case BPF_CGROUP_INET6_CONNECT:
4205 	case BPF_CGROUP_UNIX_CONNECT:
4206 	case BPF_CGROUP_INET4_GETPEERNAME:
4207 	case BPF_CGROUP_INET6_GETPEERNAME:
4208 	case BPF_CGROUP_UNIX_GETPEERNAME:
4209 	case BPF_CGROUP_INET4_GETSOCKNAME:
4210 	case BPF_CGROUP_INET6_GETSOCKNAME:
4211 	case BPF_CGROUP_UNIX_GETSOCKNAME:
4212 	case BPF_CGROUP_UDP4_SENDMSG:
4213 	case BPF_CGROUP_UDP6_SENDMSG:
4214 	case BPF_CGROUP_UNIX_SENDMSG:
4215 	case BPF_CGROUP_UDP4_RECVMSG:
4216 	case BPF_CGROUP_UDP6_RECVMSG:
4217 	case BPF_CGROUP_UNIX_RECVMSG:
4218 		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
4219 	case BPF_CGROUP_SOCK_OPS:
4220 		return BPF_PROG_TYPE_SOCK_OPS;
4221 	case BPF_CGROUP_DEVICE:
4222 		return BPF_PROG_TYPE_CGROUP_DEVICE;
4223 	case BPF_SK_MSG_VERDICT:
4224 		return BPF_PROG_TYPE_SK_MSG;
4225 	case BPF_SK_SKB_STREAM_PARSER:
4226 	case BPF_SK_SKB_STREAM_VERDICT:
4227 	case BPF_SK_SKB_VERDICT:
4228 		return BPF_PROG_TYPE_SK_SKB;
4229 	case BPF_LIRC_MODE2:
4230 		return BPF_PROG_TYPE_LIRC_MODE2;
4231 	case BPF_FLOW_DISSECTOR:
4232 		return BPF_PROG_TYPE_FLOW_DISSECTOR;
4233 	case BPF_CGROUP_SYSCTL:
4234 		return BPF_PROG_TYPE_CGROUP_SYSCTL;
4235 	case BPF_CGROUP_GETSOCKOPT:
4236 	case BPF_CGROUP_SETSOCKOPT:
4237 		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
4238 	case BPF_TRACE_ITER:
4239 	case BPF_TRACE_RAW_TP:
4240 	case BPF_TRACE_FENTRY:
4241 	case BPF_TRACE_FEXIT:
4242 	case BPF_MODIFY_RETURN:
4243 		return BPF_PROG_TYPE_TRACING;
4244 	case BPF_LSM_MAC:
4245 		return BPF_PROG_TYPE_LSM;
4246 	case BPF_SK_LOOKUP:
4247 		return BPF_PROG_TYPE_SK_LOOKUP;
4248 	case BPF_XDP:
4249 		return BPF_PROG_TYPE_XDP;
4250 	case BPF_LSM_CGROUP:
4251 		return BPF_PROG_TYPE_LSM;
4252 	case BPF_TCX_INGRESS:
4253 	case BPF_TCX_EGRESS:
4254 	case BPF_NETKIT_PRIMARY:
4255 	case BPF_NETKIT_PEER:
4256 		return BPF_PROG_TYPE_SCHED_CLS;
4257 	default:
4258 		return BPF_PROG_TYPE_UNSPEC;
4259 	}
4260 }
4261 
bpf_prog_attach_check_attach_type(const struct bpf_prog * prog,enum bpf_attach_type attach_type)4262 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
4263 					     enum bpf_attach_type attach_type)
4264 {
4265 	enum bpf_prog_type ptype;
4266 
4267 	switch (prog->type) {
4268 	case BPF_PROG_TYPE_CGROUP_SOCK:
4269 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4270 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4271 	case BPF_PROG_TYPE_SK_LOOKUP:
4272 		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
4273 	case BPF_PROG_TYPE_CGROUP_SKB:
4274 		if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN))
4275 			/* cg-skb progs can be loaded by unpriv user.
4276 			 * check permissions at attach time.
4277 			 */
4278 			return -EPERM;
4279 
4280 		ptype = attach_type_to_prog_type(attach_type);
4281 		if (prog->type != ptype)
4282 			return -EINVAL;
4283 
4284 		return prog->enforce_expected_attach_type &&
4285 			prog->expected_attach_type != attach_type ?
4286 			-EINVAL : 0;
4287 	case BPF_PROG_TYPE_EXT:
4288 		return 0;
4289 	case BPF_PROG_TYPE_NETFILTER:
4290 		if (attach_type != BPF_NETFILTER)
4291 			return -EINVAL;
4292 		return 0;
4293 	case BPF_PROG_TYPE_PERF_EVENT:
4294 	case BPF_PROG_TYPE_TRACEPOINT:
4295 		if (attach_type != BPF_PERF_EVENT)
4296 			return -EINVAL;
4297 		return 0;
4298 	case BPF_PROG_TYPE_KPROBE:
4299 		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
4300 		    attach_type != BPF_TRACE_KPROBE_MULTI)
4301 			return -EINVAL;
4302 		if (prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION &&
4303 		    attach_type != BPF_TRACE_KPROBE_SESSION)
4304 			return -EINVAL;
4305 		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
4306 		    attach_type != BPF_TRACE_UPROBE_MULTI)
4307 			return -EINVAL;
4308 		if (prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION &&
4309 		    attach_type != BPF_TRACE_UPROBE_SESSION)
4310 			return -EINVAL;
4311 		if (attach_type != BPF_PERF_EVENT &&
4312 		    attach_type != BPF_TRACE_KPROBE_MULTI &&
4313 		    attach_type != BPF_TRACE_KPROBE_SESSION &&
4314 		    attach_type != BPF_TRACE_UPROBE_MULTI &&
4315 		    attach_type != BPF_TRACE_UPROBE_SESSION)
4316 			return -EINVAL;
4317 		return 0;
4318 	case BPF_PROG_TYPE_SCHED_CLS:
4319 		if (attach_type != BPF_TCX_INGRESS &&
4320 		    attach_type != BPF_TCX_EGRESS &&
4321 		    attach_type != BPF_NETKIT_PRIMARY &&
4322 		    attach_type != BPF_NETKIT_PEER)
4323 			return -EINVAL;
4324 		return 0;
4325 	default:
4326 		ptype = attach_type_to_prog_type(attach_type);
4327 		if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type)
4328 			return -EINVAL;
4329 		return 0;
4330 	}
4331 }
4332 
is_cgroup_prog_type(enum bpf_prog_type ptype,enum bpf_attach_type atype,bool check_atype)4333 static bool is_cgroup_prog_type(enum bpf_prog_type ptype, enum bpf_attach_type atype,
4334 				bool check_atype)
4335 {
4336 	switch (ptype) {
4337 	case BPF_PROG_TYPE_CGROUP_DEVICE:
4338 	case BPF_PROG_TYPE_CGROUP_SKB:
4339 	case BPF_PROG_TYPE_CGROUP_SOCK:
4340 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4341 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4342 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
4343 	case BPF_PROG_TYPE_SOCK_OPS:
4344 		return true;
4345 	case BPF_PROG_TYPE_LSM:
4346 		return check_atype ? atype == BPF_LSM_CGROUP : true;
4347 	default:
4348 		return false;
4349 	}
4350 }
4351 
4352 #define BPF_PROG_ATTACH_LAST_FIELD expected_revision
4353 
4354 #define BPF_F_ATTACH_MASK_BASE	\
4355 	(BPF_F_ALLOW_OVERRIDE |	\
4356 	 BPF_F_ALLOW_MULTI |	\
4357 	 BPF_F_REPLACE |	\
4358 	 BPF_F_PREORDER)
4359 
4360 #define BPF_F_ATTACH_MASK_MPROG	\
4361 	(BPF_F_REPLACE |	\
4362 	 BPF_F_BEFORE |		\
4363 	 BPF_F_AFTER |		\
4364 	 BPF_F_ID |		\
4365 	 BPF_F_LINK)
4366 
bpf_prog_attach(const union bpf_attr * attr)4367 static int bpf_prog_attach(const union bpf_attr *attr)
4368 {
4369 	enum bpf_prog_type ptype;
4370 	struct bpf_prog *prog;
4371 	int ret;
4372 
4373 	if (CHECK_ATTR(BPF_PROG_ATTACH))
4374 		return -EINVAL;
4375 
4376 	ptype = attach_type_to_prog_type(attr->attach_type);
4377 	if (ptype == BPF_PROG_TYPE_UNSPEC)
4378 		return -EINVAL;
4379 	if (bpf_mprog_supported(ptype)) {
4380 		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
4381 			return -EINVAL;
4382 	} else if (is_cgroup_prog_type(ptype, 0, false)) {
4383 		if (attr->attach_flags & ~(BPF_F_ATTACH_MASK_BASE | BPF_F_ATTACH_MASK_MPROG))
4384 			return -EINVAL;
4385 	} else {
4386 		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
4387 			return -EINVAL;
4388 		if (attr->relative_fd ||
4389 		    attr->expected_revision)
4390 			return -EINVAL;
4391 	}
4392 
4393 	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
4394 	if (IS_ERR(prog))
4395 		return PTR_ERR(prog);
4396 
4397 	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
4398 		bpf_prog_put(prog);
4399 		return -EINVAL;
4400 	}
4401 
4402 	if (is_cgroup_prog_type(ptype, prog->expected_attach_type, true)) {
4403 		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
4404 		goto out;
4405 	}
4406 
4407 	switch (ptype) {
4408 	case BPF_PROG_TYPE_SK_SKB:
4409 	case BPF_PROG_TYPE_SK_MSG:
4410 		ret = sock_map_get_from_fd(attr, prog);
4411 		break;
4412 	case BPF_PROG_TYPE_LIRC_MODE2:
4413 		ret = lirc_prog_attach(attr, prog);
4414 		break;
4415 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4416 		ret = netns_bpf_prog_attach(attr, prog);
4417 		break;
4418 	case BPF_PROG_TYPE_SCHED_CLS:
4419 		if (attr->attach_type == BPF_TCX_INGRESS ||
4420 		    attr->attach_type == BPF_TCX_EGRESS)
4421 			ret = tcx_prog_attach(attr, prog);
4422 		else
4423 			ret = netkit_prog_attach(attr, prog);
4424 		break;
4425 	default:
4426 		ret = -EINVAL;
4427 	}
4428 out:
4429 	if (ret)
4430 		bpf_prog_put(prog);
4431 	return ret;
4432 }
4433 
4434 #define BPF_PROG_DETACH_LAST_FIELD expected_revision
4435 
bpf_prog_detach(const union bpf_attr * attr)4436 static int bpf_prog_detach(const union bpf_attr *attr)
4437 {
4438 	struct bpf_prog *prog = NULL;
4439 	enum bpf_prog_type ptype;
4440 	int ret;
4441 
4442 	if (CHECK_ATTR(BPF_PROG_DETACH))
4443 		return -EINVAL;
4444 
4445 	ptype = attach_type_to_prog_type(attr->attach_type);
4446 	if (bpf_mprog_supported(ptype)) {
4447 		if (ptype == BPF_PROG_TYPE_UNSPEC)
4448 			return -EINVAL;
4449 		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
4450 			return -EINVAL;
4451 		if (attr->attach_bpf_fd) {
4452 			prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
4453 			if (IS_ERR(prog))
4454 				return PTR_ERR(prog);
4455 		}
4456 	} else if (is_cgroup_prog_type(ptype, 0, false)) {
4457 		if (attr->attach_flags || attr->relative_fd)
4458 			return -EINVAL;
4459 	} else if (attr->attach_flags ||
4460 		   attr->relative_fd ||
4461 		   attr->expected_revision) {
4462 		return -EINVAL;
4463 	}
4464 
4465 	switch (ptype) {
4466 	case BPF_PROG_TYPE_SK_MSG:
4467 	case BPF_PROG_TYPE_SK_SKB:
4468 		ret = sock_map_prog_detach(attr, ptype);
4469 		break;
4470 	case BPF_PROG_TYPE_LIRC_MODE2:
4471 		ret = lirc_prog_detach(attr);
4472 		break;
4473 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4474 		ret = netns_bpf_prog_detach(attr, ptype);
4475 		break;
4476 	case BPF_PROG_TYPE_CGROUP_DEVICE:
4477 	case BPF_PROG_TYPE_CGROUP_SKB:
4478 	case BPF_PROG_TYPE_CGROUP_SOCK:
4479 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4480 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4481 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
4482 	case BPF_PROG_TYPE_SOCK_OPS:
4483 	case BPF_PROG_TYPE_LSM:
4484 		ret = cgroup_bpf_prog_detach(attr, ptype);
4485 		break;
4486 	case BPF_PROG_TYPE_SCHED_CLS:
4487 		if (attr->attach_type == BPF_TCX_INGRESS ||
4488 		    attr->attach_type == BPF_TCX_EGRESS)
4489 			ret = tcx_prog_detach(attr, prog);
4490 		else
4491 			ret = netkit_prog_detach(attr, prog);
4492 		break;
4493 	default:
4494 		ret = -EINVAL;
4495 	}
4496 
4497 	if (prog)
4498 		bpf_prog_put(prog);
4499 	return ret;
4500 }
4501 
4502 #define BPF_PROG_QUERY_LAST_FIELD query.revision
4503 
bpf_prog_query(const union bpf_attr * attr,union bpf_attr __user * uattr)4504 static int bpf_prog_query(const union bpf_attr *attr,
4505 			  union bpf_attr __user *uattr)
4506 {
4507 	if (!bpf_net_capable())
4508 		return -EPERM;
4509 	if (CHECK_ATTR(BPF_PROG_QUERY))
4510 		return -EINVAL;
4511 	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
4512 		return -EINVAL;
4513 
4514 	switch (attr->query.attach_type) {
4515 	case BPF_CGROUP_INET_INGRESS:
4516 	case BPF_CGROUP_INET_EGRESS:
4517 	case BPF_CGROUP_INET_SOCK_CREATE:
4518 	case BPF_CGROUP_INET_SOCK_RELEASE:
4519 	case BPF_CGROUP_INET4_BIND:
4520 	case BPF_CGROUP_INET6_BIND:
4521 	case BPF_CGROUP_INET4_POST_BIND:
4522 	case BPF_CGROUP_INET6_POST_BIND:
4523 	case BPF_CGROUP_INET4_CONNECT:
4524 	case BPF_CGROUP_INET6_CONNECT:
4525 	case BPF_CGROUP_UNIX_CONNECT:
4526 	case BPF_CGROUP_INET4_GETPEERNAME:
4527 	case BPF_CGROUP_INET6_GETPEERNAME:
4528 	case BPF_CGROUP_UNIX_GETPEERNAME:
4529 	case BPF_CGROUP_INET4_GETSOCKNAME:
4530 	case BPF_CGROUP_INET6_GETSOCKNAME:
4531 	case BPF_CGROUP_UNIX_GETSOCKNAME:
4532 	case BPF_CGROUP_UDP4_SENDMSG:
4533 	case BPF_CGROUP_UDP6_SENDMSG:
4534 	case BPF_CGROUP_UNIX_SENDMSG:
4535 	case BPF_CGROUP_UDP4_RECVMSG:
4536 	case BPF_CGROUP_UDP6_RECVMSG:
4537 	case BPF_CGROUP_UNIX_RECVMSG:
4538 	case BPF_CGROUP_SOCK_OPS:
4539 	case BPF_CGROUP_DEVICE:
4540 	case BPF_CGROUP_SYSCTL:
4541 	case BPF_CGROUP_GETSOCKOPT:
4542 	case BPF_CGROUP_SETSOCKOPT:
4543 	case BPF_LSM_CGROUP:
4544 		return cgroup_bpf_prog_query(attr, uattr);
4545 	case BPF_LIRC_MODE2:
4546 		return lirc_prog_query(attr, uattr);
4547 	case BPF_FLOW_DISSECTOR:
4548 	case BPF_SK_LOOKUP:
4549 		return netns_bpf_prog_query(attr, uattr);
4550 	case BPF_SK_SKB_STREAM_PARSER:
4551 	case BPF_SK_SKB_STREAM_VERDICT:
4552 	case BPF_SK_MSG_VERDICT:
4553 	case BPF_SK_SKB_VERDICT:
4554 		return sock_map_bpf_prog_query(attr, uattr);
4555 	case BPF_TCX_INGRESS:
4556 	case BPF_TCX_EGRESS:
4557 		return tcx_prog_query(attr, uattr);
4558 	case BPF_NETKIT_PRIMARY:
4559 	case BPF_NETKIT_PEER:
4560 		return netkit_prog_query(attr, uattr);
4561 	default:
4562 		return -EINVAL;
4563 	}
4564 }
4565 
4566 #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size
4567 
bpf_prog_test_run(const union bpf_attr * attr,union bpf_attr __user * uattr)4568 static int bpf_prog_test_run(const union bpf_attr *attr,
4569 			     union bpf_attr __user *uattr)
4570 {
4571 	struct bpf_prog *prog;
4572 	int ret = -ENOTSUPP;
4573 
4574 	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
4575 		return -EINVAL;
4576 
4577 	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
4578 	    (!attr->test.ctx_size_in && attr->test.ctx_in))
4579 		return -EINVAL;
4580 
4581 	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
4582 	    (!attr->test.ctx_size_out && attr->test.ctx_out))
4583 		return -EINVAL;
4584 
4585 	prog = bpf_prog_get(attr->test.prog_fd);
4586 	if (IS_ERR(prog))
4587 		return PTR_ERR(prog);
4588 
4589 	if (prog->aux->ops->test_run)
4590 		ret = prog->aux->ops->test_run(prog, attr, uattr);
4591 
4592 	bpf_prog_put(prog);
4593 	return ret;
4594 }
4595 
4596 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
4597 
bpf_obj_get_next_id(const union bpf_attr * attr,union bpf_attr __user * uattr,struct idr * idr,spinlock_t * lock)4598 static int bpf_obj_get_next_id(const union bpf_attr *attr,
4599 			       union bpf_attr __user *uattr,
4600 			       struct idr *idr,
4601 			       spinlock_t *lock)
4602 {
4603 	u32 next_id = attr->start_id;
4604 	int err = 0;
4605 
4606 	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
4607 		return -EINVAL;
4608 
4609 	if (!capable(CAP_SYS_ADMIN))
4610 		return -EPERM;
4611 
4612 	next_id++;
4613 	spin_lock_bh(lock);
4614 	if (!idr_get_next(idr, &next_id))
4615 		err = -ENOENT;
4616 	spin_unlock_bh(lock);
4617 
4618 	if (!err)
4619 		err = put_user(next_id, &uattr->next_id);
4620 
4621 	return err;
4622 }
4623 
bpf_map_get_curr_or_next(u32 * id)4624 struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
4625 {
4626 	struct bpf_map *map;
4627 
4628 	spin_lock_bh(&map_idr_lock);
4629 again:
4630 	map = idr_get_next(&map_idr, id);
4631 	if (map) {
4632 		map = __bpf_map_inc_not_zero(map, false);
4633 		if (IS_ERR(map)) {
4634 			(*id)++;
4635 			goto again;
4636 		}
4637 	}
4638 	spin_unlock_bh(&map_idr_lock);
4639 
4640 	return map;
4641 }
4642 
bpf_prog_get_curr_or_next(u32 * id)4643 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
4644 {
4645 	struct bpf_prog *prog;
4646 
4647 	spin_lock_bh(&prog_idr_lock);
4648 again:
4649 	prog = idr_get_next(&prog_idr, id);
4650 	if (prog) {
4651 		prog = bpf_prog_inc_not_zero(prog);
4652 		if (IS_ERR(prog)) {
4653 			(*id)++;
4654 			goto again;
4655 		}
4656 	}
4657 	spin_unlock_bh(&prog_idr_lock);
4658 
4659 	return prog;
4660 }
4661 
4662 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
4663 
bpf_prog_by_id(u32 id)4664 struct bpf_prog *bpf_prog_by_id(u32 id)
4665 {
4666 	struct bpf_prog *prog;
4667 
4668 	if (!id)
4669 		return ERR_PTR(-ENOENT);
4670 
4671 	spin_lock_bh(&prog_idr_lock);
4672 	prog = idr_find(&prog_idr, id);
4673 	if (prog)
4674 		prog = bpf_prog_inc_not_zero(prog);
4675 	else
4676 		prog = ERR_PTR(-ENOENT);
4677 	spin_unlock_bh(&prog_idr_lock);
4678 	return prog;
4679 }
4680 
bpf_prog_get_fd_by_id(const union bpf_attr * attr)4681 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
4682 {
4683 	struct bpf_prog *prog;
4684 	u32 id = attr->prog_id;
4685 	int fd;
4686 
4687 	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
4688 		return -EINVAL;
4689 
4690 	if (!capable(CAP_SYS_ADMIN))
4691 		return -EPERM;
4692 
4693 	prog = bpf_prog_by_id(id);
4694 	if (IS_ERR(prog))
4695 		return PTR_ERR(prog);
4696 
4697 	fd = bpf_prog_new_fd(prog);
4698 	if (fd < 0)
4699 		bpf_prog_put(prog);
4700 
4701 	return fd;
4702 }
4703 
4704 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
4705 
bpf_map_get_fd_by_id(const union bpf_attr * attr)4706 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
4707 {
4708 	struct bpf_map *map;
4709 	u32 id = attr->map_id;
4710 	int f_flags;
4711 	int fd;
4712 
4713 	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
4714 	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
4715 		return -EINVAL;
4716 
4717 	if (!capable(CAP_SYS_ADMIN))
4718 		return -EPERM;
4719 
4720 	f_flags = bpf_get_file_flag(attr->open_flags);
4721 	if (f_flags < 0)
4722 		return f_flags;
4723 
4724 	spin_lock_bh(&map_idr_lock);
4725 	map = idr_find(&map_idr, id);
4726 	if (map)
4727 		map = __bpf_map_inc_not_zero(map, true);
4728 	else
4729 		map = ERR_PTR(-ENOENT);
4730 	spin_unlock_bh(&map_idr_lock);
4731 
4732 	if (IS_ERR(map))
4733 		return PTR_ERR(map);
4734 
4735 	fd = bpf_map_new_fd(map, f_flags);
4736 	if (fd < 0)
4737 		bpf_map_put_with_uref(map);
4738 
4739 	return fd;
4740 }
4741 
bpf_map_from_imm(const struct bpf_prog * prog,unsigned long addr,u32 * off,u32 * type)4742 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
4743 					      unsigned long addr, u32 *off,
4744 					      u32 *type)
4745 {
4746 	const struct bpf_map *map;
4747 	int i;
4748 
4749 	mutex_lock(&prog->aux->used_maps_mutex);
4750 	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
4751 		map = prog->aux->used_maps[i];
4752 		if (map == (void *)addr) {
4753 			*type = BPF_PSEUDO_MAP_FD;
4754 			goto out;
4755 		}
4756 		if (!map->ops->map_direct_value_meta)
4757 			continue;
4758 		if (!map->ops->map_direct_value_meta(map, addr, off)) {
4759 			*type = BPF_PSEUDO_MAP_VALUE;
4760 			goto out;
4761 		}
4762 	}
4763 	map = NULL;
4764 
4765 out:
4766 	mutex_unlock(&prog->aux->used_maps_mutex);
4767 	return map;
4768 }
4769 
bpf_insn_prepare_dump(const struct bpf_prog * prog,const struct cred * f_cred)4770 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
4771 					      const struct cred *f_cred)
4772 {
4773 	const struct bpf_map *map;
4774 	struct bpf_insn *insns;
4775 	u32 off, type;
4776 	u64 imm;
4777 	u8 code;
4778 	int i;
4779 
4780 	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
4781 			GFP_USER);
4782 	if (!insns)
4783 		return insns;
4784 
4785 	for (i = 0; i < prog->len; i++) {
4786 		code = insns[i].code;
4787 
4788 		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
4789 			insns[i].code = BPF_JMP | BPF_CALL;
4790 			insns[i].imm = BPF_FUNC_tail_call;
4791 			/* fall-through */
4792 		}
4793 		if (code == (BPF_JMP | BPF_CALL) ||
4794 		    code == (BPF_JMP | BPF_CALL_ARGS)) {
4795 			if (code == (BPF_JMP | BPF_CALL_ARGS))
4796 				insns[i].code = BPF_JMP | BPF_CALL;
4797 			if (!bpf_dump_raw_ok(f_cred))
4798 				insns[i].imm = 0;
4799 			continue;
4800 		}
4801 		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
4802 			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
4803 			continue;
4804 		}
4805 
4806 		if ((BPF_CLASS(code) == BPF_LDX || BPF_CLASS(code) == BPF_STX ||
4807 		     BPF_CLASS(code) == BPF_ST) && BPF_MODE(code) == BPF_PROBE_MEM32) {
4808 			insns[i].code = BPF_CLASS(code) | BPF_SIZE(code) | BPF_MEM;
4809 			continue;
4810 		}
4811 
4812 		if (code != (BPF_LD | BPF_IMM | BPF_DW))
4813 			continue;
4814 
4815 		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
4816 		map = bpf_map_from_imm(prog, imm, &off, &type);
4817 		if (map) {
4818 			insns[i].src_reg = type;
4819 			insns[i].imm = map->id;
4820 			insns[i + 1].imm = off;
4821 			continue;
4822 		}
4823 	}
4824 
4825 	return insns;
4826 }
4827 
set_info_rec_size(struct bpf_prog_info * info)4828 static int set_info_rec_size(struct bpf_prog_info *info)
4829 {
4830 	/*
4831 	 * Ensure info.*_rec_size is the same as kernel expected size
4832 	 *
4833 	 * or
4834 	 *
4835 	 * Only allow zero *_rec_size if both _rec_size and _cnt are
4836 	 * zero.  In this case, the kernel will set the expected
4837 	 * _rec_size back to the info.
4838 	 */
4839 
4840 	if ((info->nr_func_info || info->func_info_rec_size) &&
4841 	    info->func_info_rec_size != sizeof(struct bpf_func_info))
4842 		return -EINVAL;
4843 
4844 	if ((info->nr_line_info || info->line_info_rec_size) &&
4845 	    info->line_info_rec_size != sizeof(struct bpf_line_info))
4846 		return -EINVAL;
4847 
4848 	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
4849 	    info->jited_line_info_rec_size != sizeof(__u64))
4850 		return -EINVAL;
4851 
4852 	info->func_info_rec_size = sizeof(struct bpf_func_info);
4853 	info->line_info_rec_size = sizeof(struct bpf_line_info);
4854 	info->jited_line_info_rec_size = sizeof(__u64);
4855 
4856 	return 0;
4857 }
4858 
bpf_prog_get_info_by_fd(struct file * file,struct bpf_prog * prog,const union bpf_attr * attr,union bpf_attr __user * uattr)4859 static int bpf_prog_get_info_by_fd(struct file *file,
4860 				   struct bpf_prog *prog,
4861 				   const union bpf_attr *attr,
4862 				   union bpf_attr __user *uattr)
4863 {
4864 	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4865 	struct btf *attach_btf = bpf_prog_get_target_btf(prog);
4866 	struct bpf_prog_info info;
4867 	u32 info_len = attr->info.info_len;
4868 	struct bpf_prog_kstats stats;
4869 	char __user *uinsns;
4870 	u32 ulen;
4871 	int err;
4872 
4873 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4874 	if (err)
4875 		return err;
4876 	info_len = min_t(u32, sizeof(info), info_len);
4877 
4878 	memset(&info, 0, sizeof(info));
4879 	if (copy_from_user(&info, uinfo, info_len))
4880 		return -EFAULT;
4881 
4882 	info.type = prog->type;
4883 	info.id = prog->aux->id;
4884 	info.load_time = prog->aux->load_time;
4885 	info.created_by_uid = from_kuid_munged(current_user_ns(),
4886 					       prog->aux->user->uid);
4887 	info.gpl_compatible = prog->gpl_compatible;
4888 
4889 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
4890 	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
4891 
4892 	mutex_lock(&prog->aux->used_maps_mutex);
4893 	ulen = info.nr_map_ids;
4894 	info.nr_map_ids = prog->aux->used_map_cnt;
4895 	ulen = min_t(u32, info.nr_map_ids, ulen);
4896 	if (ulen) {
4897 		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
4898 		u32 i;
4899 
4900 		for (i = 0; i < ulen; i++)
4901 			if (put_user(prog->aux->used_maps[i]->id,
4902 				     &user_map_ids[i])) {
4903 				mutex_unlock(&prog->aux->used_maps_mutex);
4904 				return -EFAULT;
4905 			}
4906 	}
4907 	mutex_unlock(&prog->aux->used_maps_mutex);
4908 
4909 	err = set_info_rec_size(&info);
4910 	if (err)
4911 		return err;
4912 
4913 	bpf_prog_get_stats(prog, &stats);
4914 	info.run_time_ns = stats.nsecs;
4915 	info.run_cnt = stats.cnt;
4916 	info.recursion_misses = stats.misses;
4917 
4918 	info.verified_insns = prog->aux->verified_insns;
4919 	if (prog->aux->btf)
4920 		info.btf_id = btf_obj_id(prog->aux->btf);
4921 
4922 	if (!bpf_capable()) {
4923 		info.jited_prog_len = 0;
4924 		info.xlated_prog_len = 0;
4925 		info.nr_jited_ksyms = 0;
4926 		info.nr_jited_func_lens = 0;
4927 		info.nr_func_info = 0;
4928 		info.nr_line_info = 0;
4929 		info.nr_jited_line_info = 0;
4930 		goto done;
4931 	}
4932 
4933 	ulen = info.xlated_prog_len;
4934 	info.xlated_prog_len = bpf_prog_insn_size(prog);
4935 	if (info.xlated_prog_len && ulen) {
4936 		struct bpf_insn *insns_sanitized;
4937 		bool fault;
4938 
4939 		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
4940 			info.xlated_prog_insns = 0;
4941 			goto done;
4942 		}
4943 		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
4944 		if (!insns_sanitized)
4945 			return -ENOMEM;
4946 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
4947 		ulen = min_t(u32, info.xlated_prog_len, ulen);
4948 		fault = copy_to_user(uinsns, insns_sanitized, ulen);
4949 		kfree(insns_sanitized);
4950 		if (fault)
4951 			return -EFAULT;
4952 	}
4953 
4954 	if (bpf_prog_is_offloaded(prog->aux)) {
4955 		err = bpf_prog_offload_info_fill(&info, prog);
4956 		if (err)
4957 			return err;
4958 		goto done;
4959 	}
4960 
4961 	/* NOTE: the following code is supposed to be skipped for offload.
4962 	 * bpf_prog_offload_info_fill() is the place to fill similar fields
4963 	 * for offload.
4964 	 */
4965 	ulen = info.jited_prog_len;
4966 	if (prog->aux->func_cnt) {
4967 		u32 i;
4968 
4969 		info.jited_prog_len = 0;
4970 		for (i = 0; i < prog->aux->func_cnt; i++)
4971 			info.jited_prog_len += prog->aux->func[i]->jited_len;
4972 	} else {
4973 		info.jited_prog_len = prog->jited_len;
4974 	}
4975 
4976 	if (info.jited_prog_len && ulen) {
4977 		if (bpf_dump_raw_ok(file->f_cred)) {
4978 			uinsns = u64_to_user_ptr(info.jited_prog_insns);
4979 			ulen = min_t(u32, info.jited_prog_len, ulen);
4980 
4981 			/* for multi-function programs, copy the JITed
4982 			 * instructions for all the functions
4983 			 */
4984 			if (prog->aux->func_cnt) {
4985 				u32 len, free, i;
4986 				u8 *img;
4987 
4988 				free = ulen;
4989 				for (i = 0; i < prog->aux->func_cnt; i++) {
4990 					len = prog->aux->func[i]->jited_len;
4991 					len = min_t(u32, len, free);
4992 					img = (u8 *) prog->aux->func[i]->bpf_func;
4993 					if (copy_to_user(uinsns, img, len))
4994 						return -EFAULT;
4995 					uinsns += len;
4996 					free -= len;
4997 					if (!free)
4998 						break;
4999 				}
5000 			} else {
5001 				if (copy_to_user(uinsns, prog->bpf_func, ulen))
5002 					return -EFAULT;
5003 			}
5004 		} else {
5005 			info.jited_prog_insns = 0;
5006 		}
5007 	}
5008 
5009 	ulen = info.nr_jited_ksyms;
5010 	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
5011 	if (ulen) {
5012 		if (bpf_dump_raw_ok(file->f_cred)) {
5013 			unsigned long ksym_addr;
5014 			u64 __user *user_ksyms;
5015 			u32 i;
5016 
5017 			/* copy the address of the kernel symbol
5018 			 * corresponding to each function
5019 			 */
5020 			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
5021 			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
5022 			if (prog->aux->func_cnt) {
5023 				for (i = 0; i < ulen; i++) {
5024 					ksym_addr = (unsigned long)
5025 						prog->aux->func[i]->bpf_func;
5026 					if (put_user((u64) ksym_addr,
5027 						     &user_ksyms[i]))
5028 						return -EFAULT;
5029 				}
5030 			} else {
5031 				ksym_addr = (unsigned long) prog->bpf_func;
5032 				if (put_user((u64) ksym_addr, &user_ksyms[0]))
5033 					return -EFAULT;
5034 			}
5035 		} else {
5036 			info.jited_ksyms = 0;
5037 		}
5038 	}
5039 
5040 	ulen = info.nr_jited_func_lens;
5041 	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
5042 	if (ulen) {
5043 		if (bpf_dump_raw_ok(file->f_cred)) {
5044 			u32 __user *user_lens;
5045 			u32 func_len, i;
5046 
5047 			/* copy the JITed image lengths for each function */
5048 			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
5049 			user_lens = u64_to_user_ptr(info.jited_func_lens);
5050 			if (prog->aux->func_cnt) {
5051 				for (i = 0; i < ulen; i++) {
5052 					func_len =
5053 						prog->aux->func[i]->jited_len;
5054 					if (put_user(func_len, &user_lens[i]))
5055 						return -EFAULT;
5056 				}
5057 			} else {
5058 				func_len = prog->jited_len;
5059 				if (put_user(func_len, &user_lens[0]))
5060 					return -EFAULT;
5061 			}
5062 		} else {
5063 			info.jited_func_lens = 0;
5064 		}
5065 	}
5066 
5067 	info.attach_btf_id = prog->aux->attach_btf_id;
5068 	if (attach_btf)
5069 		info.attach_btf_obj_id = btf_obj_id(attach_btf);
5070 
5071 	ulen = info.nr_func_info;
5072 	info.nr_func_info = prog->aux->func_info_cnt;
5073 	if (info.nr_func_info && ulen) {
5074 		char __user *user_finfo;
5075 
5076 		user_finfo = u64_to_user_ptr(info.func_info);
5077 		ulen = min_t(u32, info.nr_func_info, ulen);
5078 		if (copy_to_user(user_finfo, prog->aux->func_info,
5079 				 info.func_info_rec_size * ulen))
5080 			return -EFAULT;
5081 	}
5082 
5083 	ulen = info.nr_line_info;
5084 	info.nr_line_info = prog->aux->nr_linfo;
5085 	if (info.nr_line_info && ulen) {
5086 		__u8 __user *user_linfo;
5087 
5088 		user_linfo = u64_to_user_ptr(info.line_info);
5089 		ulen = min_t(u32, info.nr_line_info, ulen);
5090 		if (copy_to_user(user_linfo, prog->aux->linfo,
5091 				 info.line_info_rec_size * ulen))
5092 			return -EFAULT;
5093 	}
5094 
5095 	ulen = info.nr_jited_line_info;
5096 	if (prog->aux->jited_linfo)
5097 		info.nr_jited_line_info = prog->aux->nr_linfo;
5098 	else
5099 		info.nr_jited_line_info = 0;
5100 	if (info.nr_jited_line_info && ulen) {
5101 		if (bpf_dump_raw_ok(file->f_cred)) {
5102 			unsigned long line_addr;
5103 			__u64 __user *user_linfo;
5104 			u32 i;
5105 
5106 			user_linfo = u64_to_user_ptr(info.jited_line_info);
5107 			ulen = min_t(u32, info.nr_jited_line_info, ulen);
5108 			for (i = 0; i < ulen; i++) {
5109 				line_addr = (unsigned long)prog->aux->jited_linfo[i];
5110 				if (put_user((__u64)line_addr, &user_linfo[i]))
5111 					return -EFAULT;
5112 			}
5113 		} else {
5114 			info.jited_line_info = 0;
5115 		}
5116 	}
5117 
5118 	ulen = info.nr_prog_tags;
5119 	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
5120 	if (ulen) {
5121 		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
5122 		u32 i;
5123 
5124 		user_prog_tags = u64_to_user_ptr(info.prog_tags);
5125 		ulen = min_t(u32, info.nr_prog_tags, ulen);
5126 		if (prog->aux->func_cnt) {
5127 			for (i = 0; i < ulen; i++) {
5128 				if (copy_to_user(user_prog_tags[i],
5129 						 prog->aux->func[i]->tag,
5130 						 BPF_TAG_SIZE))
5131 					return -EFAULT;
5132 			}
5133 		} else {
5134 			if (copy_to_user(user_prog_tags[0],
5135 					 prog->tag, BPF_TAG_SIZE))
5136 				return -EFAULT;
5137 		}
5138 	}
5139 
5140 done:
5141 	if (copy_to_user(uinfo, &info, info_len) ||
5142 	    put_user(info_len, &uattr->info.info_len))
5143 		return -EFAULT;
5144 
5145 	return 0;
5146 }
5147 
bpf_map_get_info_by_fd(struct file * file,struct bpf_map * map,const union bpf_attr * attr,union bpf_attr __user * uattr)5148 static int bpf_map_get_info_by_fd(struct file *file,
5149 				  struct bpf_map *map,
5150 				  const union bpf_attr *attr,
5151 				  union bpf_attr __user *uattr)
5152 {
5153 	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
5154 	struct bpf_map_info info;
5155 	u32 info_len = attr->info.info_len;
5156 	int err;
5157 
5158 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
5159 	if (err)
5160 		return err;
5161 	info_len = min_t(u32, sizeof(info), info_len);
5162 
5163 	memset(&info, 0, sizeof(info));
5164 	info.type = map->map_type;
5165 	info.id = map->id;
5166 	info.key_size = map->key_size;
5167 	info.value_size = map->value_size;
5168 	info.max_entries = map->max_entries;
5169 	info.map_flags = map->map_flags;
5170 	info.map_extra = map->map_extra;
5171 	memcpy(info.name, map->name, sizeof(map->name));
5172 
5173 	if (map->btf) {
5174 		info.btf_id = btf_obj_id(map->btf);
5175 		info.btf_key_type_id = map->btf_key_type_id;
5176 		info.btf_value_type_id = map->btf_value_type_id;
5177 	}
5178 	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
5179 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS)
5180 		bpf_map_struct_ops_info_fill(&info, map);
5181 
5182 	if (bpf_map_is_offloaded(map)) {
5183 		err = bpf_map_offload_info_fill(&info, map);
5184 		if (err)
5185 			return err;
5186 	}
5187 
5188 	if (copy_to_user(uinfo, &info, info_len) ||
5189 	    put_user(info_len, &uattr->info.info_len))
5190 		return -EFAULT;
5191 
5192 	return 0;
5193 }
5194 
bpf_btf_get_info_by_fd(struct file * file,struct btf * btf,const union bpf_attr * attr,union bpf_attr __user * uattr)5195 static int bpf_btf_get_info_by_fd(struct file *file,
5196 				  struct btf *btf,
5197 				  const union bpf_attr *attr,
5198 				  union bpf_attr __user *uattr)
5199 {
5200 	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
5201 	u32 info_len = attr->info.info_len;
5202 	int err;
5203 
5204 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
5205 	if (err)
5206 		return err;
5207 
5208 	return btf_get_info_by_fd(btf, attr, uattr);
5209 }
5210 
bpf_link_get_info_by_fd(struct file * file,struct bpf_link * link,const union bpf_attr * attr,union bpf_attr __user * uattr)5211 static int bpf_link_get_info_by_fd(struct file *file,
5212 				  struct bpf_link *link,
5213 				  const union bpf_attr *attr,
5214 				  union bpf_attr __user *uattr)
5215 {
5216 	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
5217 	struct bpf_link_info info;
5218 	u32 info_len = attr->info.info_len;
5219 	int err;
5220 
5221 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
5222 	if (err)
5223 		return err;
5224 	info_len = min_t(u32, sizeof(info), info_len);
5225 
5226 	memset(&info, 0, sizeof(info));
5227 	if (copy_from_user(&info, uinfo, info_len))
5228 		return -EFAULT;
5229 
5230 	info.type = link->type;
5231 	info.id = link->id;
5232 	if (link->prog)
5233 		info.prog_id = link->prog->aux->id;
5234 
5235 	if (link->ops->fill_link_info) {
5236 		err = link->ops->fill_link_info(link, &info);
5237 		if (err)
5238 			return err;
5239 	}
5240 
5241 	if (copy_to_user(uinfo, &info, info_len) ||
5242 	    put_user(info_len, &uattr->info.info_len))
5243 		return -EFAULT;
5244 
5245 	return 0;
5246 }
5247 
5248 
token_get_info_by_fd(struct file * file,struct bpf_token * token,const union bpf_attr * attr,union bpf_attr __user * uattr)5249 static int token_get_info_by_fd(struct file *file,
5250 				struct bpf_token *token,
5251 				const union bpf_attr *attr,
5252 				union bpf_attr __user *uattr)
5253 {
5254 	struct bpf_token_info __user *uinfo = u64_to_user_ptr(attr->info.info);
5255 	u32 info_len = attr->info.info_len;
5256 	int err;
5257 
5258 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
5259 	if (err)
5260 		return err;
5261 	return bpf_token_get_info_by_fd(token, attr, uattr);
5262 }
5263 
5264 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
5265 
bpf_obj_get_info_by_fd(const union bpf_attr * attr,union bpf_attr __user * uattr)5266 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
5267 				  union bpf_attr __user *uattr)
5268 {
5269 	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
5270 		return -EINVAL;
5271 
5272 	CLASS(fd, f)(attr->info.bpf_fd);
5273 	if (fd_empty(f))
5274 		return -EBADFD;
5275 
5276 	if (fd_file(f)->f_op == &bpf_prog_fops)
5277 		return bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
5278 					      uattr);
5279 	else if (fd_file(f)->f_op == &bpf_map_fops)
5280 		return bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
5281 					     uattr);
5282 	else if (fd_file(f)->f_op == &btf_fops)
5283 		return bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr);
5284 	else if (fd_file(f)->f_op == &bpf_link_fops || fd_file(f)->f_op == &bpf_link_fops_poll)
5285 		return bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data,
5286 					      attr, uattr);
5287 	else if (fd_file(f)->f_op == &bpf_token_fops)
5288 		return token_get_info_by_fd(fd_file(f), fd_file(f)->private_data,
5289 					    attr, uattr);
5290 	return -EINVAL;
5291 }
5292 
5293 #define BPF_BTF_LOAD_LAST_FIELD btf_token_fd
5294 
bpf_btf_load(const union bpf_attr * attr,bpfptr_t uattr,__u32 uattr_size)5295 static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
5296 {
5297 	struct bpf_token *token = NULL;
5298 
5299 	if (CHECK_ATTR(BPF_BTF_LOAD))
5300 		return -EINVAL;
5301 
5302 	if (attr->btf_flags & ~BPF_F_TOKEN_FD)
5303 		return -EINVAL;
5304 
5305 	if (attr->btf_flags & BPF_F_TOKEN_FD) {
5306 		token = bpf_token_get_from_fd(attr->btf_token_fd);
5307 		if (IS_ERR(token))
5308 			return PTR_ERR(token);
5309 		if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) {
5310 			bpf_token_put(token);
5311 			token = NULL;
5312 		}
5313 	}
5314 
5315 	if (!bpf_token_capable(token, CAP_BPF)) {
5316 		bpf_token_put(token);
5317 		return -EPERM;
5318 	}
5319 
5320 	bpf_token_put(token);
5321 
5322 	return btf_new_fd(attr, uattr, uattr_size);
5323 }
5324 
5325 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD fd_by_id_token_fd
5326 
bpf_btf_get_fd_by_id(const union bpf_attr * attr)5327 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
5328 {
5329 	struct bpf_token *token = NULL;
5330 
5331 	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
5332 		return -EINVAL;
5333 
5334 	if (attr->open_flags & ~BPF_F_TOKEN_FD)
5335 		return -EINVAL;
5336 
5337 	if (attr->open_flags & BPF_F_TOKEN_FD) {
5338 		token = bpf_token_get_from_fd(attr->fd_by_id_token_fd);
5339 		if (IS_ERR(token))
5340 			return PTR_ERR(token);
5341 		if (!bpf_token_allow_cmd(token, BPF_BTF_GET_FD_BY_ID)) {
5342 			bpf_token_put(token);
5343 			token = NULL;
5344 		}
5345 	}
5346 
5347 	if (!bpf_token_capable(token, CAP_SYS_ADMIN)) {
5348 		bpf_token_put(token);
5349 		return -EPERM;
5350 	}
5351 
5352 	bpf_token_put(token);
5353 
5354 	return btf_get_fd_by_id(attr->btf_id);
5355 }
5356 
bpf_task_fd_query_copy(const union bpf_attr * attr,union bpf_attr __user * uattr,u32 prog_id,u32 fd_type,const char * buf,u64 probe_offset,u64 probe_addr)5357 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
5358 				    union bpf_attr __user *uattr,
5359 				    u32 prog_id, u32 fd_type,
5360 				    const char *buf, u64 probe_offset,
5361 				    u64 probe_addr)
5362 {
5363 	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
5364 	u32 len = buf ? strlen(buf) : 0, input_len;
5365 	int err = 0;
5366 
5367 	if (put_user(len, &uattr->task_fd_query.buf_len))
5368 		return -EFAULT;
5369 	input_len = attr->task_fd_query.buf_len;
5370 	if (input_len && ubuf) {
5371 		if (!len) {
5372 			/* nothing to copy, just make ubuf NULL terminated */
5373 			char zero = '\0';
5374 
5375 			if (put_user(zero, ubuf))
5376 				return -EFAULT;
5377 		} else {
5378 			err = bpf_copy_to_user(ubuf, buf, input_len, len);
5379 			if (err == -EFAULT)
5380 				return err;
5381 		}
5382 	}
5383 
5384 	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
5385 	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
5386 	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
5387 	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
5388 		return -EFAULT;
5389 
5390 	return err;
5391 }
5392 
5393 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
5394 
bpf_task_fd_query(const union bpf_attr * attr,union bpf_attr __user * uattr)5395 static int bpf_task_fd_query(const union bpf_attr *attr,
5396 			     union bpf_attr __user *uattr)
5397 {
5398 	pid_t pid = attr->task_fd_query.pid;
5399 	u32 fd = attr->task_fd_query.fd;
5400 	const struct perf_event *event;
5401 	struct task_struct *task;
5402 	struct file *file;
5403 	int err;
5404 
5405 	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
5406 		return -EINVAL;
5407 
5408 	if (!capable(CAP_SYS_ADMIN))
5409 		return -EPERM;
5410 
5411 	if (attr->task_fd_query.flags != 0)
5412 		return -EINVAL;
5413 
5414 	rcu_read_lock();
5415 	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
5416 	rcu_read_unlock();
5417 	if (!task)
5418 		return -ENOENT;
5419 
5420 	err = 0;
5421 	file = fget_task(task, fd);
5422 	put_task_struct(task);
5423 	if (!file)
5424 		return -EBADF;
5425 
5426 	if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) {
5427 		struct bpf_link *link = file->private_data;
5428 
5429 		if (link->ops == &bpf_raw_tp_link_lops) {
5430 			struct bpf_raw_tp_link *raw_tp =
5431 				container_of(link, struct bpf_raw_tp_link, link);
5432 			struct bpf_raw_event_map *btp = raw_tp->btp;
5433 
5434 			err = bpf_task_fd_query_copy(attr, uattr,
5435 						     raw_tp->link.prog->aux->id,
5436 						     BPF_FD_TYPE_RAW_TRACEPOINT,
5437 						     btp->tp->name, 0, 0);
5438 			goto put_file;
5439 		}
5440 		goto out_not_supp;
5441 	}
5442 
5443 	event = perf_get_event(file);
5444 	if (!IS_ERR(event)) {
5445 		u64 probe_offset, probe_addr;
5446 		u32 prog_id, fd_type;
5447 		const char *buf;
5448 
5449 		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
5450 					      &buf, &probe_offset,
5451 					      &probe_addr, NULL);
5452 		if (!err)
5453 			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
5454 						     fd_type, buf,
5455 						     probe_offset,
5456 						     probe_addr);
5457 		goto put_file;
5458 	}
5459 
5460 out_not_supp:
5461 	err = -ENOTSUPP;
5462 put_file:
5463 	fput(file);
5464 	return err;
5465 }
5466 
5467 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
5468 
5469 #define BPF_DO_BATCH(fn, ...)			\
5470 	do {					\
5471 		if (!fn) {			\
5472 			err = -ENOTSUPP;	\
5473 			goto err_put;		\
5474 		}				\
5475 		err = fn(__VA_ARGS__);		\
5476 	} while (0)
5477 
bpf_map_do_batch(const union bpf_attr * attr,union bpf_attr __user * uattr,int cmd)5478 static int bpf_map_do_batch(const union bpf_attr *attr,
5479 			    union bpf_attr __user *uattr,
5480 			    int cmd)
5481 {
5482 	bool has_read  = cmd == BPF_MAP_LOOKUP_BATCH ||
5483 			 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
5484 	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
5485 	struct bpf_map *map;
5486 	int err;
5487 
5488 	if (CHECK_ATTR(BPF_MAP_BATCH))
5489 		return -EINVAL;
5490 
5491 	CLASS(fd, f)(attr->batch.map_fd);
5492 
5493 	map = __bpf_map_get(f);
5494 	if (IS_ERR(map))
5495 		return PTR_ERR(map);
5496 	if (has_write)
5497 		bpf_map_write_active_inc(map);
5498 	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
5499 		err = -EPERM;
5500 		goto err_put;
5501 	}
5502 	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
5503 		err = -EPERM;
5504 		goto err_put;
5505 	}
5506 
5507 	if (cmd == BPF_MAP_LOOKUP_BATCH)
5508 		BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
5509 	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
5510 		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
5511 	else if (cmd == BPF_MAP_UPDATE_BATCH)
5512 		BPF_DO_BATCH(map->ops->map_update_batch, map, fd_file(f), attr, uattr);
5513 	else
5514 		BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
5515 err_put:
5516 	if (has_write) {
5517 		maybe_wait_bpf_programs(map);
5518 		bpf_map_write_active_dec(map);
5519 	}
5520 	return err;
5521 }
5522 
5523 #define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid
link_create(union bpf_attr * attr,bpfptr_t uattr)5524 static int link_create(union bpf_attr *attr, bpfptr_t uattr)
5525 {
5526 	struct bpf_prog *prog;
5527 	int ret;
5528 
5529 	if (CHECK_ATTR(BPF_LINK_CREATE))
5530 		return -EINVAL;
5531 
5532 	if (attr->link_create.attach_type == BPF_STRUCT_OPS)
5533 		return bpf_struct_ops_link_create(attr);
5534 
5535 	prog = bpf_prog_get(attr->link_create.prog_fd);
5536 	if (IS_ERR(prog))
5537 		return PTR_ERR(prog);
5538 
5539 	ret = bpf_prog_attach_check_attach_type(prog,
5540 						attr->link_create.attach_type);
5541 	if (ret)
5542 		goto out;
5543 
5544 	switch (prog->type) {
5545 	case BPF_PROG_TYPE_CGROUP_SKB:
5546 	case BPF_PROG_TYPE_CGROUP_SOCK:
5547 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
5548 	case BPF_PROG_TYPE_SOCK_OPS:
5549 	case BPF_PROG_TYPE_CGROUP_DEVICE:
5550 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
5551 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
5552 		ret = cgroup_bpf_link_attach(attr, prog);
5553 		break;
5554 	case BPF_PROG_TYPE_EXT:
5555 		ret = bpf_tracing_prog_attach(prog,
5556 					      attr->link_create.target_fd,
5557 					      attr->link_create.target_btf_id,
5558 					      attr->link_create.tracing.cookie,
5559 					      attr->link_create.attach_type);
5560 		break;
5561 	case BPF_PROG_TYPE_LSM:
5562 	case BPF_PROG_TYPE_TRACING:
5563 		if (attr->link_create.attach_type != prog->expected_attach_type) {
5564 			ret = -EINVAL;
5565 			goto out;
5566 		}
5567 		if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
5568 			ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie,
5569 						     attr->link_create.attach_type);
5570 		else if (prog->expected_attach_type == BPF_TRACE_ITER)
5571 			ret = bpf_iter_link_attach(attr, uattr, prog);
5572 		else if (prog->expected_attach_type == BPF_LSM_CGROUP)
5573 			ret = cgroup_bpf_link_attach(attr, prog);
5574 		else
5575 			ret = bpf_tracing_prog_attach(prog,
5576 						      attr->link_create.target_fd,
5577 						      attr->link_create.target_btf_id,
5578 						      attr->link_create.tracing.cookie,
5579 						      attr->link_create.attach_type);
5580 		break;
5581 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
5582 	case BPF_PROG_TYPE_SK_LOOKUP:
5583 		ret = netns_bpf_link_create(attr, prog);
5584 		break;
5585 	case BPF_PROG_TYPE_SK_MSG:
5586 	case BPF_PROG_TYPE_SK_SKB:
5587 		ret = sock_map_link_create(attr, prog);
5588 		break;
5589 #ifdef CONFIG_NET
5590 	case BPF_PROG_TYPE_XDP:
5591 		ret = bpf_xdp_link_attach(attr, prog);
5592 		break;
5593 	case BPF_PROG_TYPE_SCHED_CLS:
5594 		if (attr->link_create.attach_type == BPF_TCX_INGRESS ||
5595 		    attr->link_create.attach_type == BPF_TCX_EGRESS)
5596 			ret = tcx_link_attach(attr, prog);
5597 		else
5598 			ret = netkit_link_attach(attr, prog);
5599 		break;
5600 	case BPF_PROG_TYPE_NETFILTER:
5601 		ret = bpf_nf_link_attach(attr, prog);
5602 		break;
5603 #endif
5604 	case BPF_PROG_TYPE_PERF_EVENT:
5605 	case BPF_PROG_TYPE_TRACEPOINT:
5606 		ret = bpf_perf_link_attach(attr, prog);
5607 		break;
5608 	case BPF_PROG_TYPE_KPROBE:
5609 		if (attr->link_create.attach_type == BPF_PERF_EVENT)
5610 			ret = bpf_perf_link_attach(attr, prog);
5611 		else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI ||
5612 			 attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION)
5613 			ret = bpf_kprobe_multi_link_attach(attr, prog);
5614 		else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI ||
5615 			 attr->link_create.attach_type == BPF_TRACE_UPROBE_SESSION)
5616 			ret = bpf_uprobe_multi_link_attach(attr, prog);
5617 		break;
5618 	default:
5619 		ret = -EINVAL;
5620 	}
5621 
5622 out:
5623 	if (ret < 0)
5624 		bpf_prog_put(prog);
5625 	return ret;
5626 }
5627 
link_update_map(struct bpf_link * link,union bpf_attr * attr)5628 static int link_update_map(struct bpf_link *link, union bpf_attr *attr)
5629 {
5630 	struct bpf_map *new_map, *old_map = NULL;
5631 	int ret;
5632 
5633 	new_map = bpf_map_get(attr->link_update.new_map_fd);
5634 	if (IS_ERR(new_map))
5635 		return PTR_ERR(new_map);
5636 
5637 	if (attr->link_update.flags & BPF_F_REPLACE) {
5638 		old_map = bpf_map_get(attr->link_update.old_map_fd);
5639 		if (IS_ERR(old_map)) {
5640 			ret = PTR_ERR(old_map);
5641 			goto out_put;
5642 		}
5643 	} else if (attr->link_update.old_map_fd) {
5644 		ret = -EINVAL;
5645 		goto out_put;
5646 	}
5647 
5648 	ret = link->ops->update_map(link, new_map, old_map);
5649 
5650 	if (old_map)
5651 		bpf_map_put(old_map);
5652 out_put:
5653 	bpf_map_put(new_map);
5654 	return ret;
5655 }
5656 
5657 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
5658 
link_update(union bpf_attr * attr)5659 static int link_update(union bpf_attr *attr)
5660 {
5661 	struct bpf_prog *old_prog = NULL, *new_prog;
5662 	struct bpf_link *link;
5663 	u32 flags;
5664 	int ret;
5665 
5666 	if (CHECK_ATTR(BPF_LINK_UPDATE))
5667 		return -EINVAL;
5668 
5669 	flags = attr->link_update.flags;
5670 	if (flags & ~BPF_F_REPLACE)
5671 		return -EINVAL;
5672 
5673 	link = bpf_link_get_from_fd(attr->link_update.link_fd);
5674 	if (IS_ERR(link))
5675 		return PTR_ERR(link);
5676 
5677 	if (link->ops->update_map) {
5678 		ret = link_update_map(link, attr);
5679 		goto out_put_link;
5680 	}
5681 
5682 	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
5683 	if (IS_ERR(new_prog)) {
5684 		ret = PTR_ERR(new_prog);
5685 		goto out_put_link;
5686 	}
5687 
5688 	if (flags & BPF_F_REPLACE) {
5689 		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
5690 		if (IS_ERR(old_prog)) {
5691 			ret = PTR_ERR(old_prog);
5692 			old_prog = NULL;
5693 			goto out_put_progs;
5694 		}
5695 	} else if (attr->link_update.old_prog_fd) {
5696 		ret = -EINVAL;
5697 		goto out_put_progs;
5698 	}
5699 
5700 	if (link->ops->update_prog)
5701 		ret = link->ops->update_prog(link, new_prog, old_prog);
5702 	else
5703 		ret = -EINVAL;
5704 
5705 out_put_progs:
5706 	if (old_prog)
5707 		bpf_prog_put(old_prog);
5708 	if (ret)
5709 		bpf_prog_put(new_prog);
5710 out_put_link:
5711 	bpf_link_put_direct(link);
5712 	return ret;
5713 }
5714 
5715 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
5716 
link_detach(union bpf_attr * attr)5717 static int link_detach(union bpf_attr *attr)
5718 {
5719 	struct bpf_link *link;
5720 	int ret;
5721 
5722 	if (CHECK_ATTR(BPF_LINK_DETACH))
5723 		return -EINVAL;
5724 
5725 	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
5726 	if (IS_ERR(link))
5727 		return PTR_ERR(link);
5728 
5729 	if (link->ops->detach)
5730 		ret = link->ops->detach(link);
5731 	else
5732 		ret = -EOPNOTSUPP;
5733 
5734 	bpf_link_put_direct(link);
5735 	return ret;
5736 }
5737 
bpf_link_inc_not_zero(struct bpf_link * link)5738 struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
5739 {
5740 	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
5741 }
5742 EXPORT_SYMBOL(bpf_link_inc_not_zero);
5743 
bpf_link_by_id(u32 id)5744 struct bpf_link *bpf_link_by_id(u32 id)
5745 {
5746 	struct bpf_link *link;
5747 
5748 	if (!id)
5749 		return ERR_PTR(-ENOENT);
5750 
5751 	spin_lock_bh(&link_idr_lock);
5752 	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
5753 	link = idr_find(&link_idr, id);
5754 	if (link) {
5755 		if (link->id)
5756 			link = bpf_link_inc_not_zero(link);
5757 		else
5758 			link = ERR_PTR(-EAGAIN);
5759 	} else {
5760 		link = ERR_PTR(-ENOENT);
5761 	}
5762 	spin_unlock_bh(&link_idr_lock);
5763 	return link;
5764 }
5765 
bpf_link_get_curr_or_next(u32 * id)5766 struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
5767 {
5768 	struct bpf_link *link;
5769 
5770 	spin_lock_bh(&link_idr_lock);
5771 again:
5772 	link = idr_get_next(&link_idr, id);
5773 	if (link) {
5774 		link = bpf_link_inc_not_zero(link);
5775 		if (IS_ERR(link)) {
5776 			(*id)++;
5777 			goto again;
5778 		}
5779 	}
5780 	spin_unlock_bh(&link_idr_lock);
5781 
5782 	return link;
5783 }
5784 
5785 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
5786 
bpf_link_get_fd_by_id(const union bpf_attr * attr)5787 static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
5788 {
5789 	struct bpf_link *link;
5790 	u32 id = attr->link_id;
5791 	int fd;
5792 
5793 	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
5794 		return -EINVAL;
5795 
5796 	if (!capable(CAP_SYS_ADMIN))
5797 		return -EPERM;
5798 
5799 	link = bpf_link_by_id(id);
5800 	if (IS_ERR(link))
5801 		return PTR_ERR(link);
5802 
5803 	fd = bpf_link_new_fd(link);
5804 	if (fd < 0)
5805 		bpf_link_put_direct(link);
5806 
5807 	return fd;
5808 }
5809 
5810 DEFINE_MUTEX(bpf_stats_enabled_mutex);
5811 
bpf_stats_release(struct inode * inode,struct file * file)5812 static int bpf_stats_release(struct inode *inode, struct file *file)
5813 {
5814 	mutex_lock(&bpf_stats_enabled_mutex);
5815 	static_key_slow_dec(&bpf_stats_enabled_key.key);
5816 	mutex_unlock(&bpf_stats_enabled_mutex);
5817 	return 0;
5818 }
5819 
5820 static const struct file_operations bpf_stats_fops = {
5821 	.release = bpf_stats_release,
5822 };
5823 
bpf_enable_runtime_stats(void)5824 static int bpf_enable_runtime_stats(void)
5825 {
5826 	int fd;
5827 
5828 	mutex_lock(&bpf_stats_enabled_mutex);
5829 
5830 	/* Set a very high limit to avoid overflow */
5831 	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
5832 		mutex_unlock(&bpf_stats_enabled_mutex);
5833 		return -EBUSY;
5834 	}
5835 
5836 	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
5837 	if (fd >= 0)
5838 		static_key_slow_inc(&bpf_stats_enabled_key.key);
5839 
5840 	mutex_unlock(&bpf_stats_enabled_mutex);
5841 	return fd;
5842 }
5843 
5844 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
5845 
bpf_enable_stats(union bpf_attr * attr)5846 static int bpf_enable_stats(union bpf_attr *attr)
5847 {
5848 
5849 	if (CHECK_ATTR(BPF_ENABLE_STATS))
5850 		return -EINVAL;
5851 
5852 	if (!capable(CAP_SYS_ADMIN))
5853 		return -EPERM;
5854 
5855 	switch (attr->enable_stats.type) {
5856 	case BPF_STATS_RUN_TIME:
5857 		return bpf_enable_runtime_stats();
5858 	default:
5859 		break;
5860 	}
5861 	return -EINVAL;
5862 }
5863 
5864 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
5865 
bpf_iter_create(union bpf_attr * attr)5866 static int bpf_iter_create(union bpf_attr *attr)
5867 {
5868 	struct bpf_link *link;
5869 	int err;
5870 
5871 	if (CHECK_ATTR(BPF_ITER_CREATE))
5872 		return -EINVAL;
5873 
5874 	if (attr->iter_create.flags)
5875 		return -EINVAL;
5876 
5877 	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
5878 	if (IS_ERR(link))
5879 		return PTR_ERR(link);
5880 
5881 	err = bpf_iter_new_fd(link);
5882 	bpf_link_put_direct(link);
5883 
5884 	return err;
5885 }
5886 
5887 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
5888 
bpf_prog_bind_map(union bpf_attr * attr)5889 static int bpf_prog_bind_map(union bpf_attr *attr)
5890 {
5891 	struct bpf_prog *prog;
5892 	struct bpf_map *map;
5893 	struct bpf_map **used_maps_old, **used_maps_new;
5894 	int i, ret = 0;
5895 
5896 	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
5897 		return -EINVAL;
5898 
5899 	if (attr->prog_bind_map.flags)
5900 		return -EINVAL;
5901 
5902 	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
5903 	if (IS_ERR(prog))
5904 		return PTR_ERR(prog);
5905 
5906 	map = bpf_map_get(attr->prog_bind_map.map_fd);
5907 	if (IS_ERR(map)) {
5908 		ret = PTR_ERR(map);
5909 		goto out_prog_put;
5910 	}
5911 
5912 	mutex_lock(&prog->aux->used_maps_mutex);
5913 
5914 	used_maps_old = prog->aux->used_maps;
5915 
5916 	for (i = 0; i < prog->aux->used_map_cnt; i++)
5917 		if (used_maps_old[i] == map) {
5918 			bpf_map_put(map);
5919 			goto out_unlock;
5920 		}
5921 
5922 	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
5923 				      sizeof(used_maps_new[0]),
5924 				      GFP_KERNEL);
5925 	if (!used_maps_new) {
5926 		ret = -ENOMEM;
5927 		goto out_unlock;
5928 	}
5929 
5930 	/* The bpf program will not access the bpf map, but for the sake of
5931 	 * simplicity, increase sleepable_refcnt for sleepable program as well.
5932 	 */
5933 	if (prog->sleepable)
5934 		atomic64_inc(&map->sleepable_refcnt);
5935 	memcpy(used_maps_new, used_maps_old,
5936 	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
5937 	used_maps_new[prog->aux->used_map_cnt] = map;
5938 
5939 	prog->aux->used_map_cnt++;
5940 	prog->aux->used_maps = used_maps_new;
5941 
5942 	kfree(used_maps_old);
5943 
5944 out_unlock:
5945 	mutex_unlock(&prog->aux->used_maps_mutex);
5946 
5947 	if (ret)
5948 		bpf_map_put(map);
5949 out_prog_put:
5950 	bpf_prog_put(prog);
5951 	return ret;
5952 }
5953 
5954 #define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd
5955 
token_create(union bpf_attr * attr)5956 static int token_create(union bpf_attr *attr)
5957 {
5958 	if (CHECK_ATTR(BPF_TOKEN_CREATE))
5959 		return -EINVAL;
5960 
5961 	/* no flags are supported yet */
5962 	if (attr->token_create.flags)
5963 		return -EINVAL;
5964 
5965 	return bpf_token_create(attr);
5966 }
5967 
5968 #define BPF_PROG_STREAM_READ_BY_FD_LAST_FIELD prog_stream_read.prog_fd
5969 
prog_stream_read(union bpf_attr * attr)5970 static int prog_stream_read(union bpf_attr *attr)
5971 {
5972 	char __user *buf = u64_to_user_ptr(attr->prog_stream_read.stream_buf);
5973 	u32 len = attr->prog_stream_read.stream_buf_len;
5974 	struct bpf_prog *prog;
5975 	int ret;
5976 
5977 	if (CHECK_ATTR(BPF_PROG_STREAM_READ_BY_FD))
5978 		return -EINVAL;
5979 
5980 	prog = bpf_prog_get(attr->prog_stream_read.prog_fd);
5981 	if (IS_ERR(prog))
5982 		return PTR_ERR(prog);
5983 
5984 	ret = bpf_prog_stream_read(prog, attr->prog_stream_read.stream_id, buf, len);
5985 	bpf_prog_put(prog);
5986 
5987 	return ret;
5988 }
5989 
__sys_bpf(enum bpf_cmd cmd,bpfptr_t uattr,unsigned int size)5990 static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size)
5991 {
5992 	union bpf_attr attr;
5993 	int err;
5994 
5995 	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
5996 	if (err)
5997 		return err;
5998 	size = min_t(u32, size, sizeof(attr));
5999 
6000 	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
6001 	memset(&attr, 0, sizeof(attr));
6002 	if (copy_from_bpfptr(&attr, uattr, size) != 0)
6003 		return -EFAULT;
6004 
6005 	err = security_bpf(cmd, &attr, size, uattr.is_kernel);
6006 	if (err < 0)
6007 		return err;
6008 
6009 	switch (cmd) {
6010 	case BPF_MAP_CREATE:
6011 		err = map_create(&attr, uattr.is_kernel);
6012 		break;
6013 	case BPF_MAP_LOOKUP_ELEM:
6014 		err = map_lookup_elem(&attr);
6015 		break;
6016 	case BPF_MAP_UPDATE_ELEM:
6017 		err = map_update_elem(&attr, uattr);
6018 		break;
6019 	case BPF_MAP_DELETE_ELEM:
6020 		err = map_delete_elem(&attr, uattr);
6021 		break;
6022 	case BPF_MAP_GET_NEXT_KEY:
6023 		err = map_get_next_key(&attr);
6024 		break;
6025 	case BPF_MAP_FREEZE:
6026 		err = map_freeze(&attr);
6027 		break;
6028 	case BPF_PROG_LOAD:
6029 		err = bpf_prog_load(&attr, uattr, size);
6030 		break;
6031 	case BPF_OBJ_PIN:
6032 		err = bpf_obj_pin(&attr);
6033 		break;
6034 	case BPF_OBJ_GET:
6035 		err = bpf_obj_get(&attr);
6036 		break;
6037 	case BPF_PROG_ATTACH:
6038 		err = bpf_prog_attach(&attr);
6039 		break;
6040 	case BPF_PROG_DETACH:
6041 		err = bpf_prog_detach(&attr);
6042 		break;
6043 	case BPF_PROG_QUERY:
6044 		err = bpf_prog_query(&attr, uattr.user);
6045 		break;
6046 	case BPF_PROG_TEST_RUN:
6047 		err = bpf_prog_test_run(&attr, uattr.user);
6048 		break;
6049 	case BPF_PROG_GET_NEXT_ID:
6050 		err = bpf_obj_get_next_id(&attr, uattr.user,
6051 					  &prog_idr, &prog_idr_lock);
6052 		break;
6053 	case BPF_MAP_GET_NEXT_ID:
6054 		err = bpf_obj_get_next_id(&attr, uattr.user,
6055 					  &map_idr, &map_idr_lock);
6056 		break;
6057 	case BPF_BTF_GET_NEXT_ID:
6058 		err = bpf_obj_get_next_id(&attr, uattr.user,
6059 					  &btf_idr, &btf_idr_lock);
6060 		break;
6061 	case BPF_PROG_GET_FD_BY_ID:
6062 		err = bpf_prog_get_fd_by_id(&attr);
6063 		break;
6064 	case BPF_MAP_GET_FD_BY_ID:
6065 		err = bpf_map_get_fd_by_id(&attr);
6066 		break;
6067 	case BPF_OBJ_GET_INFO_BY_FD:
6068 		err = bpf_obj_get_info_by_fd(&attr, uattr.user);
6069 		break;
6070 	case BPF_RAW_TRACEPOINT_OPEN:
6071 		err = bpf_raw_tracepoint_open(&attr);
6072 		break;
6073 	case BPF_BTF_LOAD:
6074 		err = bpf_btf_load(&attr, uattr, size);
6075 		break;
6076 	case BPF_BTF_GET_FD_BY_ID:
6077 		err = bpf_btf_get_fd_by_id(&attr);
6078 		break;
6079 	case BPF_TASK_FD_QUERY:
6080 		err = bpf_task_fd_query(&attr, uattr.user);
6081 		break;
6082 	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
6083 		err = map_lookup_and_delete_elem(&attr);
6084 		break;
6085 	case BPF_MAP_LOOKUP_BATCH:
6086 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
6087 		break;
6088 	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
6089 		err = bpf_map_do_batch(&attr, uattr.user,
6090 				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
6091 		break;
6092 	case BPF_MAP_UPDATE_BATCH:
6093 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
6094 		break;
6095 	case BPF_MAP_DELETE_BATCH:
6096 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
6097 		break;
6098 	case BPF_LINK_CREATE:
6099 		err = link_create(&attr, uattr);
6100 		break;
6101 	case BPF_LINK_UPDATE:
6102 		err = link_update(&attr);
6103 		break;
6104 	case BPF_LINK_GET_FD_BY_ID:
6105 		err = bpf_link_get_fd_by_id(&attr);
6106 		break;
6107 	case BPF_LINK_GET_NEXT_ID:
6108 		err = bpf_obj_get_next_id(&attr, uattr.user,
6109 					  &link_idr, &link_idr_lock);
6110 		break;
6111 	case BPF_ENABLE_STATS:
6112 		err = bpf_enable_stats(&attr);
6113 		break;
6114 	case BPF_ITER_CREATE:
6115 		err = bpf_iter_create(&attr);
6116 		break;
6117 	case BPF_LINK_DETACH:
6118 		err = link_detach(&attr);
6119 		break;
6120 	case BPF_PROG_BIND_MAP:
6121 		err = bpf_prog_bind_map(&attr);
6122 		break;
6123 	case BPF_TOKEN_CREATE:
6124 		err = token_create(&attr);
6125 		break;
6126 	case BPF_PROG_STREAM_READ_BY_FD:
6127 		err = prog_stream_read(&attr);
6128 		break;
6129 	default:
6130 		err = -EINVAL;
6131 		break;
6132 	}
6133 
6134 	return err;
6135 }
6136 
SYSCALL_DEFINE3(bpf,int,cmd,union bpf_attr __user *,uattr,unsigned int,size)6137 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
6138 {
6139 	return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
6140 }
6141 
syscall_prog_is_valid_access(int off,int size,enum bpf_access_type type,const struct bpf_prog * prog,struct bpf_insn_access_aux * info)6142 static bool syscall_prog_is_valid_access(int off, int size,
6143 					 enum bpf_access_type type,
6144 					 const struct bpf_prog *prog,
6145 					 struct bpf_insn_access_aux *info)
6146 {
6147 	if (off < 0 || off >= U16_MAX)
6148 		return false;
6149 	if (off % size != 0)
6150 		return false;
6151 	return true;
6152 }
6153 
BPF_CALL_3(bpf_sys_bpf,int,cmd,union bpf_attr *,attr,u32,attr_size)6154 BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
6155 {
6156 	switch (cmd) {
6157 	case BPF_MAP_CREATE:
6158 	case BPF_MAP_DELETE_ELEM:
6159 	case BPF_MAP_UPDATE_ELEM:
6160 	case BPF_MAP_FREEZE:
6161 	case BPF_MAP_GET_FD_BY_ID:
6162 	case BPF_PROG_LOAD:
6163 	case BPF_BTF_LOAD:
6164 	case BPF_LINK_CREATE:
6165 	case BPF_RAW_TRACEPOINT_OPEN:
6166 		break;
6167 	default:
6168 		return -EINVAL;
6169 	}
6170 	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
6171 }
6172 
6173 
6174 /* To shut up -Wmissing-prototypes.
6175  * This function is used by the kernel light skeleton
6176  * to load bpf programs when modules are loaded or during kernel boot.
6177  * See tools/lib/bpf/skel_internal.h
6178  */
6179 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
6180 
kern_sys_bpf(int cmd,union bpf_attr * attr,unsigned int size)6181 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
6182 {
6183 	struct bpf_prog * __maybe_unused prog;
6184 	struct bpf_tramp_run_ctx __maybe_unused run_ctx;
6185 
6186 	switch (cmd) {
6187 #ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
6188 	case BPF_PROG_TEST_RUN:
6189 		if (attr->test.data_in || attr->test.data_out ||
6190 		    attr->test.ctx_out || attr->test.duration ||
6191 		    attr->test.repeat || attr->test.flags)
6192 			return -EINVAL;
6193 
6194 		prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
6195 		if (IS_ERR(prog))
6196 			return PTR_ERR(prog);
6197 
6198 		if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
6199 		    attr->test.ctx_size_in > U16_MAX) {
6200 			bpf_prog_put(prog);
6201 			return -EINVAL;
6202 		}
6203 
6204 		run_ctx.bpf_cookie = 0;
6205 		if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
6206 			/* recursion detected */
6207 			__bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
6208 			bpf_prog_put(prog);
6209 			return -EBUSY;
6210 		}
6211 		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
6212 		__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
6213 						&run_ctx);
6214 		bpf_prog_put(prog);
6215 		return 0;
6216 #endif
6217 	default:
6218 		return ____bpf_sys_bpf(cmd, attr, size);
6219 	}
6220 }
6221 EXPORT_SYMBOL_NS(kern_sys_bpf, "BPF_INTERNAL");
6222 
6223 static const struct bpf_func_proto bpf_sys_bpf_proto = {
6224 	.func		= bpf_sys_bpf,
6225 	.gpl_only	= false,
6226 	.ret_type	= RET_INTEGER,
6227 	.arg1_type	= ARG_ANYTHING,
6228 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
6229 	.arg3_type	= ARG_CONST_SIZE,
6230 };
6231 
6232 const struct bpf_func_proto * __weak
tracing_prog_func_proto(enum bpf_func_id func_id,const struct bpf_prog * prog)6233 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6234 {
6235 	return bpf_base_func_proto(func_id, prog);
6236 }
6237 
BPF_CALL_1(bpf_sys_close,u32,fd)6238 BPF_CALL_1(bpf_sys_close, u32, fd)
6239 {
6240 	/* When bpf program calls this helper there should not be
6241 	 * an fdget() without matching completed fdput().
6242 	 * This helper is allowed in the following callchain only:
6243 	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
6244 	 */
6245 	return close_fd(fd);
6246 }
6247 
6248 static const struct bpf_func_proto bpf_sys_close_proto = {
6249 	.func		= bpf_sys_close,
6250 	.gpl_only	= false,
6251 	.ret_type	= RET_INTEGER,
6252 	.arg1_type	= ARG_ANYTHING,
6253 };
6254 
BPF_CALL_4(bpf_kallsyms_lookup_name,const char *,name,int,name_sz,int,flags,u64 *,res)6255 BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
6256 {
6257 	*res = 0;
6258 	if (flags)
6259 		return -EINVAL;
6260 
6261 	if (name_sz <= 1 || name[name_sz - 1])
6262 		return -EINVAL;
6263 
6264 	if (!bpf_dump_raw_ok(current_cred()))
6265 		return -EPERM;
6266 
6267 	*res = kallsyms_lookup_name(name);
6268 	return *res ? 0 : -ENOENT;
6269 }
6270 
6271 static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
6272 	.func		= bpf_kallsyms_lookup_name,
6273 	.gpl_only	= false,
6274 	.ret_type	= RET_INTEGER,
6275 	.arg1_type	= ARG_PTR_TO_MEM,
6276 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
6277 	.arg3_type	= ARG_ANYTHING,
6278 	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
6279 	.arg4_size	= sizeof(u64),
6280 };
6281 
6282 static const struct bpf_func_proto *
syscall_prog_func_proto(enum bpf_func_id func_id,const struct bpf_prog * prog)6283 syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6284 {
6285 	switch (func_id) {
6286 	case BPF_FUNC_sys_bpf:
6287 		return !bpf_token_capable(prog->aux->token, CAP_PERFMON)
6288 		       ? NULL : &bpf_sys_bpf_proto;
6289 	case BPF_FUNC_btf_find_by_name_kind:
6290 		return &bpf_btf_find_by_name_kind_proto;
6291 	case BPF_FUNC_sys_close:
6292 		return &bpf_sys_close_proto;
6293 	case BPF_FUNC_kallsyms_lookup_name:
6294 		return &bpf_kallsyms_lookup_name_proto;
6295 	default:
6296 		return tracing_prog_func_proto(func_id, prog);
6297 	}
6298 }
6299 
6300 const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
6301 	.get_func_proto  = syscall_prog_func_proto,
6302 	.is_valid_access = syscall_prog_is_valid_access,
6303 };
6304 
6305 const struct bpf_prog_ops bpf_syscall_prog_ops = {
6306 	.test_run = bpf_prog_test_run_syscall,
6307 };
6308 
6309 #ifdef CONFIG_SYSCTL
bpf_stats_handler(const struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)6310 static int bpf_stats_handler(const struct ctl_table *table, int write,
6311 			     void *buffer, size_t *lenp, loff_t *ppos)
6312 {
6313 	struct static_key *key = (struct static_key *)table->data;
6314 	static int saved_val;
6315 	int val, ret;
6316 	struct ctl_table tmp = {
6317 		.data   = &val,
6318 		.maxlen = sizeof(val),
6319 		.mode   = table->mode,
6320 		.extra1 = SYSCTL_ZERO,
6321 		.extra2 = SYSCTL_ONE,
6322 	};
6323 
6324 	if (write && !capable(CAP_SYS_ADMIN))
6325 		return -EPERM;
6326 
6327 	mutex_lock(&bpf_stats_enabled_mutex);
6328 	val = saved_val;
6329 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
6330 	if (write && !ret && val != saved_val) {
6331 		if (val)
6332 			static_key_slow_inc(key);
6333 		else
6334 			static_key_slow_dec(key);
6335 		saved_val = val;
6336 	}
6337 	mutex_unlock(&bpf_stats_enabled_mutex);
6338 	return ret;
6339 }
6340 
unpriv_ebpf_notify(int new_state)6341 void __weak unpriv_ebpf_notify(int new_state)
6342 {
6343 }
6344 
bpf_unpriv_handler(const struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)6345 static int bpf_unpriv_handler(const struct ctl_table *table, int write,
6346 			      void *buffer, size_t *lenp, loff_t *ppos)
6347 {
6348 	int ret, unpriv_enable = *(int *)table->data;
6349 	bool locked_state = unpriv_enable == 1;
6350 	struct ctl_table tmp = *table;
6351 
6352 	if (write && !capable(CAP_SYS_ADMIN))
6353 		return -EPERM;
6354 
6355 	tmp.data = &unpriv_enable;
6356 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
6357 	if (write && !ret) {
6358 		if (locked_state && unpriv_enable != 1)
6359 			return -EPERM;
6360 		*(int *)table->data = unpriv_enable;
6361 	}
6362 
6363 	if (write)
6364 		unpriv_ebpf_notify(unpriv_enable);
6365 
6366 	return ret;
6367 }
6368 
6369 static const struct ctl_table bpf_syscall_table[] = {
6370 	{
6371 		.procname	= "unprivileged_bpf_disabled",
6372 		.data		= &sysctl_unprivileged_bpf_disabled,
6373 		.maxlen		= sizeof(sysctl_unprivileged_bpf_disabled),
6374 		.mode		= 0644,
6375 		.proc_handler	= bpf_unpriv_handler,
6376 		.extra1		= SYSCTL_ZERO,
6377 		.extra2		= SYSCTL_TWO,
6378 	},
6379 	{
6380 		.procname	= "bpf_stats_enabled",
6381 		.data		= &bpf_stats_enabled_key.key,
6382 		.mode		= 0644,
6383 		.proc_handler	= bpf_stats_handler,
6384 	},
6385 };
6386 
bpf_syscall_sysctl_init(void)6387 static int __init bpf_syscall_sysctl_init(void)
6388 {
6389 	register_sysctl_init("kernel", bpf_syscall_table);
6390 	return 0;
6391 }
6392 late_initcall(bpf_syscall_sysctl_init);
6393 #endif /* CONFIG_SYSCTL */
6394