/* /linux/kernel/bpf/arraymap.c (revision 4f139972b489f8bc2c821aa25ac65018d92af3f7) */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}

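/* Allocate a per-CPU region of elem_size bytes (8-byte aligned) for every
 * element of a BPF_MAP_TYPE_PERCPU_ARRAY; on failure, whatever was
 * allocated so far is freed again and -ENOMEM is returned.
 */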
static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	struct bpf_array *array;
	u64 array_size;
	u32 elem_size;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 || attr->map_flags)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) attr->max_entries * sizeof(void *);
	else
		array_size += (u64) attr->max_entries * elem_size;

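	/* Illustrative sizing example (hypothetical numbers): value_size = 12
	 * is rounded up to elem_size = 16.  With max_entries = 1000 a plain
	 * array reserves sizeof(*array) + 1000 * 16 bytes here, while a
	 * per-cpu array reserves only 1000 pointers and allocates the
	 * 16-byte elements per CPU via bpf_array_alloc_percpu() below.
	 */
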
	/* make sure there is no u32 overflow later in round_up() */
	if (array_size >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size);
	if (!array)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->elem_size = elem_size;

	if (!percpu)
		goto out;

	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

	if (array_size >= U32_MAX - PAGE_SIZE ||
	    elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}
out:
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	return &array->map;
}
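
/* Minimal userspace sketch of creating such a map through the bpf(2)
 * syscall.  Purely illustrative: the value size and entry count are made-up
 * example numbers; only the attribute names and the key_size == 4
 * requirement follow from the checks above.
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,		// keys are a u32 index
 *		.value_size  = 64,		// example value size
 *		.max_entries = 256,
 *		.map_flags   = 0,		// must be 0 for this map type
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */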

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * index;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
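/* Rough C sketch of what the emitted sequence does (illustrative only, the
 * register assignments follow the insn_buf filled in below):
 *
 *	elem = map_ptr + offsetof(struct bpf_array, value);
 *	index = *(u32 *)key;
 *	if (index >= map->max_entries)
 *		return NULL;
 *	return elem + index * elem_size;	// a shift replaces the multiply
 *						// when elem_size is a power of 2
 */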
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index]);
}

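/* Copy the value of element @key for every possible CPU into @value.  Per
 * the loop below, the destination buffer must provide
 * num_possible_cpus() * round_up(value_size, 8) bytes, one slot per CPU in
 * for_each_possible_cpu() order.
 */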
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
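/* Note: for a BPF_MAP_TYPE_PERCPU_ARRAY this writes only the current CPU's
 * copy of the value; updates from the syscall side go through
 * bpf_percpu_array_update() below, which writes all CPUs at once.
 */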
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index]),
		       value, map->value_size);
	else
		memcpy(array->value + array->elem_size * index,
		       value, map->value_size);
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space provides round_up(value_size, 8) bytes per CPU that are
	 * copied into the per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes are returned
	 * or zeros which were zero-filled by percpu_alloc, so no kernel data
	 * can leak.
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * have been disconnected from events. Wait for outstanding programs
	 * to complete and then free the array.
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

static const struct bpf_map_ops array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
};

static struct bpf_map_type_list array_type __ro_after_init = {
	.ops = &array_ops,
	.type = BPF_MAP_TYPE_ARRAY,
};

static const struct bpf_map_ops percpu_array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list percpu_array_type __ro_after_init = {
	.ops = &percpu_array_ops,
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
};

static int __init register_array_map(void)
{
	bpf_register_map_type(&array_type);
	bpf_register_map_type(&percpu_array_type);
	return 0;
}
late_initcall(register_array_map);

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
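/* Translate the user-supplied fd in @value into the map-type specific
 * internal pointer (a bpf_prog, perf event entry, cgroup or inner map) via
 * ->map_fd_get_ptr(), publish it with xchg() and drop the reference to the
 * previous entry, if any, via ->map_fd_put_ptr().
 */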
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

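/* Programs stored in a BPF_MAP_TYPE_PROG_ARRAY must be compatible with the
 * programs already using the map (bpf_prog_array_compatible()); such arrays
 * are typically consumed from programs through bpf_tail_call().
 */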
static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static const struct bpf_map_ops prog_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
};

static struct bpf_map_type_list prog_array_type __ro_after_init = {
	.ops = &prog_array_ops,
	.type = BPF_MAP_TYPE_PROG_ARRAY,
};

static int __init register_prog_array_map(void)
{
	bpf_register_map_type(&prog_array_type);
	return 0;
}
late_initcall(register_prog_array_map);

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

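/* Only non-inherited perf events are accepted: software events must be of
 * type PERF_COUNT_SW_BPF_OUTPUT, while raw and hardware counters are taken
 * as they are; anything else is rejected with -EINVAL.
 */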
static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	const struct perf_event_attr *attr;
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	event = perf_file->private_data;
	ee = ERR_PTR(-EINVAL);

	attr = perf_event_attrs(event);
	if (IS_ERR(attr) || attr->inherit)
		goto err_out;

	switch (attr->type) {
	case PERF_TYPE_SOFTWARE:
		if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
			goto err_out;
		/* fall-through */
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
		ee = bpf_event_entry_gen(perf_file, map_file);
		if (ee)
			return ee;
		ee = ERR_PTR(-ENOMEM);
		/* fall-through */
	default:
		break;
	}

err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static const struct bpf_map_ops perf_event_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

static struct bpf_map_type_list perf_event_array_type __ro_after_init = {
	.ops = &perf_event_array_ops,
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
};

static int __init register_perf_event_array_map(void)
{
	bpf_register_map_type(&perf_event_array_type);
	return 0;
}
late_initcall(register_perf_event_array_map);

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static const struct bpf_map_ops cgroup_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};

static struct bpf_map_type_list cgroup_array_type __ro_after_init = {
	.ops = &cgroup_array_ops,
	.type = BPF_MAP_TYPE_CGROUP_ARRAY,
};

static int __init register_cgroup_array_map(void)
{
	bpf_register_map_type(&cgroup_array_type);
	return 0;
}
late_initcall(register_cgroup_array_map);
#endif

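/* Array-of-maps: the elements are fds of other maps.  bpf_map_meta_alloc()
 * takes a snapshot of the attributes of the map referenced by
 * attr->inner_map_fd so that only compatible maps can later be inserted
 * (see map_in_map.c).
 */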
static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = fd_array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed from the syscall path, which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static const struct bpf_map_ops array_of_map_ops = {
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
};

static struct bpf_map_type_list array_of_map_type __ro_after_init = {
	.ops = &array_of_map_ops,
	.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
};

static int __init register_array_of_map(void)
{
	bpf_register_map_type(&array_of_map_type);
	return 0;
}
late_initcall(register_array_of_map);