xref: /linux/kernel/bpf/arraymap.c (revision d0d106a2bd21499901299160744e5fe9f4c83ddb)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016,2017 Facebook
4  */
5 #include <linux/bpf.h>
6 #include <linux/btf.h>
7 #include <linux/err.h>
8 #include <linux/slab.h>
9 #include <linux/mm.h>
10 #include <linux/filter.h>
11 #include <linux/perf_event.h>
12 #include <uapi/linux/btf.h>
13 #include <linux/rcupdate_trace.h>
14 #include <linux/btf_ids.h>
15 
16 #include "map_in_map.h"
17 
18 #define ARRAY_CREATE_FLAG_MASK \
19 	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
20 	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)
21 
22 static void bpf_array_free_percpu(struct bpf_array *array)
23 {
24 	int i;
25 
26 	for (i = 0; i < array->map.max_entries; i++) {
27 		free_percpu(array->pptrs[i]);
28 		cond_resched();
29 	}
30 }
31 
32 static int bpf_array_alloc_percpu(struct bpf_array *array)
33 {
34 	void __percpu *ptr;
35 	int i;
36 
37 	for (i = 0; i < array->map.max_entries; i++) {
38 		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
39 					   GFP_USER | __GFP_NOWARN);
40 		if (!ptr) {
41 			bpf_array_free_percpu(array);
42 			return -ENOMEM;
43 		}
44 		array->pptrs[i] = ptr;
45 		cond_resched();
46 	}
47 
48 	return 0;
49 }
50 
51 /* Called from syscall */
52 int array_map_alloc_check(union bpf_attr *attr)
53 {
54 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
55 	int numa_node = bpf_map_attr_numa_node(attr);
56 
57 	/* check sanity of attributes */
58 	if (attr->max_entries == 0 || attr->key_size != 4 ||
59 	    attr->value_size == 0 ||
60 	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
61 	    !bpf_map_flags_access_ok(attr->map_flags) ||
62 	    (percpu && numa_node != NUMA_NO_NODE))
63 		return -EINVAL;
64 
65 	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
66 	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
67 		return -EINVAL;
68 
69 	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
70 	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
71 		return -EINVAL;
72 
73 	/* avoid overflow on round_up(map->value_size) */
74 	if (attr->value_size > INT_MAX)
75 		return -E2BIG;
76 	/* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
77 	if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
78 		return -E2BIG;
79 
80 	return 0;
81 }
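
/* For orientation, a minimal user-space sketch (illustrative, assuming
 * libbpf's bpf_map_create(); "struct stats" is a hypothetical value type)
 * of attributes that pass the checks above:
 *
 *	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "example_arr",
 *				sizeof(__u32),		(key_size must be 4)
 *				sizeof(struct stats),	(value_size must be > 0)
 *				256,			(max_entries must be > 0)
 *				NULL);			(default opts, no extra flags)
 */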
82 
83 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
84 {
85 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
86 	int numa_node = bpf_map_attr_numa_node(attr);
87 	u32 elem_size, index_mask, max_entries;
88 	bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
89 	u64 array_size, mask64;
90 	struct bpf_array *array;
91 
92 	elem_size = round_up(attr->value_size, 8);
93 
94 	max_entries = attr->max_entries;
95 
96 	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
97 	 * upper most bit set in u32 space is undefined behavior due to
98 	 * resulting 1U << 32, so do it manually here in u64 space.
99 	 */
100 	mask64 = fls_long(max_entries - 1);
101 	mask64 = 1ULL << mask64;
102 	mask64 -= 1;
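	/* e.g. for attr->max_entries == 5: fls_long(4) == 3, so mask64 becomes
	 * (1ULL << 3) - 1 == 7; index_mask is then 7 and, unless Spectre v1
	 * checks are bypassed, max_entries is rounded up to 8 below.
	 */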
103 
104 	index_mask = mask64;
105 	if (!bypass_spec_v1) {
106 		/* round up array size to nearest power of 2,
107 		 * since cpu will speculate within index_mask limits
108 		 */
109 		max_entries = index_mask + 1;
110 		/* Check for overflows. */
111 		if (max_entries < attr->max_entries)
112 			return ERR_PTR(-E2BIG);
113 	}
114 
115 	array_size = sizeof(*array);
116 	if (percpu) {
117 		array_size += (u64) max_entries * sizeof(void *);
118 	} else {
119 		/* rely on vmalloc() to return page-aligned memory and
120 		 * ensure array->value is exactly page-aligned
121 		 */
122 		if (attr->map_flags & BPF_F_MMAPABLE) {
123 			array_size = PAGE_ALIGN(array_size);
124 			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
125 		} else {
126 			array_size += (u64) max_entries * elem_size;
127 		}
128 	}
129 
130 	/* allocate all map elements and zero-initialize them */
131 	if (attr->map_flags & BPF_F_MMAPABLE) {
132 		void *data;
133 
134 		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
135 		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
136 		if (!data)
137 			return ERR_PTR(-ENOMEM);
138 		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
139 			- offsetof(struct bpf_array, value);
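		/* This placement makes array->value land exactly on the first
		 * page boundary after 'data', so a later mmap() of the map
		 * exposes only the page-aligned value area, never the header
		 * that precedes it.
		 */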
140 	} else {
141 		array = bpf_map_area_alloc(array_size, numa_node);
142 	}
143 	if (!array)
144 		return ERR_PTR(-ENOMEM);
145 	array->index_mask = index_mask;
146 	array->map.bypass_spec_v1 = bypass_spec_v1;
147 
148 	/* copy mandatory map attributes */
149 	bpf_map_init_from_attr(&array->map, attr);
150 	array->elem_size = elem_size;
151 
152 	if (percpu && bpf_array_alloc_percpu(array)) {
153 		bpf_map_area_free(array);
154 		return ERR_PTR(-ENOMEM);
155 	}
156 
157 	return &array->map;
158 }
159 
160 static void *array_map_elem_ptr(struct bpf_array* array, u32 index)
161 {
162 	return array->value + (u64)array->elem_size * index;
163 }
164 
165 /* Called from syscall or from eBPF program */
166 static void *array_map_lookup_elem(struct bpf_map *map, void *key)
167 {
168 	struct bpf_array *array = container_of(map, struct bpf_array, map);
169 	u32 index = *(u32 *)key;
170 
171 	if (unlikely(index >= array->map.max_entries))
172 		return NULL;
173 
174 	return array->value + (u64)array->elem_size * (index & array->index_mask);
175 }
176 
177 static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
178 				       u32 off)
179 {
180 	struct bpf_array *array = container_of(map, struct bpf_array, map);
181 
182 	if (map->max_entries != 1)
183 		return -ENOTSUPP;
184 	if (off >= map->value_size)
185 		return -EINVAL;
186 
187 	*imm = (unsigned long)array->value;
188 	return 0;
189 }
190 
191 static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
192 				       u32 *off)
193 {
194 	struct bpf_array *array = container_of(map, struct bpf_array, map);
195 	u64 base = (unsigned long)array->value;
196 	u64 range = array->elem_size;
197 
198 	if (map->max_entries != 1)
199 		return -ENOTSUPP;
200 	if (imm < base || imm >= base + range)
201 		return -ENOENT;
202 
203 	*off = imm - base;
204 	return 0;
205 }
206 
207 /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
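/* Roughly, a sketch of what the emitted sequence computes (not literal source):
 *
 *	index = *(u32 *)key;
 *	if (index >= map->max_entries)
 *		return NULL;
 *	index &= array->index_mask;	(only if !map->bypass_spec_v1)
 *	return &array->value[index * elem_size];
 */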
208 static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
209 {
210 	struct bpf_array *array = container_of(map, struct bpf_array, map);
211 	struct bpf_insn *insn = insn_buf;
212 	u32 elem_size = array->elem_size;
213 	const int ret = BPF_REG_0;
214 	const int map_ptr = BPF_REG_1;
215 	const int index = BPF_REG_2;
216 
217 	if (map->map_flags & BPF_F_INNER_MAP)
218 		return -EOPNOTSUPP;
219 
220 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
221 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
222 	if (!map->bypass_spec_v1) {
223 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
224 		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
225 	} else {
226 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
227 	}
228 
229 	if (is_power_of_2(elem_size)) {
230 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
231 	} else {
232 		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
233 	}
234 	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
235 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
236 	*insn++ = BPF_MOV64_IMM(ret, 0);
237 	return insn - insn_buf;
238 }
239 
240 /* Called from eBPF program */
241 static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
242 {
243 	struct bpf_array *array = container_of(map, struct bpf_array, map);
244 	u32 index = *(u32 *)key;
245 
246 	if (unlikely(index >= array->map.max_entries))
247 		return NULL;
248 
249 	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
250 }
251 
252 /* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */
253 static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
254 {
255 	struct bpf_array *array = container_of(map, struct bpf_array, map);
256 	struct bpf_insn *insn = insn_buf;
257 
258 	if (!bpf_jit_supports_percpu_insn())
259 		return -EOPNOTSUPP;
260 
261 	if (map->map_flags & BPF_F_INNER_MAP)
262 		return -EOPNOTSUPP;
263 
264 	BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0);
265 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs));
266 
267 	*insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0);
268 	if (!map->bypass_spec_v1) {
269 		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6);
270 		*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
271 	} else {
272 		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5);
273 	}
274 
275 	*insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
276 	*insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
277 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
278 	*insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
279 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
280 	*insn++ = BPF_MOV64_IMM(BPF_REG_0, 0);
281 	return insn - insn_buf;
282 }
283 
284 static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
285 {
286 	struct bpf_array *array = container_of(map, struct bpf_array, map);
287 	u32 index = *(u32 *)key;
288 
289 	if (cpu >= nr_cpu_ids)
290 		return NULL;
291 
292 	if (unlikely(index >= array->map.max_entries))
293 		return NULL;
294 
295 	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
296 }
297 
298 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
299 {
300 	struct bpf_array *array = container_of(map, struct bpf_array, map);
301 	u32 index = *(u32 *)key;
302 	void __percpu *pptr;
303 	int cpu, off = 0;
304 	u32 size;
305 
306 	if (unlikely(index >= array->map.max_entries))
307 		return -ENOENT;
308 
309 	/* per_cpu areas are zero-filled and bpf programs can only
310 	 * access 'value_size' of them, so copying rounded areas
311 	 * will not leak any kernel data
312 	 */
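	/* The caller's 'value' buffer holds one round_up(value_size, 8) sized
	 * slot per possible CPU, laid out in for_each_possible_cpu() order;
	 * the loop below fills those slots in sequence.
	 */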
313 	size = array->elem_size;
314 	rcu_read_lock();
315 	pptr = array->pptrs[index & array->index_mask];
316 	for_each_possible_cpu(cpu) {
317 		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
318 		check_and_init_map_value(map, value + off);
319 		off += size;
320 	}
321 	rcu_read_unlock();
322 	return 0;
323 }
324 
325 /* Called from syscall */
326 static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
327 {
328 	struct bpf_array *array = container_of(map, struct bpf_array, map);
329 	u32 index = key ? *(u32 *)key : U32_MAX;
330 	u32 *next = (u32 *)next_key;
331 
332 	if (index >= array->map.max_entries) {
333 		*next = 0;
334 		return 0;
335 	}
336 
337 	if (index == array->map.max_entries - 1)
338 		return -ENOENT;
339 
340 	*next = index + 1;
341 	return 0;
342 }
343 
344 /* Called from syscall or from eBPF program */
345 static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
346 				  u64 map_flags)
347 {
348 	struct bpf_array *array = container_of(map, struct bpf_array, map);
349 	u32 index = *(u32 *)key;
350 	char *val;
351 
352 	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
353 		/* unknown flags */
354 		return -EINVAL;
355 
356 	if (unlikely(index >= array->map.max_entries))
357 		/* all elements were pre-allocated, cannot insert a new one */
358 		return -E2BIG;
359 
360 	if (unlikely(map_flags & BPF_NOEXIST))
361 		/* all elements already exist */
362 		return -EEXIST;
363 
364 	if (unlikely((map_flags & BPF_F_LOCK) &&
365 		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
366 		return -EINVAL;
367 
368 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
369 		val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
370 		copy_map_value(map, val, value);
371 		bpf_obj_free_fields(array->map.record, val);
372 	} else {
373 		val = array->value +
374 			(u64)array->elem_size * (index & array->index_mask);
375 		if (map_flags & BPF_F_LOCK)
376 			copy_map_value_locked(map, val, value, false);
377 		else
378 			copy_map_value(map, val, value);
379 		bpf_obj_free_fields(array->map.record, val);
380 	}
381 	return 0;
382 }
383 
384 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
385 			    u64 map_flags)
386 {
387 	struct bpf_array *array = container_of(map, struct bpf_array, map);
388 	u32 index = *(u32 *)key;
389 	void __percpu *pptr;
390 	int cpu, off = 0;
391 	u32 size;
392 
393 	if (unlikely(map_flags > BPF_EXIST))
394 		/* unknown flags */
395 		return -EINVAL;
396 
397 	if (unlikely(index >= array->map.max_entries))
398 		/* all elements were pre-allocated, cannot insert a new one */
399 		return -E2BIG;
400 
401 	if (unlikely(map_flags == BPF_NOEXIST))
402 		/* all elements already exist */
403 		return -EEXIST;
404 
405 	/* User space provides round_up(value_size, 8) bytes that are
406 	 * copied into the per-cpu area. BPF programs can only access
407 	 * value_size of it. During lookup the same extra bytes are
408 	 * returned, or zeros which were zero-filled by percpu_alloc,
409 	 * so no kernel data can leak.
410 	 */
411 	size = array->elem_size;
412 	rcu_read_lock();
413 	pptr = array->pptrs[index & array->index_mask];
414 	for_each_possible_cpu(cpu) {
415 		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
416 		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
417 		off += size;
418 	}
419 	rcu_read_unlock();
420 	return 0;
421 }
422 
423 /* Called from syscall or from eBPF program */
424 static long array_map_delete_elem(struct bpf_map *map, void *key)
425 {
426 	return -EINVAL;
427 }
428 
429 static void *array_map_vmalloc_addr(struct bpf_array *array)
430 {
431 	return (void *)round_down((unsigned long)array, PAGE_SIZE);
432 }
433 
434 static void array_map_free_timers_wq(struct bpf_map *map)
435 {
436 	struct bpf_array *array = container_of(map, struct bpf_array, map);
437 	int i;
438 
439 	/* We don't reset or free fields other than timer and workqueue
440 	 * on uref dropping to zero.
441 	 */
442 	if (btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE)) {
443 		for (i = 0; i < array->map.max_entries; i++) {
444 			if (btf_record_has_field(map->record, BPF_TIMER))
445 				bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
446 			if (btf_record_has_field(map->record, BPF_WORKQUEUE))
447 				bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i));
448 		}
449 	}
450 }
451 
452 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
453 static void array_map_free(struct bpf_map *map)
454 {
455 	struct bpf_array *array = container_of(map, struct bpf_array, map);
456 	int i;
457 
458 	if (!IS_ERR_OR_NULL(map->record)) {
459 		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
460 			for (i = 0; i < array->map.max_entries; i++) {
461 				void __percpu *pptr = array->pptrs[i & array->index_mask];
462 				int cpu;
463 
464 				for_each_possible_cpu(cpu) {
465 					bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
466 					cond_resched();
467 				}
468 			}
469 		} else {
470 			for (i = 0; i < array->map.max_entries; i++)
471 				bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
472 		}
473 	}
474 
475 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
476 		bpf_array_free_percpu(array);
477 
478 	if (array->map.map_flags & BPF_F_MMAPABLE)
479 		bpf_map_area_free(array_map_vmalloc_addr(array));
480 	else
481 		bpf_map_area_free(array);
482 }
483 
484 static void array_map_seq_show_elem(struct bpf_map *map, void *key,
485 				    struct seq_file *m)
486 {
487 	void *value;
488 
489 	rcu_read_lock();
490 
491 	value = array_map_lookup_elem(map, key);
492 	if (!value) {
493 		rcu_read_unlock();
494 		return;
495 	}
496 
497 	if (map->btf_key_type_id)
498 		seq_printf(m, "%u: ", *(u32 *)key);
499 	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
500 	seq_putc(m, '\n');
501 
502 	rcu_read_unlock();
503 }
504 
505 static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
506 					   struct seq_file *m)
507 {
508 	struct bpf_array *array = container_of(map, struct bpf_array, map);
509 	u32 index = *(u32 *)key;
510 	void __percpu *pptr;
511 	int cpu;
512 
513 	rcu_read_lock();
514 
515 	seq_printf(m, "%u: {\n", *(u32 *)key);
516 	pptr = array->pptrs[index & array->index_mask];
517 	for_each_possible_cpu(cpu) {
518 		seq_printf(m, "\tcpu%d: ", cpu);
519 		btf_type_seq_show(map->btf, map->btf_value_type_id,
520 				  per_cpu_ptr(pptr, cpu), m);
521 		seq_putc(m, '\n');
522 	}
523 	seq_puts(m, "}\n");
524 
525 	rcu_read_unlock();
526 }
527 
528 static int array_map_check_btf(const struct bpf_map *map,
529 			       const struct btf *btf,
530 			       const struct btf_type *key_type,
531 			       const struct btf_type *value_type)
532 {
533 	u32 int_data;
534 
535 	/* One exception for keyless BTF: .bss/.data/.rodata map */
536 	if (btf_type_is_void(key_type)) {
537 		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
538 		    map->max_entries != 1)
539 			return -EINVAL;
540 
541 		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
542 			return -EINVAL;
543 
544 		return 0;
545 	}
546 
547 	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
548 		return -EINVAL;
549 
550 	int_data = *(u32 *)(key_type + 1);
551 	/* bpf array can only take a u32 key. This check makes sure
552 	 * that the btf matches the attr used during map_create.
553 	 */
554 	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
555 		return -EINVAL;
556 
557 	return 0;
558 }
559 
560 static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
561 {
562 	struct bpf_array *array = container_of(map, struct bpf_array, map);
563 	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;
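	/* pgoff counts the pages occupied by struct bpf_array itself, so that
	 * offset 0 of the user's mmap() maps to the start of the value area.
	 */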
564 
565 	if (!(map->map_flags & BPF_F_MMAPABLE))
566 		return -EINVAL;
567 
568 	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
569 	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
570 		return -EINVAL;
571 
572 	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
573 				   vma->vm_pgoff + pgoff);
574 }
575 
576 static bool array_map_meta_equal(const struct bpf_map *meta0,
577 				 const struct bpf_map *meta1)
578 {
579 	if (!bpf_map_meta_equal(meta0, meta1))
580 		return false;
581 	return meta0->map_flags & BPF_F_INNER_MAP ? true :
582 	       meta0->max_entries == meta1->max_entries;
583 }
584 
585 struct bpf_iter_seq_array_map_info {
586 	struct bpf_map *map;
587 	void *percpu_value_buf;
588 	u32 index;
589 };
590 
591 static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
592 {
593 	struct bpf_iter_seq_array_map_info *info = seq->private;
594 	struct bpf_map *map = info->map;
595 	struct bpf_array *array;
596 	u32 index;
597 
598 	if (info->index >= map->max_entries)
599 		return NULL;
600 
601 	if (*pos == 0)
602 		++*pos;
603 	array = container_of(map, struct bpf_array, map);
604 	index = info->index & array->index_mask;
605 	if (info->percpu_value_buf)
606 		return (void *)(uintptr_t)array->pptrs[index];
607 	return array_map_elem_ptr(array, index);
608 }
609 
610 static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
611 {
612 	struct bpf_iter_seq_array_map_info *info = seq->private;
613 	struct bpf_map *map = info->map;
614 	struct bpf_array *array;
615 	u32 index;
616 
617 	++*pos;
618 	++info->index;
619 	if (info->index >= map->max_entries)
620 		return NULL;
621 
622 	array = container_of(map, struct bpf_array, map);
623 	index = info->index & array->index_mask;
624 	if (info->percpu_value_buf)
625 		return (void *)(uintptr_t)array->pptrs[index];
626 	return array_map_elem_ptr(array, index);
627 }
628 
629 static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
630 {
631 	struct bpf_iter_seq_array_map_info *info = seq->private;
632 	struct bpf_iter__bpf_map_elem ctx = {};
633 	struct bpf_map *map = info->map;
634 	struct bpf_array *array = container_of(map, struct bpf_array, map);
635 	struct bpf_iter_meta meta;
636 	struct bpf_prog *prog;
637 	int off = 0, cpu = 0;
638 	void __percpu *pptr;
639 	u32 size;
640 
641 	meta.seq = seq;
642 	prog = bpf_iter_get_info(&meta, v == NULL);
643 	if (!prog)
644 		return 0;
645 
646 	ctx.meta = &meta;
647 	ctx.map = info->map;
648 	if (v) {
649 		ctx.key = &info->index;
650 
651 		if (!info->percpu_value_buf) {
652 			ctx.value = v;
653 		} else {
654 			pptr = (void __percpu *)(uintptr_t)v;
655 			size = array->elem_size;
656 			for_each_possible_cpu(cpu) {
657 				copy_map_value_long(map, info->percpu_value_buf + off,
658 						    per_cpu_ptr(pptr, cpu));
659 				check_and_init_map_value(map, info->percpu_value_buf + off);
660 				off += size;
661 			}
662 			ctx.value = info->percpu_value_buf;
663 		}
664 	}
665 
666 	return bpf_iter_run_prog(prog, &ctx);
667 }
668 
669 static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
670 {
671 	return __bpf_array_map_seq_show(seq, v);
672 }
673 
674 static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
675 {
676 	if (!v)
677 		(void)__bpf_array_map_seq_show(seq, NULL);
678 }
679 
680 static int bpf_iter_init_array_map(void *priv_data,
681 				   struct bpf_iter_aux_info *aux)
682 {
683 	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
684 	struct bpf_map *map = aux->map;
685 	struct bpf_array *array = container_of(map, struct bpf_array, map);
686 	void *value_buf;
687 	u32 buf_size;
688 
689 	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
690 		buf_size = array->elem_size * num_possible_cpus();
691 		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
692 		if (!value_buf)
693 			return -ENOMEM;
694 
695 		seq_info->percpu_value_buf = value_buf;
696 	}
697 
698 	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
699 	 * released before or in the middle of iterating map elements, so
700 	 * acquire an extra map uref for iterator.
701 	 */
702 	bpf_map_inc_with_uref(map);
703 	seq_info->map = map;
704 	return 0;
705 }
706 
707 static void bpf_iter_fini_array_map(void *priv_data)
708 {
709 	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
710 
711 	bpf_map_put_with_uref(seq_info->map);
712 	kfree(seq_info->percpu_value_buf);
713 }
714 
715 static const struct seq_operations bpf_array_map_seq_ops = {
716 	.start	= bpf_array_map_seq_start,
717 	.next	= bpf_array_map_seq_next,
718 	.stop	= bpf_array_map_seq_stop,
719 	.show	= bpf_array_map_seq_show,
720 };
721 
722 static const struct bpf_iter_seq_info iter_seq_info = {
723 	.seq_ops		= &bpf_array_map_seq_ops,
724 	.init_seq_private	= bpf_iter_init_array_map,
725 	.fini_seq_private	= bpf_iter_fini_array_map,
726 	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
727 };
728 
729 static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
730 				    void *callback_ctx, u64 flags)
731 {
732 	u32 i, key, num_elems = 0;
733 	struct bpf_array *array;
734 	bool is_percpu;
735 	u64 ret = 0;
736 	void *val;
737 
738 	cant_migrate();
739 
740 	if (flags != 0)
741 		return -EINVAL;
742 
743 	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
744 	array = container_of(map, struct bpf_array, map);
745 	for (i = 0; i < map->max_entries; i++) {
746 		if (is_percpu)
747 			val = this_cpu_ptr(array->pptrs[i]);
748 		else
749 			val = array_map_elem_ptr(array, i);
750 		num_elems++;
751 		key = i;
752 		ret = callback_fn((u64)(long)map, (u64)(long)&key,
753 				  (u64)(long)val, (u64)(long)callback_ctx, 0);
754 		/* return value: 0 - continue, 1 - stop and return */
755 		if (ret)
756 			break;
757 	}
758 
759 	return num_elems;
760 }
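
/* This is reached via the bpf_for_each_map_elem() helper. A BPF-program-side
 * sketch (illustrative; map and callback names are assumed, value type taken
 * to be u64) of the callback contract enforced above:
 *
 *	static long count_nonzero(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
 *	{
 *		if (*val)
 *			(*(long *)ctx)++;
 *		return 0;	(0 = continue, nonzero = stop iterating)
 *	}
 *
 *	long n = 0;
 *	bpf_for_each_map_elem(&example_array, count_nonzero, &n, 0);
 */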
761 
762 static u64 array_map_mem_usage(const struct bpf_map *map)
763 {
764 	struct bpf_array *array = container_of(map, struct bpf_array, map);
765 	bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
766 	u32 elem_size = array->elem_size;
767 	u64 entries = map->max_entries;
768 	u64 usage = sizeof(*array);
769 
770 	if (percpu) {
771 		usage += entries * sizeof(void *);
772 		usage += entries * elem_size * num_possible_cpus();
773 	} else {
774 		if (map->map_flags & BPF_F_MMAPABLE) {
775 			usage = PAGE_ALIGN(usage);
776 			usage += PAGE_ALIGN(entries * elem_size);
777 		} else {
778 			usage += entries * elem_size;
779 		}
780 	}
781 	return usage;
782 }
783 
784 BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
785 const struct bpf_map_ops array_map_ops = {
786 	.map_meta_equal = array_map_meta_equal,
787 	.map_alloc_check = array_map_alloc_check,
788 	.map_alloc = array_map_alloc,
789 	.map_free = array_map_free,
790 	.map_get_next_key = array_map_get_next_key,
791 	.map_release_uref = array_map_free_timers_wq,
792 	.map_lookup_elem = array_map_lookup_elem,
793 	.map_update_elem = array_map_update_elem,
794 	.map_delete_elem = array_map_delete_elem,
795 	.map_gen_lookup = array_map_gen_lookup,
796 	.map_direct_value_addr = array_map_direct_value_addr,
797 	.map_direct_value_meta = array_map_direct_value_meta,
798 	.map_mmap = array_map_mmap,
799 	.map_seq_show_elem = array_map_seq_show_elem,
800 	.map_check_btf = array_map_check_btf,
801 	.map_lookup_batch = generic_map_lookup_batch,
802 	.map_update_batch = generic_map_update_batch,
803 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
804 	.map_for_each_callback = bpf_for_each_array_elem,
805 	.map_mem_usage = array_map_mem_usage,
806 	.map_btf_id = &array_map_btf_ids[0],
807 	.iter_seq_info = &iter_seq_info,
808 };
809 
810 const struct bpf_map_ops percpu_array_map_ops = {
811 	.map_meta_equal = bpf_map_meta_equal,
812 	.map_alloc_check = array_map_alloc_check,
813 	.map_alloc = array_map_alloc,
814 	.map_free = array_map_free,
815 	.map_get_next_key = array_map_get_next_key,
816 	.map_lookup_elem = percpu_array_map_lookup_elem,
817 	.map_gen_lookup = percpu_array_map_gen_lookup,
818 	.map_update_elem = array_map_update_elem,
819 	.map_delete_elem = array_map_delete_elem,
820 	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
821 	.map_seq_show_elem = percpu_array_map_seq_show_elem,
822 	.map_check_btf = array_map_check_btf,
823 	.map_lookup_batch = generic_map_lookup_batch,
824 	.map_update_batch = generic_map_update_batch,
825 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
826 	.map_for_each_callback = bpf_for_each_array_elem,
827 	.map_mem_usage = array_map_mem_usage,
828 	.map_btf_id = &array_map_btf_ids[0],
829 	.iter_seq_info = &iter_seq_info,
830 };
831 
832 static int fd_array_map_alloc_check(union bpf_attr *attr)
833 {
834 	/* only file descriptors can be stored in this type of map */
835 	if (attr->value_size != sizeof(u32))
836 		return -EINVAL;
837 	/* Program read-only/write-only not supported for special maps yet. */
838 	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
839 		return -EINVAL;
840 	return array_map_alloc_check(attr);
841 }
842 
843 static void fd_array_map_free(struct bpf_map *map)
844 {
845 	struct bpf_array *array = container_of(map, struct bpf_array, map);
846 	int i;
847 
848 	/* make sure it's empty */
849 	for (i = 0; i < array->map.max_entries; i++)
850 		BUG_ON(array->ptrs[i] != NULL);
851 
852 	bpf_map_area_free(array);
853 }
854 
855 static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
856 {
857 	return ERR_PTR(-EOPNOTSUPP);
858 }
859 
860 /* only called from syscall */
861 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
862 {
863 	void **elem, *ptr;
864 	int ret =  0;
865 
866 	if (!map->ops->map_fd_sys_lookup_elem)
867 		return -ENOTSUPP;
868 
869 	rcu_read_lock();
870 	elem = array_map_lookup_elem(map, key);
871 	if (elem && (ptr = READ_ONCE(*elem)))
872 		*value = map->ops->map_fd_sys_lookup_elem(ptr);
873 	else
874 		ret = -ENOENT;
875 	rcu_read_unlock();
876 
877 	return ret;
878 }
879 
880 /* only called from syscall */
881 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
882 				 void *key, void *value, u64 map_flags)
883 {
884 	struct bpf_array *array = container_of(map, struct bpf_array, map);
885 	void *new_ptr, *old_ptr;
886 	u32 index = *(u32 *)key, ufd;
887 
888 	if (map_flags != BPF_ANY)
889 		return -EINVAL;
890 
891 	if (index >= array->map.max_entries)
892 		return -E2BIG;
893 
894 	ufd = *(u32 *)value;
895 	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
896 	if (IS_ERR(new_ptr))
897 		return PTR_ERR(new_ptr);
898 
899 	if (map->ops->map_poke_run) {
900 		mutex_lock(&array->aux->poke_mutex);
901 		old_ptr = xchg(array->ptrs + index, new_ptr);
902 		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
903 		mutex_unlock(&array->aux->poke_mutex);
904 	} else {
905 		old_ptr = xchg(array->ptrs + index, new_ptr);
906 	}
907 
908 	if (old_ptr)
909 		map->ops->map_fd_put_ptr(map, old_ptr, true);
910 	return 0;
911 }
912 
913 static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer)
914 {
915 	struct bpf_array *array = container_of(map, struct bpf_array, map);
916 	void *old_ptr;
917 	u32 index = *(u32 *)key;
918 
919 	if (index >= array->map.max_entries)
920 		return -E2BIG;
921 
922 	if (map->ops->map_poke_run) {
923 		mutex_lock(&array->aux->poke_mutex);
924 		old_ptr = xchg(array->ptrs + index, NULL);
925 		map->ops->map_poke_run(map, index, old_ptr, NULL);
926 		mutex_unlock(&array->aux->poke_mutex);
927 	} else {
928 		old_ptr = xchg(array->ptrs + index, NULL);
929 	}
930 
931 	if (old_ptr) {
932 		map->ops->map_fd_put_ptr(map, old_ptr, need_defer);
933 		return 0;
934 	} else {
935 		return -ENOENT;
936 	}
937 }
938 
939 static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
940 {
941 	return __fd_array_map_delete_elem(map, key, true);
942 }
943 
944 static void *prog_fd_array_get_ptr(struct bpf_map *map,
945 				   struct file *map_file, int fd)
946 {
947 	struct bpf_prog *prog = bpf_prog_get(fd);
948 	bool is_extended;
949 
950 	if (IS_ERR(prog))
951 		return prog;
952 
953 	if (prog->type == BPF_PROG_TYPE_EXT ||
954 	    !bpf_prog_map_compatible(map, prog)) {
955 		bpf_prog_put(prog);
956 		return ERR_PTR(-EINVAL);
957 	}
958 
959 	mutex_lock(&prog->aux->ext_mutex);
960 	is_extended = prog->aux->is_extended;
961 	if (!is_extended)
962 		prog->aux->prog_array_member_cnt++;
963 	mutex_unlock(&prog->aux->ext_mutex);
964 	if (is_extended) {
965 		/* An extended prog cannot be a tail callee. This prevents a
966 		 * potential infinite loop like:
967 		 * tail callee prog entry -> tail callee prog subprog ->
968 		 * freplace prog entry --tailcall-> tail callee prog entry.
969 		 */
970 		bpf_prog_put(prog);
971 		return ERR_PTR(-EBUSY);
972 	}
973 
974 	return prog;
975 }
976 
977 static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
978 {
979 	struct bpf_prog *prog = ptr;
980 
981 	mutex_lock(&prog->aux->ext_mutex);
982 	prog->aux->prog_array_member_cnt--;
983 	mutex_unlock(&prog->aux->ext_mutex);
984 	/* bpf_prog is freed after one RCU or tasks trace grace period */
985 	bpf_prog_put(prog);
986 }
987 
988 static u32 prog_fd_array_sys_lookup_elem(void *ptr)
989 {
990 	return ((struct bpf_prog *)ptr)->aux->id;
991 }
992 
993 /* decrement refcnt of all bpf_progs that are stored in this map */
994 static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer)
995 {
996 	struct bpf_array *array = container_of(map, struct bpf_array, map);
997 	int i;
998 
999 	for (i = 0; i < array->map.max_entries; i++)
1000 		__fd_array_map_delete_elem(map, &i, need_defer);
1001 }
1002 
1003 static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
1004 					 struct seq_file *m)
1005 {
1006 	void **elem, *ptr;
1007 	u32 prog_id;
1008 
1009 	rcu_read_lock();
1010 
1011 	elem = array_map_lookup_elem(map, key);
1012 	if (elem) {
1013 		ptr = READ_ONCE(*elem);
1014 		if (ptr) {
1015 			seq_printf(m, "%u: ", *(u32 *)key);
1016 			prog_id = prog_fd_array_sys_lookup_elem(ptr);
1017 			btf_type_seq_show(map->btf, map->btf_value_type_id,
1018 					  &prog_id, m);
1019 			seq_putc(m, '\n');
1020 		}
1021 	}
1022 
1023 	rcu_read_unlock();
1024 }
1025 
1026 struct prog_poke_elem {
1027 	struct list_head list;
1028 	struct bpf_prog_aux *aux;
1029 };
1030 
1031 static int prog_array_map_poke_track(struct bpf_map *map,
1032 				     struct bpf_prog_aux *prog_aux)
1033 {
1034 	struct prog_poke_elem *elem;
1035 	struct bpf_array_aux *aux;
1036 	int ret = 0;
1037 
1038 	aux = container_of(map, struct bpf_array, map)->aux;
1039 	mutex_lock(&aux->poke_mutex);
1040 	list_for_each_entry(elem, &aux->poke_progs, list) {
1041 		if (elem->aux == prog_aux)
1042 			goto out;
1043 	}
1044 
1045 	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
1046 	if (!elem) {
1047 		ret = -ENOMEM;
1048 		goto out;
1049 	}
1050 
1051 	INIT_LIST_HEAD(&elem->list);
1052 	/* We must track the program's aux info at this point in time
1053 	 * since the program pointer itself may not be stable yet, see
1054 	 * also comment in prog_array_map_poke_run().
1055 	 */
1056 	elem->aux = prog_aux;
1057 
1058 	list_add_tail(&elem->list, &aux->poke_progs);
1059 out:
1060 	mutex_unlock(&aux->poke_mutex);
1061 	return ret;
1062 }
1063 
1064 static void prog_array_map_poke_untrack(struct bpf_map *map,
1065 					struct bpf_prog_aux *prog_aux)
1066 {
1067 	struct prog_poke_elem *elem, *tmp;
1068 	struct bpf_array_aux *aux;
1069 
1070 	aux = container_of(map, struct bpf_array, map)->aux;
1071 	mutex_lock(&aux->poke_mutex);
1072 	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1073 		if (elem->aux == prog_aux) {
1074 			list_del_init(&elem->list);
1075 			kfree(elem);
1076 			break;
1077 		}
1078 	}
1079 	mutex_unlock(&aux->poke_mutex);
1080 }
1081 
1082 void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
1083 				      struct bpf_prog *new, struct bpf_prog *old)
1084 {
1085 	WARN_ON_ONCE(1);
1086 }
1087 
1088 static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
1089 				    struct bpf_prog *old,
1090 				    struct bpf_prog *new)
1091 {
1092 	struct prog_poke_elem *elem;
1093 	struct bpf_array_aux *aux;
1094 
1095 	aux = container_of(map, struct bpf_array, map)->aux;
1096 	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
1097 
1098 	list_for_each_entry(elem, &aux->poke_progs, list) {
1099 		struct bpf_jit_poke_descriptor *poke;
1100 		int i;
1101 
1102 		for (i = 0; i < elem->aux->size_poke_tab; i++) {
1103 			poke = &elem->aux->poke_tab[i];
1104 
1105 			/* Few things to be aware of:
1106 			 *
1107 			 * 1) We can only ever access aux in this context, but
1108 			 *    not aux->prog since it might not be stable yet and
1109 			 *    there could be danger of use after free otherwise.
1110 			 * 2) Initially when we start tracking aux, the program
1111 			 *    is not JITed yet and also does not have a kallsyms
1112 			 *    entry. We skip these as poke->tailcall_target_stable
1113 			 *    is not active yet. The JIT will do the final fixup
1114 			 *    before setting it stable. The various
1115 			 *    poke->tailcall_target_stable are successively
1116 			 *    activated, so tail call updates can arrive from here
1117 			 *    while JIT is still finishing its final fixup for
1118 			 *    non-activated poke entries.
1119 			 * 3) Programs reaching a refcount of zero while patching
1120 			 *    is in progress are also okay, since we're protected
1121 			 *    under poke_mutex and untrack the programs before the
1122 			 *    JIT buffer is freed.
1123 			 */
1124 			if (!READ_ONCE(poke->tailcall_target_stable))
1125 				continue;
1126 			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
1127 				continue;
1128 			if (poke->tail_call.map != map ||
1129 			    poke->tail_call.key != key)
1130 				continue;
1131 
1132 			bpf_arch_poke_desc_update(poke, new, old);
1133 		}
1134 	}
1135 }
1136 
1137 static void prog_array_map_clear_deferred(struct work_struct *work)
1138 {
1139 	struct bpf_map *map = container_of(work, struct bpf_array_aux,
1140 					   work)->map;
1141 	bpf_fd_array_map_clear(map, true);
1142 	bpf_map_put(map);
1143 }
1144 
1145 static void prog_array_map_clear(struct bpf_map *map)
1146 {
1147 	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
1148 						 map)->aux;
1149 	bpf_map_inc(map);
1150 	schedule_work(&aux->work);
1151 }
1152 
1153 static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
1154 {
1155 	struct bpf_array_aux *aux;
1156 	struct bpf_map *map;
1157 
1158 	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
1159 	if (!aux)
1160 		return ERR_PTR(-ENOMEM);
1161 
1162 	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
1163 	INIT_LIST_HEAD(&aux->poke_progs);
1164 	mutex_init(&aux->poke_mutex);
1165 
1166 	map = array_map_alloc(attr);
1167 	if (IS_ERR(map)) {
1168 		kfree(aux);
1169 		return map;
1170 	}
1171 
1172 	container_of(map, struct bpf_array, map)->aux = aux;
1173 	aux->map = map;
1174 
1175 	return map;
1176 }
1177 
1178 static void prog_array_map_free(struct bpf_map *map)
1179 {
1180 	struct prog_poke_elem *elem, *tmp;
1181 	struct bpf_array_aux *aux;
1182 
1183 	aux = container_of(map, struct bpf_array, map)->aux;
1184 	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1185 		list_del_init(&elem->list);
1186 		kfree(elem);
1187 	}
1188 	kfree(aux);
1189 	fd_array_map_free(map);
1190 }
1191 
1192 /* prog_array->aux->{type,jited} is a runtime binding.
1193  * Doing static check alone in the verifier is not enough.
1194  * Thus, prog_array_map cannot be used as an inner_map
1195  * and map_meta_equal is not implemented.
1196  */
1197 const struct bpf_map_ops prog_array_map_ops = {
1198 	.map_alloc_check = fd_array_map_alloc_check,
1199 	.map_alloc = prog_array_map_alloc,
1200 	.map_free = prog_array_map_free,
1201 	.map_poke_track = prog_array_map_poke_track,
1202 	.map_poke_untrack = prog_array_map_poke_untrack,
1203 	.map_poke_run = prog_array_map_poke_run,
1204 	.map_get_next_key = array_map_get_next_key,
1205 	.map_lookup_elem = fd_array_map_lookup_elem,
1206 	.map_delete_elem = fd_array_map_delete_elem,
1207 	.map_fd_get_ptr = prog_fd_array_get_ptr,
1208 	.map_fd_put_ptr = prog_fd_array_put_ptr,
1209 	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1210 	.map_release_uref = prog_array_map_clear,
1211 	.map_seq_show_elem = prog_array_map_seq_show_elem,
1212 	.map_mem_usage = array_map_mem_usage,
1213 	.map_btf_id = &array_map_btf_ids[0],
1214 };
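
/* A BPF-program-side usage sketch (illustrative, assuming libbpf BTF map
 * definitions; "jmp_table" and the slot index are made-up names):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 8);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	...
 *	bpf_tail_call(ctx, &jmp_table, 2);	(jump to the prog in slot 2, if set)
 */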
1215 
1216 static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
1217 						   struct file *map_file)
1218 {
1219 	struct bpf_event_entry *ee;
1220 
1221 	ee = kzalloc(sizeof(*ee), GFP_KERNEL);
1222 	if (ee) {
1223 		ee->event = perf_file->private_data;
1224 		ee->perf_file = perf_file;
1225 		ee->map_file = map_file;
1226 	}
1227 
1228 	return ee;
1229 }
1230 
1231 static void __bpf_event_entry_free(struct rcu_head *rcu)
1232 {
1233 	struct bpf_event_entry *ee;
1234 
1235 	ee = container_of(rcu, struct bpf_event_entry, rcu);
1236 	fput(ee->perf_file);
1237 	kfree(ee);
1238 }
1239 
1240 static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
1241 {
1242 	call_rcu(&ee->rcu, __bpf_event_entry_free);
1243 }
1244 
1245 static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1246 					 struct file *map_file, int fd)
1247 {
1248 	struct bpf_event_entry *ee;
1249 	struct perf_event *event;
1250 	struct file *perf_file;
1251 	u64 value;
1252 
1253 	perf_file = perf_event_get(fd);
1254 	if (IS_ERR(perf_file))
1255 		return perf_file;
1256 
1257 	ee = ERR_PTR(-EOPNOTSUPP);
1258 	event = perf_file->private_data;
1259 	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
1260 		goto err_out;
1261 
1262 	ee = bpf_event_entry_gen(perf_file, map_file);
1263 	if (ee)
1264 		return ee;
1265 	ee = ERR_PTR(-ENOMEM);
1266 err_out:
1267 	fput(perf_file);
1268 	return ee;
1269 }
1270 
1271 static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
1272 {
1273 	/* bpf_perf_event is freed after one RCU grace period */
1274 	bpf_event_entry_free_rcu(ptr);
1275 }
1276 
1277 static void perf_event_fd_array_release(struct bpf_map *map,
1278 					struct file *map_file)
1279 {
1280 	struct bpf_array *array = container_of(map, struct bpf_array, map);
1281 	struct bpf_event_entry *ee;
1282 	int i;
1283 
1284 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1285 		return;
1286 
1287 	rcu_read_lock();
1288 	for (i = 0; i < array->map.max_entries; i++) {
1289 		ee = READ_ONCE(array->ptrs[i]);
1290 		if (ee && ee->map_file == map_file)
1291 			__fd_array_map_delete_elem(map, &i, true);
1292 	}
1293 	rcu_read_unlock();
1294 }
1295 
1296 static void perf_event_fd_array_map_free(struct bpf_map *map)
1297 {
1298 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1299 		bpf_fd_array_map_clear(map, false);
1300 	fd_array_map_free(map);
1301 }
1302 
1303 const struct bpf_map_ops perf_event_array_map_ops = {
1304 	.map_meta_equal = bpf_map_meta_equal,
1305 	.map_alloc_check = fd_array_map_alloc_check,
1306 	.map_alloc = array_map_alloc,
1307 	.map_free = perf_event_fd_array_map_free,
1308 	.map_get_next_key = array_map_get_next_key,
1309 	.map_lookup_elem = fd_array_map_lookup_elem,
1310 	.map_delete_elem = fd_array_map_delete_elem,
1311 	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
1312 	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
1313 	.map_release = perf_event_fd_array_release,
1314 	.map_check_btf = map_check_no_btf,
1315 	.map_mem_usage = array_map_mem_usage,
1316 	.map_btf_id = &array_map_btf_ids[0],
1317 };
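
/* Producer-side sketch (illustrative; "events" and "data" are assumed names):
 * a BPF program emits samples into a BPF_MAP_TYPE_PERF_EVENT_ARRAY with
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &data, sizeof(data));
 *
 * where each slot was populated from user space with a perf event fd
 * (see bpf_fd_array_map_update_elem() above).
 */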
1318 
1319 #ifdef CONFIG_CGROUPS
1320 static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
1321 				     struct file *map_file /* not used */,
1322 				     int fd)
1323 {
1324 	return cgroup_get_from_fd(fd);
1325 }
1326 
1327 static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
1328 {
1329 	/* cgroup_put() frees the cgrp after an RCU grace period */
1330 	cgroup_put(ptr);
1331 }
1332 
1333 static void cgroup_fd_array_free(struct bpf_map *map)
1334 {
1335 	bpf_fd_array_map_clear(map, false);
1336 	fd_array_map_free(map);
1337 }
1338 
1339 const struct bpf_map_ops cgroup_array_map_ops = {
1340 	.map_meta_equal = bpf_map_meta_equal,
1341 	.map_alloc_check = fd_array_map_alloc_check,
1342 	.map_alloc = array_map_alloc,
1343 	.map_free = cgroup_fd_array_free,
1344 	.map_get_next_key = array_map_get_next_key,
1345 	.map_lookup_elem = fd_array_map_lookup_elem,
1346 	.map_delete_elem = fd_array_map_delete_elem,
1347 	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
1348 	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
1349 	.map_check_btf = map_check_no_btf,
1350 	.map_mem_usage = array_map_mem_usage,
1351 	.map_btf_id = &array_map_btf_ids[0],
1352 };
1353 #endif
1354 
1355 static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
1356 {
1357 	struct bpf_map *map, *inner_map_meta;
1358 
1359 	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
1360 	if (IS_ERR(inner_map_meta))
1361 		return inner_map_meta;
1362 
1363 	map = array_map_alloc(attr);
1364 	if (IS_ERR(map)) {
1365 		bpf_map_meta_free(inner_map_meta);
1366 		return map;
1367 	}
1368 
1369 	map->inner_map_meta = inner_map_meta;
1370 
1371 	return map;
1372 }
1373 
1374 static void array_of_map_free(struct bpf_map *map)
1375 {
1376 	/* map->inner_map_meta is only accessed by syscall which
1377 	 * is protected by fdget/fdput.
1378 	 */
1379 	bpf_map_meta_free(map->inner_map_meta);
1380 	bpf_fd_array_map_clear(map, false);
1381 	fd_array_map_free(map);
1382 }
1383 
1384 static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
1385 {
1386 	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
1387 
1388 	if (!inner_map)
1389 		return NULL;
1390 
1391 	return READ_ONCE(*inner_map);
1392 }
1393 
1394 static int array_of_map_gen_lookup(struct bpf_map *map,
1395 				   struct bpf_insn *insn_buf)
1396 {
1397 	struct bpf_array *array = container_of(map, struct bpf_array, map);
1398 	u32 elem_size = array->elem_size;
1399 	struct bpf_insn *insn = insn_buf;
1400 	const int ret = BPF_REG_0;
1401 	const int map_ptr = BPF_REG_1;
1402 	const int index = BPF_REG_2;
1403 
1404 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
1405 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
1406 	if (!map->bypass_spec_v1) {
1407 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1408 		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1409 	} else {
1410 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1411 	}
1412 	if (is_power_of_2(elem_size))
1413 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
1414 	else
1415 		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
1416 	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
1417 	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
1418 	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
1419 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
1420 	*insn++ = BPF_MOV64_IMM(ret, 0);
1421 
1422 	return insn - insn_buf;
1423 }
1424 
1425 const struct bpf_map_ops array_of_maps_map_ops = {
1426 	.map_alloc_check = fd_array_map_alloc_check,
1427 	.map_alloc = array_of_map_alloc,
1428 	.map_free = array_of_map_free,
1429 	.map_get_next_key = array_map_get_next_key,
1430 	.map_lookup_elem = array_of_map_lookup_elem,
1431 	.map_delete_elem = fd_array_map_delete_elem,
1432 	.map_fd_get_ptr = bpf_map_fd_get_ptr,
1433 	.map_fd_put_ptr = bpf_map_fd_put_ptr,
1434 	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
1435 	.map_gen_lookup = array_of_map_gen_lookup,
1436 	.map_lookup_batch = generic_map_lookup_batch,
1437 	.map_update_batch = generic_map_update_batch,
1438 	.map_check_btf = map_check_no_btf,
1439 	.map_mem_usage = array_map_mem_usage,
1440 	.map_btf_id = &array_map_btf_ids[0],
1441 };
1442