xref: /linux/kernel/bpf/arraymap.c (revision 5fb94e9ca333f0fe1d96de06704a79942b3832c3)
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
static int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
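
/* Illustrative sketch (not part of the original file): a minimal user-space
 * map-create request that satisfies the checks in array_map_alloc_check()
 * above.  The wrapper name create_array_map() is hypothetical and error
 * handling is omitted.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int create_array_map(uint32_t value_size, uint32_t max_entries)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_type = BPF_MAP_TYPE_ARRAY;
 *		attr.key_size = 4;		// keys must be a u32 index
 *		attr.value_size = value_size;	// > 0 and <= KMALLOC_MAX_SIZE
 *		attr.max_entries = max_entries;	// > 0
 *
 *		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 */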

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * the uppermost bit set in u32 space is undefined behavior due to
	 * the resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);
	if (percpu) {
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
		if (cost >= U32_MAX - PAGE_SIZE)
			return ERR_PTR(-ENOMEM);
	}
	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_precharge_memlock(cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->map.pages = cost;
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}
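
/* Worked example (editor's note, not in the original source): with
 * attr->max_entries == 5, fls_long(4) == 3, so mask64 == (1ULL << 3) - 1 == 7
 * and index_mask == 7; an unprivileged creator then gets max_entries rounded
 * up to 8, so speculative loads masked with index_mask stay inside the
 * allocation.  With attr->max_entries == U32_MAX, index_mask == U32_MAX and
 * index_mask + 1 wraps to 0 in u32, which the overflow check above rejects
 * with -E2BIG for unprivileged users.
 */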

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
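
/* Editor's note: the instruction sequence emitted above corresponds roughly
 * to the following C, with R1 holding the map pointer (which is also the
 * start of struct bpf_array) and R2 pointing at the u32 key; the result is
 * returned in R0.  The AND with index_mask is only emitted for unprivileged
 * maps to bound speculative accesses.
 *
 *	u32 index = *(u32 *)key;
 *
 *	if (index >= map->max_entries)
 *		return NULL;
 *	index &= array->index_mask;	// unprivileged maps only
 *	return array->value + index * round_up(map->value_size, 8);
 */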

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
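
/* Illustrative sketch (not part of the original file): reading one element of
 * a BPF_MAP_TYPE_PERCPU_ARRAY from user space.  The lookup fills
 * round_up(value_size, 8) bytes for every possible CPU, so the buffer must be
 * sized accordingly.  The helper name and the way the number of possible CPUs
 * is obtained (e.g. from /sys/devices/system/cpu/possible) are assumptions;
 * includes are the same as in the create_array_map() sketch above.
 *
 *	static int read_percpu_elem(int map_fd, uint32_t index, void *buf)
 *	{
 *		union bpf_attr attr;
 *
 *		// buf must hold ncpus * round_up(value_size, 8) bytes
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_fd = map_fd;
 *		attr.key = (uint64_t)(unsigned long)&index;
 *		attr.value = (uint64_t)(unsigned long)buf;
 *
 *		return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *	}
 */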

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
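
/* Illustrative sketch (not part of the original file): iterating an array map
 * from user space with BPF_MAP_GET_NEXT_KEY.  Starting with an out-of-range
 * key restarts at index 0, and the syscall returns -ENOENT once the last
 * index has been handed out.  Error handling is omitted and the includes are
 * the same as in the create_array_map() sketch above.
 *
 *	uint32_t key = UINT32_MAX, next;
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_fd = map_fd;
 *	attr.key = (uint64_t)(unsigned long)&key;
 *	attr.next_key = (uint64_t)(unsigned long)&next;
 *
 *	while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)) == 0) {
 *		// ... use index 'next', e.g. look up its value ...
 *		key = next;
 *	}
 */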

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	else
		memcpy(array->value +
		       array->elem_size * (index & array->index_mask),
		       value, map->value_size);
	return 0;
}
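
/* Illustrative sketch (not part of the original file): how a BPF program
 * typically uses an array map.  This is written in the style of samples/bpf
 * and assumes its bpf_helpers.h (struct bpf_map_def, SEC(), helper stubs) is
 * available; the map and function names are made up.
 *
 *	#include <uapi/linux/bpf.h>
 *	#include "bpf_helpers.h"
 *
 *	struct bpf_map_def SEC("maps") counters = {
 *		.type = BPF_MAP_TYPE_ARRAY,
 *		.key_size = sizeof(__u32),
 *		.value_size = sizeof(__u64),
 *		.max_entries = 16,
 *	};
 *
 *	SEC("socket")
 *	int count_packets(struct __sk_buff *skb)
 *	{
 *		__u32 key = 0;
 *		__u64 *val;
 *
 *		// elements are pre-allocated, so a valid index always exists
 *		val = bpf_map_lookup_elem(&counters, &key);
 *		if (val)
 *			__sync_fetch_and_add(val, 1);
 *		return 0;
 *	}
 */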

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space provides round_up(value_size, 8) bytes that are copied
	 * into the per-cpu area.  BPF programs can only access value_size of
	 * it.  During lookup the same extra bytes are returned, or zeros that
	 * were zero-filled by percpu_alloc, so no kernel data can leak.
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * have been disconnected from events.  Wait for outstanding programs
	 * to complete and free the array.
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map, const struct btf *btf,
			       u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	u32 int_data;

	key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
	if (!key_type || BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key.  This check makes
	 * sure that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || key_size != 4 ||
	    BTF_INT_OFFSET(int_data))
		return -EINVAL;

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size > map->value_size)
		return -EINVAL;

	return 0;
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = bpf_fd_array_map_clear,
};
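
/* Illustrative sketch (not part of the original file): tail calls through a
 * BPF_MAP_TYPE_PROG_ARRAY.  User space stores program fds in the slots via
 * BPF_MAP_UPDATE_ELEM (handled by bpf_fd_array_map_update_elem() above); the
 * program then jumps through a slot with bpf_tail_call().  Written in the
 * style of samples/bpf, assuming its bpf_helpers.h as in the earlier sketch;
 * names are made up.
 *
 *	struct bpf_map_def SEC("maps") jmp_table = {
 *		.type = BPF_MAP_TYPE_PROG_ARRAY,
 *		.key_size = sizeof(__u32),
 *		.value_size = sizeof(__u32),	// each value is a prog fd
 *		.max_entries = 8,
 *	};
 *
 *	SEC("socket")
 *	int dispatch(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, 2);
 *		// reached only if slot 2 is empty or out of range
 *		return 0;
 *	}
 */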

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};
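
/* Illustrative sketch (not part of the original file): pushing samples to a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY.  User space opens one perf event per CPU and
 * stores each fd at that CPU's index (perf_event_fd_array_get_ptr() above
 * accepts the fd); the program then emits records with
 * bpf_perf_event_output().  Written in the style of samples/bpf, assuming its
 * bpf_helpers.h as in the earlier sketches; names are made up.
 *
 *	struct bpf_map_def SEC("maps") events = {
 *		.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 *		.key_size = sizeof(int),
 *		.value_size = sizeof(__u32),	// one perf event fd per slot
 *		.max_entries = 64,		// >= number of possible CPUs
 *	};
 *
 *	SEC("kprobe/sys_write")
 *	int trace_write(struct pt_regs *ctx)
 *	{
 *		__u64 sample = 42;		// arbitrary payload
 *
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &sample, sizeof(sample));
 *		return 0;
 *	}
 */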

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed from the syscall path,
	 * which is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}
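
/* Illustrative sketch (not part of the original file): creating a
 * BPF_MAP_TYPE_ARRAY_OF_MAPS from user space.  A template inner map fd is
 * passed as attr.inner_map_fd so that bpf_map_meta_alloc() above can record
 * the inner map's attributes; the slots are then filled with fds of maps that
 * match the template.  Variable names are made up, error handling is omitted,
 * and the includes are the same as in the create_array_map() sketch above.
 *
 *	union bpf_attr attr;
 *	int outer_fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_type = BPF_MAP_TYPE_ARRAY_OF_MAPS;
 *	attr.key_size = 4;
 *	attr.value_size = sizeof(__u32);	// each value is an inner map fd
 *	attr.max_entries = 8;
 *	attr.inner_map_fd = inner_fd;		// template map created earlier
 *
 *	outer_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */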

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
};
753