xref: /linux/kernel/bpf/arraymap.c (revision 140eb5227767c6754742020a16d2691222b9c19b)
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
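		/* each element is allocated with 8-byte alignment;
		 * elem_size is already a multiple of 8, since
		 * array_map_alloc() rounds value_size up to 8 bytes
		 */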
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	struct bpf_array *array;
	u64 array_size, mask64;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32-bit archs, roundup_pow_of_two() with a max_entries that has
	 * the uppermost bit set in u32 space is undefined behavior due to
	 * the resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}
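	/* For example: attr->max_entries = 5 gives fls_long(4) = 3 and
	 * mask64 = (1ULL << 3) - 1 = 7, so index_mask = 7 and an
	 * unprivileged map is rounded up to 8 entries. For
	 * attr->max_entries = 0x80000001, index_mask = 0xffffffff and
	 * index_mask + 1 wraps to 0 as a u32, tripping the check above.
	 */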

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	if (array_size >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->map.map_flags = attr->map_flags;
	array->map.numa_node = numa_node;
	array->elem_size = elem_size;

	if (!percpu)
		goto out;

	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

	if (array_size >= U32_MAX - PAGE_SIZE ||
	    bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}
out:
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	return &array->map;
}
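/* Worked example: a BPF_MAP_TYPE_ARRAY with value_size = 6 and
 * max_entries = 4 gets elem_size = round_up(6, 8) = 8, so array_size =
 * sizeof(struct bpf_array) + 4 * 8, and with 4 KiB pages map.pages = 1.
 */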

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

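	/* the AND below cannot change an in-bounds index, since
	 * max_entries <= index_mask + 1 by construction; it only limits
	 * how far the CPU can reach speculatively when the bounds check
	 * above is mispredicted (Spectre v1 mitigation)
	 */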
	return array->value + array->elem_size * (index & array->index_mask);
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
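/* Roughly, the sequence emitted above for the unprivileged case is:
 *
 *   r1 += offsetof(struct bpf_array, value)
 *   r0 = *(u32 *)(r2 + 0)            // index
 *   if r0 >= max_entries goto miss   // skip next 4 insns
 *   r0 &= index_mask                 // bound speculation
 *   r0 <<= ilog2(elem_size)          // or r0 *= elem_size
 *   r0 += r1                         // &array->value[index * elem_size]
 *   goto out                         // skip next insn
 * miss:
 *   r0 = 0                           // NULL
 * out:
 */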

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
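/* the caller's value buffer must hold num_possible_cpus() *
 * round_up(value_size, 8) bytes; per-cpu values are packed back to
 * back in possible-cpu order
 */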

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
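/* a NULL or out-of-range key restarts iteration at slot 0, and the
 * last slot returns -ENOENT, so user space can walk every index by
 * feeding each returned key back into BPF_MAP_GET_NEXT_KEY
 */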

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	else
		memcpy(array->value +
		       array->elem_size * (index & array->index_mask),
		       value, map->value_size);
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space will provide round_up(value_size, 8) bytes that
	 * will be copied into the per-cpu area. bpf programs can only
	 * access value_size of it. During lookup the same extra bytes
	 * will be returned, or zeros that percpu_alloc zero-filled, so
	 * no kernel data leaks are possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
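	/* all elements are pre-allocated and always present, so there
	 * is nothing to delete
	 */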
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * have been disconnected from events. Wait for outstanding programs
	 * to complete and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}
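/* note that lookup never returns the raw kernel pointer to user space;
 * map_fd_sys_lookup_elem() converts the stored object into a stable
 * identifier, e.g. the program ID for prog arrays
 */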

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}
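/* xchg() replaces the slot atomically, so a concurrent lookup observes
 * either the old or the new pointer, never a torn value; the displaced
 * object is only released after the swap
 */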

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}
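/* prog arrays are the target of bpf_tail_call(); only programs that
 * pass bpf_prog_array_compatible() above may be stored, so a tail call
 * always lands in a program of a compatible type
 */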
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}
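/* call_rcu() defers fput() and kfree() past a grace period, so BPF
 * programs still dereferencing the entry under rcu_read_lock() cannot
 * see it freed from under them
 */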

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}
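/* on release of map_file, drop every entry that was installed through
 * it; this way closing the map fd does not leave the installed perf
 * events pinned by the map
 */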

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = fd_array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}
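/* inner_map_meta caches the attributes of the map referenced by
 * attr->inner_map_fd; the verifier checks accesses to whichever inner
 * map sits in a slot at run time against this template
 */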

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}
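/* compared to array_map_gen_lookup() two extra instructions are
 * emitted: a BPF_DW load that dereferences the stored inner-map pointer
 * and a JEQ that turns an empty slot into NULL, hence the miss jumps of
 * 5/6 instead of 3/4
 */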

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
};
