/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}
/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	struct bpf_array *array;
	u64 array_size;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;
	index_mask = roundup_pow_of_two(max_entries) - 1;

	if (unpriv)
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
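	/* Worked example: with max_entries == 5, roundup_pow_of_two()
	 * yields 8 and index_mask == 7, so an unprivileged map allocates
	 * 8 elements; a speculative access that slips past the bounds
	 * check is masked into storage the map actually owns (see the
	 * BPF_AND emitted in array_map_gen_lookup() below).
	 */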

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	if (array_size >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->map.map_flags = attr->map_flags;
	array->map.numa_node = numa_node;
	array->elem_size = elem_size;

	if (!percpu)
		goto out;

	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

	if (array_size >= U32_MAX - PAGE_SIZE ||
	    bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}
out:
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	return &array->map;
}
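
/* A minimal user space sketch (not part of this kernel file) of
 * creating such a map with the bpf(2) syscall; BPF_MAP_TYPE_ARRAY
 * requires a 4-byte key and a non-zero value_size, as checked above:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,
 *		.value_size  = sizeof(long),
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */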

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
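
/* Rough C equivalent of the sequence emitted above; the verifier
 * patches these instructions in place of the helper call, and the
 * index mask is applied only for unprivileged maps:
 *
 *	elem = NULL;
 *	if (index < max_entries) {
 *		if (unpriv_array)
 *			index &= index_mask;
 *		elem = array->value + elem_size * index;
 *	}
 *	return elem;
 */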

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
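
/* User space must therefore supply a buffer of
 * round_up(value_size, 8) * num_possible_cpus() bytes for a per-cpu
 * lookup. A sketch, assuming ncpus was obtained in user space (e.g.
 * by parsing /sys/devices/system/cpu/possible):
 *
 *	size_t elem = (value_size + 7) & ~7UL;
 *	void *buf = malloc(elem * ncpus);	// slot i holds CPU i's value
 */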

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
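
/* Given these semantics (a NULL or out-of-range key restarts at index
 * 0, the last index returns -ENOENT), a user space walk over every key
 * with libbpf's bpf_map_get_next_key() might look like:
 *
 *	__u32 key, next_key;
 *	int err;
 *
 *	for (err = bpf_map_get_next_key(map_fd, NULL, &next_key); !err;
 *	     err = bpf_map_get_next_key(map_fd, &key, &next_key))
 *		key = next_key;		// process next_key here
 */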

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	else
		memcpy(array->value +
		       array->elem_size * (index & array->index_mask),
		       value, map->value_size);
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space provides round_up(value_size, 8) bytes per CPU that
	 * are copied into the per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes are
	 * returned, or zeros that percpu_alloc zero-filled, so no kernel
	 * data can leak
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events. Wait for outstanding programs
	 * to complete and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}
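
/* The stored kernel pointer is never handed back to user space;
 * map_fd_sys_lookup_elem() converts it to a stable u32 identifier
 * instead (for prog arrays this is the program ID, see
 * prog_fd_array_sys_lookup_elem() below). A sketch of such a lookup
 * with libbpf:
 *
 *	__u32 key = 0, id;
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, &id))
 *		printf("slot 0 holds id %u\n", id);
 */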

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
};
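
/* The main consumer of a prog array is bpf_tail_call() from BPF
 * program context. A minimal sketch on the BPF side, assuming
 * "jmp_table" is a BPF_MAP_TYPE_PROG_ARRAY visible to the program and
 * user space has populated slot 0 with a compatible program fd:
 *
 *	bpf_tail_call(ctx, &jmp_table, 0);
 *	// control only reaches here if the tail call failed
 *	// (out-of-range index, empty slot, or chain limit hit)
 */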

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};
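
/* From BPF program context this map type is typically consumed by the
 * bpf_perf_event_output() helper. A sketch, assuming "events" is a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY whose slots user space filled with
 * perf event fds:
 *
 *	struct event data = { ... };
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 */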

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees the cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
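
/* From BPF program context a cgroup array feeds helpers such as
 * bpf_current_task_under_cgroup(). A sketch, assuming slot 0 of
 * "cgrp_map" was populated by user space with a cgroup fd:
 *
 *	if (bpf_current_task_under_cgroup(&cgrp_map, 0))
 *		return 0;	// current task is inside the cgroup
 */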
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = fd_array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}
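
/* User space sketch of creating an array of maps: attr.inner_map_fd
 * names an existing map whose attributes become the template that every
 * later inserted map must match (value_size must be 4, since elements
 * are map fds):
 *
 *	union bpf_attr attr = {
 *		.map_type     = BPF_MAP_TYPE_ARRAY_OF_MAPS,
 *		.key_size     = 4,
 *		.value_size   = 4,
 *		.max_entries  = 16,
 *		.inner_map_fd = inner_fd,	// fd of the template map
 *	};
 *	int outer_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */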

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed from syscall context,
	 * which is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

/* as array_map_gen_lookup(), plus one more load to fetch the inner
 * map pointer stored in the element; a NULL slot yields NULL
 */
static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
};