// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
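/*
 * Verify that BPF programs can read kernel map internals: for each map
 * type below, a checker casts the map definition to a mirror of the
 * kernel's struct (struct bpf_map plus the per-type container) and
 * checks that the fields it reads match what the definition declares.
 */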

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define LOOP_BOUND 0xf
#define MAX_ENTRIES 8
#define HALF_ENTRIES (MAX_ENTRIES >> 1)

_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");

enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;
int page_size = 0; /* userspace should set it */

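/*
 * VERIFY_TYPE() records the map type under test and aborts the program
 * if its checker fails; VERIFY() records the current line in g_line so
 * userspace can report exactly which check failed.
 */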
#define VERIFY_TYPE(type, func) ({	\
	g_map_type = type;		\
	if (!func())			\
		return 0;		\
})

#define VERIFY(expr) ({		\
	g_line = __LINE__;	\
	if (!(expr))		\
		return 0;	\
})

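/*
 * Minimal mirror of the kernel's struct bpf_map. preserve_access_index
 * makes clang emit CO-RE relocations, so field offsets are fixed up
 * against the running kernel's BTF at load time.
 */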
struct bpf_map {
	enum bpf_map_type map_type;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 id;
} __attribute__((preserve_access_index));

static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
				       __u32 value_size, __u32 max_entries)
{
	VERIFY(map->map_type == g_map_type);
	VERIFY(map->key_size == key_size);
	VERIFY(map->value_size == value_size);
	VERIFY(map->max_entries == max_entries);
	VERIFY(map->id > 0);

	return 1;
}

static inline int check_bpf_map_ptr(struct bpf_map *indirect,
				    struct bpf_map *direct)
{
	VERIFY(indirect->map_type == direct->map_type);
	VERIFY(indirect->key_size == direct->key_size);
	VERIFY(indirect->value_size == direct->value_size);
	VERIFY(indirect->max_entries == direct->max_entries);
	VERIFY(indirect->id == direct->id);

	return 1;
}

static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
			__u32 key_size, __u32 value_size, __u32 max_entries)
{
	VERIFY(check_bpf_map_ptr(indirect, direct));
	VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
				    max_entries));
	return 1;
}

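/*
 * Most maps below use 4-byte keys and values and MAX_ENTRIES entries;
 * check_default() covers that common case.
 */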
static inline int check_default(struct bpf_map *indirect,
				struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

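/*
 * Same as check_default(), but __noinline forces a real BPF-to-BPF
 * call, so map pointers are also exercised as subprogram arguments.
 */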
static __noinline int
check_default_noinline(struct bpf_map *indirect, struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

typedef struct {
	int counter;
} atomic_t;

struct bpf_htab {
	struct bpf_map map;
	atomic_t count;
	__u32 n_buckets;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_hash SEC(".maps");

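/*
 * With BPF_F_NO_PREALLOC the htab allocates elements on demand and
 * tracks them in count, so the counter must equal the number of
 * successful updates.
 */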
static inline int check_hash(void)
{
	struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
	struct bpf_map *map = (struct bpf_map *)&m_hash;
	int i;

	VERIFY(check_default_noinline(&hash->map, map));

	VERIFY(hash->n_buckets == MAX_ENTRIES);
	VERIFY(hash->elem_size == 64);

	VERIFY(hash->count.counter == 0);
	for (i = 0; i < HALF_ENTRIES; ++i) {
		const __u32 key = i;
		const __u32 val = 1;

		if (bpf_map_update_elem(hash, &key, &val, 0))
			return 0;
	}
	VERIFY(hash->count.counter == HALF_ENTRIES);

	return 1;
}

struct bpf_array {
	struct bpf_map map;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_array SEC(".maps");

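/*
 * Loops must be provably bounded for the verifier: the walk below is
 * capped by LOOP_BOUND, which the static assert above guarantees is
 * large enough to cover all MAX_ENTRIES slots.
 */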
static inline int check_array(void)
{
	struct bpf_array *array = (struct bpf_array *)&m_array;
	struct bpf_map *map = (struct bpf_map *)&m_array;
	int i, n_lookups = 0, n_keys = 0;

	VERIFY(check_default(&array->map, map));

	VERIFY(array->elem_size == 8);

	for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
		const __u32 key = i;
		__u32 *val = bpf_map_lookup_elem(array, &key);

		++n_lookups;
		if (val)
			++n_keys;
	}

	VERIFY(n_lookups == MAX_ENTRIES);
	VERIFY(n_keys == MAX_ENTRIES);

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_prog_array SEC(".maps");

static inline int check_prog_array(void)
{
	struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
	struct bpf_map *map = (struct bpf_map *)&m_prog_array;

	VERIFY(check_default(&prog_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_perf_event_array SEC(".maps");

static inline int check_perf_event_array(void)
{
	struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
	struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;

	VERIFY(check_default(&perf_event_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_hash SEC(".maps");

static inline int check_percpu_hash(void)
{
	struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;

	VERIFY(check_default(&percpu_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_array SEC(".maps");

static inline int check_percpu_array(void)
{
	struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_array;

	VERIFY(check_default(&percpu_array->map, map));

	return 1;
}

struct bpf_stack_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u64);
} m_stack_trace SEC(".maps");

static inline int check_stack_trace(void)
{
	struct bpf_stack_map *stack_trace =
		(struct bpf_stack_map *)&m_stack_trace;
	struct bpf_map *map = (struct bpf_map *)&m_stack_trace;

	VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
		     MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cgroup_array SEC(".maps");

static inline int check_cgroup_array(void)
{
	struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;

	VERIFY(check_default(&cgroup_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_hash SEC(".maps");

static inline int check_lru_hash(void)
{
	struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_hash;

	VERIFY(check_default(&lru_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_percpu_hash SEC(".maps");

static inline int check_lru_percpu_hash(void)
{
	struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;

	VERIFY(check_default(&lru_percpu_hash->map, map));

	return 1;
}

struct lpm_trie {
	struct bpf_map map;
} __attribute__((preserve_access_index));

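/*
 * LPM keys must start with struct bpf_lpm_trie_key (the prefix length
 * in bits) followed by the data to match; LPM tries also require
 * BPF_F_NO_PREALLOC.
 */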
struct lpm_key {
	struct bpf_lpm_trie_key trie_key;
	__u32 data;
};

struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct lpm_key);
	__type(value, __u32);
} m_lpm_trie SEC(".maps");

static inline int check_lpm_trie(void)
{
	struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
	struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;

	VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
		     MAX_ENTRIES));

	return 1;
}

#define INNER_MAX_ENTRIES 1234

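/*
 * Outer maps are initialized declaratively: __array(values, ...) gives
 * the inner map definition, and the .values initializer pins concrete
 * inner maps into outer slots at load time.
 */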
struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, INNER_MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} inner_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, INNER_MAX_ENTRIES);
		__type(key, __u32);
		__type(value, __u32);
	});
} m_array_of_maps SEC(".maps") = {
	.values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
};

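/*
 * A lookup in a map-of-maps returns the inner map's struct bpf_map
 * pointer, so the inner map's fields can be checked directly.
 */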
static inline int check_array_of_maps(void)
{
	struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;
	struct bpf_array *inner_map;
	int key = 0;

	VERIFY(check_default(&array_of_maps->map, map));
	inner_map = bpf_map_lookup_elem(array_of_maps, &key);
	VERIFY(inner_map != NULL);
	VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct inner_map);
} m_hash_of_maps SEC(".maps") = {
	.values = {
		[2] = &inner_map,
	},
};

static inline int check_hash_of_maps(void)
{
	struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;
	struct bpf_htab *inner_map;
	int key = 2;

	VERIFY(check_default(&hash_of_maps->map, map));
	inner_map = bpf_map_lookup_elem(hash_of_maps, &key);
	VERIFY(inner_map != NULL);
	VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);

	return 1;
}

struct bpf_dtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap SEC(".maps");

static inline int check_devmap(void)
{
	struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
	struct bpf_map *map = (struct bpf_map *)&m_devmap;

	VERIFY(check_default(&devmap->map, map));

	return 1;
}

struct bpf_stab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockmap SEC(".maps");

static inline int check_sockmap(void)
{
	struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
	struct bpf_map *map = (struct bpf_map *)&m_sockmap;

	VERIFY(check_default(&sockmap->map, map));

	return 1;
}

struct bpf_cpu_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cpumap SEC(".maps");

static inline int check_cpumap(void)
{
	struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
	struct bpf_map *map = (struct bpf_map *)&m_cpumap;

	VERIFY(check_default(&cpumap->map, map));

	return 1;
}

struct xsk_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_xskmap SEC(".maps");

static inline int check_xskmap(void)
{
	struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
	struct bpf_map *map = (struct bpf_map *)&m_xskmap;

	VERIFY(check_default(&xskmap->map, map));

	return 1;
}

struct bpf_shtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockhash SEC(".maps");

static inline int check_sockhash(void)
{
	struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
	struct bpf_map *map = (struct bpf_map *)&m_sockhash;

	VERIFY(check_default(&sockhash->map, map));

	return 1;
}

struct bpf_cgroup_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

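/*
 * Cgroup storage maps are created without max_entries (the kernel
 * allocates storage per attached cgroup), so check() expects 0 here.
 */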
struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_cgroup_storage SEC(".maps");

static inline int check_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;

	VERIFY(check(&cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct reuseport_array {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_reuseport_sockarray SEC(".maps");

static inline int check_reuseport_sockarray(void)
{
	struct reuseport_array *reuseport_sockarray =
		(struct reuseport_array *)&m_reuseport_sockarray;
	struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;

	VERIFY(check_default(&reuseport_sockarray->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_percpu_cgroup_storage SEC(".maps");

static inline int check_percpu_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *percpu_cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;

	VERIFY(check(&percpu_cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct bpf_queue_stack {
	struct bpf_map map;
} __attribute__((preserve_access_index));

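/* Queue and stack maps are keyless, hence the zero key_size below. */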
struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_queue SEC(".maps");

static inline int check_queue(void)
{
	struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
	struct bpf_map *map = (struct bpf_map *)&m_queue;

	VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_STACK);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_stack SEC(".maps");

static inline int check_stack(void)
{
	struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
	struct bpf_map *map = (struct bpf_map *)&m_stack;

	VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct bpf_local_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

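/*
 * sk_storage must be created with BPF_F_NO_PREALLOC; storage is
 * allocated per socket on demand, so max_entries is 0.
 */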
struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, __u32);
	__type(value, __u32);
} m_sk_storage SEC(".maps");

static inline int check_sk_storage(void)
{
	struct bpf_local_storage_map *sk_storage =
		(struct bpf_local_storage_map *)&m_sk_storage;
	struct bpf_map *map = (struct bpf_map *)&m_sk_storage;

	VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap_hash SEC(".maps");

static inline int check_devmap_hash(void)
{
	struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
	struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;

	VERIFY(check_default(&devmap_hash->map, map));

	return 1;
}

struct bpf_ringbuf_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

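/*
 * A ringbuf's max_entries is its buffer size in bytes. Userspace is
 * expected to mirror the size it chose into the page_size global
 * before the program runs, so the check below can compare them.
 */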
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
} m_ringbuf SEC(".maps");

static inline int check_ringbuf(void)
{
	struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
	struct bpf_map *map = (struct bpf_map *)&m_ringbuf;

	VERIFY(check(&ringbuf->map, map, 0, 0, page_size));

	return 1;
}

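/*
 * Entry point: run every checker once. On failure the program returns
 * 0, and userspace reads g_map_type and g_line to locate the failing
 * check.
 */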
SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
	VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
	VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
	VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
		    check_reuseport_sockarray);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
		    check_percpu_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
	VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);

	return 1;
}

char _license[] SEC("license") = "GPL";