xref: /linux/samples/bpf/map_perf_test.bpf.c (revision 34dc1baba215b826e454b8d19e4f24adbeb7d00d)
/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
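/*
 * BPF side of the map stress test in samples/bpf. Each program below
 * attaches to a syscall and hammers one map type with update/lookup/delete
 * cycles; the matching user-space loader (map_perf_test_user.c in the same
 * directory) is expected to drive the measurement by invoking the
 * corresponding syscalls in a tight loop from per-CPU worker threads.
 */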
#include "vmlinux.h"
#include <errno.h>
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

#define MAX_ENTRIES 1000
#define MAX_NR_CPUS 1024

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
} lru_hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_COMMON_LRU);
} nocommon_lru_hash_map SEC(".maps");

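/*
 * Template for the inner maps of array_of_lru_hashs below. BPF_F_NUMA_NODE
 * together with numa_node pins the map's memory to NUMA node 0.
 */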
struct inner_lru {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NUMA_NODE);
	__uint(numa_node, 0);
} inner_lru_hash_map SEC(".maps");

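/*
 * Map-in-map: an array whose values are LRU hash maps shaped like
 * struct inner_lru. Only slot 0 is initialized here; the remaining
 * per-CPU slots are presumably created and inserted by the user-space
 * loader before the test runs.
 */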
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_NR_CPUS);
	__uint(key_size, sizeof(u32));
	__array(values, struct inner_lru); /* use inner_lru as inner map */
} array_of_lru_hashs SEC(".maps") = {
	/* statically initialize the first element */
	.values = { &inner_lru_hash_map },
};

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
} percpu_hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} hash_map_alloc SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} percpu_hash_map_alloc SEC(".maps");

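/*
 * LPM trie keyed by a 4-byte prefix length followed by a 4-byte IPv4
 * address (key_size of 8), matching the key layout built in
 * stress_lpm_trie_map_alloc() below.
 */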
struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(key_size, 8);
	__uint(value_size, sizeof(long));
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} lpm_trie_map_alloc SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} array_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} lru_hash_lookup_map SEC(".maps");

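/*
 * Each stress_* program below is attached at the entry of a different
 * syscall, so user space can trigger exactly one map benchmark at a time
 * simply by calling that syscall in a loop.
 */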
SEC("ksyscall/getuid")
int BPF_KSYSCALL(stress_hmap)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;
	int i;

	for (i = 0; i < 10; i++) {
		bpf_map_update_elem(&hash_map, &key, &init_val, BPF_ANY);
		value = bpf_map_lookup_elem(&hash_map, &key);
		if (value)
			bpf_map_delete_elem(&hash_map, &key);
	}

	return 0;
}

SEC("ksyscall/geteuid")
int BPF_KSYSCALL(stress_percpu_hmap)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;
	int i;

	for (i = 0; i < 10; i++) {
		bpf_map_update_elem(&percpu_hash_map, &key, &init_val, BPF_ANY);
		value = bpf_map_lookup_elem(&percpu_hash_map, &key);
		if (value)
			bpf_map_delete_elem(&percpu_hash_map, &key);
	}
	return 0;
}

SEC("ksyscall/getgid")
int BPF_KSYSCALL(stress_hmap_alloc)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;
	int i;

	for (i = 0; i < 10; i++) {
		bpf_map_update_elem(&hash_map_alloc, &key, &init_val, BPF_ANY);
		value = bpf_map_lookup_elem(&hash_map_alloc, &key);
		if (value)
			bpf_map_delete_elem(&hash_map_alloc, &key);
	}
	return 0;
}

SEC("ksyscall/getegid")
int BPF_KSYSCALL(stress_percpu_hmap_alloc)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;
	int i;

	for (i = 0; i < 10; i++) {
		bpf_map_update_elem(&percpu_hash_map_alloc, &key, &init_val, BPF_ANY);
		value = bpf_map_lookup_elem(&percpu_hash_map_alloc, &key);
		if (value)
			bpf_map_delete_elem(&percpu_hash_map_alloc, &key);
	}
	return 0;
}
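
/*
 * The LRU benchmarks are multiplexed over connect(): the test case and
 * key are carried in the IPv6 destination address. Words 0 and 1 must be
 * the 0xdead/0xbeef magic, word 2 selects the variant (common LRU,
 * no-common LRU, the per-CPU slot of the map-in-map, or a 32-key lookup
 * run), and the last 32 bits optionally hold the starting lookup key.
 */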
SEC("ksyscall/connect")
int BPF_KSYSCALL(stress_lru_hmap_alloc, int fd, struct sockaddr_in *uservaddr,
		 int addrlen)
{
	char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%d\n";
	union {
		u16 dst6[8];
		struct {
			u16 magic0;
			u16 magic1;
			u16 tcase;
			u16 unused16;
			u32 unused32;
			u32 key;
		};
	} test_params;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)uservaddr;
	u16 test_case;
	long val = 1;
	u32 key = 0;
	int ret;

	if (addrlen != sizeof(*in6))
		return 0;

	ret = bpf_probe_read_user(test_params.dst6, sizeof(test_params.dst6),
				  &in6->sin6_addr);
	if (ret)
		goto done;

	if (test_params.magic0 != 0xdead ||
	    test_params.magic1 != 0xbeef)
		return 0;

	test_case = test_params.tcase;
	if (test_case != 3)
		key = bpf_get_prandom_u32();

	if (test_case == 0) {
		ret = bpf_map_update_elem(&lru_hash_map, &key, &val, BPF_ANY);
	} else if (test_case == 1) {
		ret = bpf_map_update_elem(&nocommon_lru_hash_map, &key, &val,
					  BPF_ANY);
	} else if (test_case == 2) {
		void *nolocal_lru_map;
		int cpu = bpf_get_smp_processor_id();

		nolocal_lru_map = bpf_map_lookup_elem(&array_of_lru_hashs,
						      &cpu);
		if (!nolocal_lru_map) {
			ret = -ENOENT;
			goto done;
		}

		ret = bpf_map_update_elem(nolocal_lru_map, &key, &val,
					  BPF_ANY);
	} else if (test_case == 3) {
		u32 i;

		key = test_params.key;

#pragma clang loop unroll(full)
		for (i = 0; i < 32; i++) {
			bpf_map_lookup_elem(&lru_hash_lookup_map, &key);
			key++;
		}
	} else {
		ret = -EINVAL;
	}

done:
	if (ret)
		bpf_trace_printk(fmt, sizeof(fmt), ret);

	return 0;
}

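/*
 * Repeatedly looks up 192.168.0.1 with a /32 prefix length in the LPM
 * trie; the key layout mirrors struct bpf_lpm_trie_key (prefixlen
 * followed by the address bytes).
 */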
SEC("ksyscall/gettid")
int BPF_KSYSCALL(stress_lpm_trie_map_alloc)
{
	union {
		u32 b32[2];
		u8 b8[8];
	} key;
	unsigned int i;

	key.b32[0] = 32;
	key.b8[4] = 192;
	key.b8[5] = 168;
	key.b8[6] = 0;
	key.b8[7] = 1;

#pragma clang loop unroll(full)
	for (i = 0; i < 32; ++i)
		bpf_map_lookup_elem(&lpm_trie_map_alloc, &key);

	return 0;
}

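/* 64 unrolled lookups of a fixed key to measure raw hash-map lookup cost. */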
SEC("ksyscall/getpgid")
int BPF_KSYSCALL(stress_hash_map_lookup)
{
	u32 key = 1, i;
	long *value;

#pragma clang loop unroll(full)
	for (i = 0; i < 64; ++i)
		value = bpf_map_lookup_elem(&hash_map, &key);

	return 0;
}

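/* Same as above, but against the plain array map for comparison. */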
SEC("ksyscall/getppid")
int BPF_KSYSCALL(stress_array_map_lookup)
{
	u32 key = 1, i;
	long *value;

#pragma clang loop unroll(full)
	for (i = 0; i < 64; ++i)
		value = bpf_map_lookup_elem(&array_map, &key);

	return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;