// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Facebook
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sched.h>
#include <stdlib.h>
#include <time.h>

#include <sys/wait.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "bpf_util.h"
#include "../../../include/linux/filter.h"

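/* Free-list targets used to size the test maps below.  The common-LRU
 * tests derive their map sizes from these via __map_size()/__tgt_size();
 * the values are assumed to mirror the kernel's internal LRU free targets.
 */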
#define LOCAL_FREE_TARGET	(128)
#define PERCPU_FREE_TARGET	(4)

static int nr_cpus;

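/* Thin wrapper around bpf_map_create(): unsigned long long key/value,
 * 'size' max entries and the given map_flags.
 */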
static int create_map(int map_type, int map_flags, unsigned int size)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags);
	int map_fd;

	map_fd = bpf_map_create(map_type, NULL, sizeof(unsigned long long),
				sizeof(unsigned long long), size, &opts);

	if (map_fd == -1)
		perror("bpf_map_create");

	return map_fd;
}

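/* Look up 'key' in 'fd' from BPF program context rather than via the
 * bpf() syscall: a small SCHED_CLS program calls bpf_map_lookup_elem(),
 * copies the value into a scratch ARRAY map and returns 42 on a hit
 * (1 on a miss).  Running it through bpf_prog_test_run_opts() makes the
 * lookup count as a "datapath" access, which sets the LRU reference bit;
 * a plain syscall lookup does not.
 */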
static int bpf_map_lookup_elem_with_ref_bit(int fd, unsigned long long key,
					    void *value)
{
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_9, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, fd),
		BPF_LD_IMM64(BPF_REG_3, key),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
		BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_1, 0),
		BPF_MOV64_IMM(BPF_REG_0, 42),
		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};
	__u8 data[64] = {};
	int mfd, pfd, ret, zero = 0;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = data,
		.data_size_in = sizeof(data),
		.repeat = 1,
	);

	mfd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(__u64), 1, NULL);
	if (mfd < 0)
		return -1;

	insns[0].imm = mfd;

	pfd = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, NULL, "GPL", insns, ARRAY_SIZE(insns), NULL);
	if (pfd < 0) {
		close(mfd);
		return -1;
	}

	ret = bpf_prog_test_run_opts(pfd, &topts);
	if (ret < 0 || topts.retval != 42) {
		ret = -1;
	} else {
		assert(!bpf_map_lookup_elem(mfd, &zero, value));
		ret = 0;
	}
	close(pfd);
	close(mfd);
	return ret;
}

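/* Return 1 if every key in map1 is also present in map0 with the same
 * (first) value, 0 otherwise.  map_equal() below applies it both ways.
 */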
static int map_subset(int map0, int map1)
{
	unsigned long long next_key = 0;
	unsigned long long value0[nr_cpus], value1[nr_cpus];
	int ret;

	while (!bpf_map_get_next_key(map1, &next_key, &next_key)) {
		assert(!bpf_map_lookup_elem(map1, &next_key, value1));
		ret = bpf_map_lookup_elem(map0, &next_key, value0);
		if (ret) {
			printf("key:%llu not found in map. %s(%d)\n",
			       next_key, strerror(errno), errno);
			return 0;
		}
		if (value0[0] != value1[0]) {
			printf("key:%llu value0:%llu != value1:%llu\n",
			       next_key, value0[0], value1[0]);
			return 0;
		}
	}
	return 1;
}

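/* Two maps are equal iff each one is a subset of the other. */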
static int map_equal(int lru_map, int expected)
{
	return map_subset(lru_map, expected) && map_subset(expected, lru_map);
}

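/* Pin 'pid' (0 == calling thread) to the next online CPU at or above
 * *next_to_try.  Returns 0 on success and advances *next_to_try past the
 * CPU that was used; returns -1 once no further CPU can be scheduled on.
 */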
static int sched_next_online(int pid, int *next_to_try)
{
	cpu_set_t cpuset;
	int next = *next_to_try;
	int ret = -1;

	while (next < nr_cpus) {
		CPU_ZERO(&cpuset);
		CPU_SET(next, &cpuset);
		next++;
		if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
			ret = 0;
			break;
		}
	}

	*next_to_try = next;
	return ret;
}

/* Derive target_free from map_size, same as bpf_common_lru_populate */
static unsigned int __tgt_size(unsigned int map_size)
{
	return (map_size / nr_cpus) / 2;
}

/* Inverse of how bpf_common_lru_populate derives target_free from map_size. */
static unsigned int __map_size(unsigned int tgt_free)
{
	return tgt_free * nr_cpus * 2;
}

/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup key=1
 * Add key=3
 *   => key=2 will be removed by LRU
 * Iterate the map.  Only key=1 and key=3 are found.
 */
static void test_lru_sanity0(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* insert key=1 element */

	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
	/* key=1 already exists */

	assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -EINVAL);

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
	/* key=2 is not there */

	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* check that key=1 can be found and mark the ref bit to
	 * stop LRU from removing key=1
	 */
	key = 1;
	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
	assert(value[0] == 1234);

	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* key=2 has been removed from the LRU */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* lookup elem key=1 and delete it, then check it doesn't exist */
	key = 1;
	assert(!bpf_map_lookup_and_delete_elem(lru_map_fd, &key, &value));
	assert(value[0] == 1234);

	/* remove the same element from the expected map */
	assert(!bpf_map_delete_elem(expected_map_fd, &key));

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Verify that unreferenced elements are recycled before referenced ones.
 * Insert elements.
 * Reference a subset of these.
 * Insert more, enough to trigger recycling.
 * Verify that the unreferenced ones are recycled.
 */
static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* This test is only applicable to common LRU list */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = __map_size(tgt_free) + batch_size;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert map_size - batch_size keys */
	end_key = 1 + __map_size(tgt_free);
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Lookup 1 to batch_size */
	end_key = 1 + batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	/* Insert another map_size - batch_size keys.
	 * The map will then contain 1 to batch_size plus these latest keys,
	 * i.e. the previous keys 1+batch_size to map_size - batch_size will
	 * have been removed by LRU.
	 */
	key = 1 + __map_size(tgt_free);
	end_key = key + __map_size(tgt_free);
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Verify that insertions exceeding the map size will recycle the oldest.
 * Verify that unreferenced elements are recycled before referenced ones.
 */
static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* This test is only applicable to common LRU list */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = __map_size(tgt_free) + batch_size;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert map_size - batch_size keys */
	end_key = 1 + __map_size(tgt_free);
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Any bpf_map_update_elem will first need to acquire a new node
	 * from the LRU.
	 *
	 * The local list has run out of free nodes, so nodes are taken
	 * from the global LRU list, which tries to shrink the inactive
	 * list to obtain tgt_free free nodes.
	 *
	 * Hence, the oldest key is removed from the LRU list.
	 */
	key = 1;
	if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_delete_elem(lru_map_fd, &key));
	} else {
		assert(bpf_map_update_elem(lru_map_fd, &key, value,
					   BPF_EXIST));
	}

	/* Re-insert 1 to batch_size again and do a lookup immediately. */
	end_key = 1 + batch_size;
	value[0] = 4321;
	for (key = 1; key < end_key; key++) {
		assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(value[0] == 4321);
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	value[0] = 1234;

	/* Insert batch_size new elements */
	key = 1 + __map_size(tgt_free);
	end_key = key + batch_size;
	for (; key < end_key; key++)
		/* These newly added but not referenced keys will be
		 * gone during the next LRU shrink.
		 */
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Insert map_size - batch_size elements */
	end_key += __map_size(tgt_free);
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Test the active/inactive list rotation
 *
 * Fill the whole map, deplete the free list.
 * Reference all except the last lru->target_free elements.
 * Insert lru->target_free new elements. This triggers one shrink.
 * Verify that the non-referenced elements are replaced.
 */
static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* This test is only applicable to common LRU list */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = __tgt_size(tgt_free);

	map_size = tgt_free * 2;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Fill the map */
	end_key = 1 + map_size;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Reference all but the last batch_size */
	end_key = 1 + map_size - batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	/* Insert new batch_size: replaces the non-referenced elements */
	key = 2 * tgt_free + 1;
	end_key = key + batch_size;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Test deletion */
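/* Keys are inserted, half of them are referenced and tracked in the
 * expected map, the other half are deleted (a second delete of the same
 * key must fail), then fresh keys are inserted and both maps are compared.
 */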
static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
{
	int lru_map_fd, expected_map_fd;
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					3 * tgt_free * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags,
					3 * __map_size(tgt_free));
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0,
				     3 * tgt_free);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	for (key = 1; key <= 2 * tgt_free; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

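	/* key=1 is already in the map, so an update with BPF_NOEXIST
	 * must fail
	 */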
	key = 1;
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	for (key = 1; key <= tgt_free; key++) {
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	for (; key <= 2 * tgt_free; key++) {
		assert(!bpf_map_delete_elem(lru_map_fd, &key));
		assert(bpf_map_delete_elem(lru_map_fd, &key));
	}

	end_key = key + 2 * tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

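/* Child body for test_lru_sanity5(): runs pinned to one CPU against a
 * one-element LRU map.  It must still find the key inserted by the
 * previous CPU, then inserting a new key evicts that old one.
 */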
static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
{
	unsigned long long key, value[nr_cpus];

	/* Ensure the last key inserted by the previous CPU can be found */
	assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, last_key, value));
	value[0] = 1234;

	key = last_key + 1;
	assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, key, value));

	/* Cannot find the last key because it was removed by LRU */
	assert(bpf_map_lookup_elem(map_fd, &last_key, value) == -ENOENT);
}

/* Test map with only one element */
static void test_lru_sanity5(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int next_cpu = 0;
	int map_fd;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	map_fd = create_map(map_type, map_flags, 1);
	assert(map_fd != -1);

	value[0] = 1234;
	key = 0;
	assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));

	while (sched_next_online(0, &next_cpu) != -1) {
		pid_t pid;

		pid = fork();
		if (pid == 0) {
			do_test_lru_sanity5(key, map_fd);
			exit(0);
		} else if (pid == -1) {
			printf("couldn't spawn process to test key:%llu\n",
			       key);
			exit(1);
		} else {
			int status;

			assert(waitpid(pid, &status, 0) == pid);
			assert(status == 0);
			key++;
		}
	}

	close(map_fd);
	/* At least one key should be tested */
	assert(key > 0);

	printf("Pass\n");
}

/* Test list rotation for BPF_F_NO_COMMON_LRU map */
static void test_lru_sanity6(int map_type, int map_flags, int tgt_free)
{
	int lru_map_fd, expected_map_fd;
	unsigned long long key, value[nr_cpus];
	unsigned int map_size = tgt_free * 2;
	int next_cpu = 0;

	if (!(map_flags & BPF_F_NO_COMMON_LRU))
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	lru_map_fd = create_map(map_type, map_flags, map_size * nr_cpus);
	assert(lru_map_fd != -1);

	value[0] = 1234;

	for (key = 1; key <= tgt_free; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	for (; key <= tgt_free * 2; key++) {
		unsigned long long stable_key;

		/* Make ref bit sticky for key: [1, tgt_free] */
		for (stable_key = 1; stable_key <= tgt_free; stable_key++) {
			/* Mark the ref bit */
			assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd,
								 stable_key, value));
		}
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	for (; key <= tgt_free * 3; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup key=1 (datapath)
 * Lookup key=2 (syscall)
 * Add key=3
 *   => key=2 will be removed by LRU
 * Iterate the map.  Only key=1 and key=3 are found.
 */
static void test_lru_sanity7(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* insert key=1 element */

	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
	/* key=1 already exists */

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
	/* key=2 is not there */

	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* check that key=1 can be found and mark the ref bit to
	 * stop LRU from removing key=1
	 */
	key = 1;
	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
	assert(value[0] == 1234);

	/* check that key=2 can be found but do _not_ mark the ref bit.
	 * It will be evicted on the next update.
	 */
	key = 2;
	assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
	assert(value[0] == 1234);

	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* key=2 has been removed from the LRU */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup key=1 (syscall)
 * Lookup key=2 (datapath)
 * Add key=3
 *   => key=1 will be removed by LRU
 * Iterate the map.  Only key=2 and key=3 are found.
 */
static void test_lru_sanity8(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* insert key=1 element */

	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
	/* key=1 already exists */

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
	/* key=2 is not there */

	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	/* check that key=1 can be found but do _not_ mark the ref bit.
	 * It will be evicted on the next update.
	 */
	key = 1;
	assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
	assert(value[0] == 1234);

	/* check that key=2 can be found and mark the ref bit to
	 * stop LRU from removing key=2
	 */
	key = 2;
	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
	assert(value[0] == 1234);

	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* key=1 has been removed from the LRU */
	key = 1;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

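/* Run every sanity test for each LRU map type, with and without
 * BPF_F_NO_COMMON_LRU.  The per-flag free target feeds the size
 * calculations in the individual tests.
 */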
int main(int argc, char **argv)
{
	int map_types[] = {BPF_MAP_TYPE_LRU_HASH,
			   BPF_MAP_TYPE_LRU_PERCPU_HASH};
	int map_flags[] = {0, BPF_F_NO_COMMON_LRU};
	int t, f;

	setbuf(stdout, NULL);

	nr_cpus = bpf_num_possible_cpus();
	assert(nr_cpus != -1);
	printf("nr_cpus:%d\n\n", nr_cpus);

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	for (f = 0; f < ARRAY_SIZE(map_flags); f++) {
		unsigned int tgt_free = (map_flags[f] & BPF_F_NO_COMMON_LRU) ?
			PERCPU_FREE_TARGET : LOCAL_FREE_TARGET;

		for (t = 0; t < ARRAY_SIZE(map_types); t++) {
			test_lru_sanity0(map_types[t], map_flags[f]);
			test_lru_sanity1(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity2(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity3(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity4(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity5(map_types[t], map_flags[f]);
			test_lru_sanity6(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity7(map_types[t], map_flags[f]);
			test_lru_sanity8(map_types[t], map_flags[f]);

			printf("\n");
		}
	}

	return 0;
}