// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Google */

#include <test_progs.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include "kmem_cache_iter.skel.h"

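/*
 * Tests the kmem_cache BPF iterator: a seq_file iterator program dumps
 * each slab cache into the 'slab_result' map, which the subtests below
 * compare against /proc/slabinfo.  Like other BPF selftests, this is
 * typically run via:
 *
 *	./test_progs -t kmem_cache_iter
 */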
#define SLAB_NAME_MAX	32

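/*
 * Assumed to mirror the record that the BPF side (kmem_cache_iter.bpf.c)
 * stores in the 'slab_result' array map, keyed by iteration order; both
 * definitions must stay in sync for the map lookups below to be valid.
 */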
struct kmem_cache_result {
	char name[SLAB_NAME_MAX];
	long obj_size;
};

static void subtest_kmem_cache_iter_check_task_struct(struct kmem_cache_iter *skel)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.flags = 0,  /* Run it with the current task */
	);
	int prog_fd = bpf_program__fd(skel->progs.check_task_struct);

	/* Get task_struct and check if it's from a slab cache */
	ASSERT_OK(bpf_prog_test_run_opts(prog_fd, &opts), "prog_test_run");

	/* The BPF program should set the 'found' variable */
	ASSERT_EQ(skel->bss->task_struct_found, 1, "task_struct_found");
}

static void subtest_kmem_cache_iter_check_slabinfo(struct kmem_cache_iter *skel)
{
	FILE *fp;
	int map_fd;
	char name[SLAB_NAME_MAX];
	unsigned long objsize;
	char rest_of_line[1000];
	struct kmem_cache_result r;
	int seen = 0;

	fp = fopen("/proc/slabinfo", "r");
	if (fp == NULL) {
		/* CONFIG_SLUB_DEBUG is not enabled */
		return;
	}

	map_fd = bpf_map__fd(skel->maps.slab_result);

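	/*
	 * A typical /proc/slabinfo (version 2.x) looks like the following;
	 * the fscanf() patterns below are written against this layout:
	 *
	 *	slabinfo - version: 2.1
	 *	# name <active_objs> <num_objs> <objsize> <objperslab> \
	 *	    <pagesperslab> : tunables <limit> <batchcount> \
	 *	    <sharedfactor> : slabdata <active_slabs> <num_slabs> \
	 *	    <sharedavail>
	 *	kmalloc-64  1536  1536  64  64  1 : tunables 0 0 0 : ...
	 *	(the sample data line is illustrative, not real output)
	 */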
	/* Ignore first two lines for header */
	fscanf(fp, "slabinfo - version: %*d.%*d\n");
	fscanf(fp, "# %*s %*s %*s %*s %*s %*s : %[^\n]\n", rest_of_line);

	/* Compare name and objsize only - the other fields can change frequently */
	while (fscanf(fp, "%s %*u %*u %lu %*u %*u : %[^\n]\n",
		      name, &objsize, rest_of_line) == 3) {
		int ret = bpf_map_lookup_elem(map_fd, &seen, &r);

		if (!ASSERT_OK(ret, "kmem_cache_lookup"))
			break;

		ASSERT_STREQ(r.name, name, "kmem_cache_name");
		ASSERT_EQ(r.obj_size, objsize, "kmem_cache_objsize");

		seen++;
	}

	ASSERT_EQ(skel->bss->kmem_cache_seen, seen, "kmem_cache_seen_eq");

	fclose(fp);
}

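/*
 * Unlike the seq_file iterator above, open_coded_iter is assumed to walk
 * the caches from inside the program with the open-coded iterator kfuncs
 * (bpf_iter_kmem_cache_{new,next,destroy}), so it needs no attach or
 * read(); a plain test run is enough.
 */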
static void subtest_kmem_cache_iter_open_coded(struct kmem_cache_iter *skel)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	int err, fd;

	/* No need to attach it, just run it directly */
	fd = bpf_program__fd(skel->progs.open_coded_iter);

	err = bpf_prog_test_run_opts(fd, &topts);
	if (!ASSERT_OK(err, "test_run_opts err"))
		return;
	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
		return;

	/* It should be the same as we've seen from the explicit iterator */
	ASSERT_EQ(skel->bss->open_coded_seen, skel->bss->kmem_cache_seen, "open_code_seen_eq");
}

void test_kmem_cache_iter(void)
{
	struct kmem_cache_iter *skel = NULL;
	char buf[256];
	int iter_fd;

	skel = kmem_cache_iter__open_and_load();
	if (!ASSERT_OK_PTR(skel, "kmem_cache_iter__open_and_load"))
		return;

	if (!ASSERT_OK(kmem_cache_iter__attach(skel), "skel_attach"))
		goto destroy;

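	/*
	 * bpf_iter_create() turns the attached iterator link into a
	 * readable file descriptor; each read() drives the
	 * slab_info_collector program across the slab caches and returns
	 * its seq_file output.
	 */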
	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.slab_info_collector));
	if (!ASSERT_GE(iter_fd, 0, "iter_create"))
		goto destroy;

	memset(buf, 0, sizeof(buf));
	while (read(iter_fd, buf, sizeof(buf)) > 0) {
		/* Read out all contents */
		printf("%s", buf);
	}

	/* Next reads should return 0 */
	ASSERT_EQ(read(iter_fd, buf, sizeof(buf)), 0, "read");
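	/*
	 * By now the iterator has run to completion, so the BPF program
	 * has filled the slab_result map and the kmem_cache_seen counter
	 * that the subtests below rely on.
	 */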

	if (test__start_subtest("check_task_struct"))
		subtest_kmem_cache_iter_check_task_struct(skel);
	if (test__start_subtest("check_slabinfo"))
		subtest_kmem_cache_iter_check_slabinfo(skel);
	if (test__start_subtest("open_coded_iter"))
		subtest_kmem_cache_iter_open_coded(skel);

	close(iter_fd);

destroy:
	kmem_cache_iter__destroy(skel);
}