xref: /linux/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c (revision f17b474e36647c23801ef8fdaf2255ab66dd2973)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "cgroup_helpers.h"
#include "cgroup_iter_memcg.h"
#include "cgroup_iter_memcg.skel.h"
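/*
 * Tests for reading memcg statistics through a cgroup BPF iterator.
 * Each subtest generates a specific kind of memory usage inside a
 * dedicated cgroup, invokes the attached iterator program, and checks
 * that the corresponding counter it reports is non-zero.
 */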

static int read_stats(struct bpf_link *link)
{
	int fd, ret = 0;
	ssize_t bytes;

	fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_OK_FD(fd, "bpf_iter_create"))
		return 1;

	/*
	 * Invoke the iter program by reading from its fd. We don't expect the
	 * BPF program to write any data, so the read should return zero.
	 * Results are read directly through the custom data section
	 * accessible via skel->data_query.memcg_query.
	 */
	bytes = read(fd, NULL, 0);
	if (!ASSERT_EQ(bytes, 0, "read fd"))
		ret = 1;

	close(fd);
	return ret;
}

static void test_anon(struct bpf_link *link, struct memcg_query *memcg_query)
{
	void *map;
	size_t len;

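	/* Use a 1024-page region so the stat changes are clearly measurable. */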
	len = sysconf(_SC_PAGESIZE) * 1024;

	/*
	 * Increase memcg anon usage by mapping and writing
	 * to a new anon region.
	 */
	map = mmap(NULL, len, PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (!ASSERT_NEQ(map, MAP_FAILED, "mmap anon"))
		return;

	memset(map, 1, len);

	if (!ASSERT_OK(read_stats(link), "read stats"))
		goto cleanup;

	ASSERT_GT(memcg_query->nr_anon_mapped, 0, "final anon mapped val");

cleanup:
	munmap(map, len);
}

static void test_file(struct bpf_link *link, struct memcg_query *memcg_query)
{
	void *map;
	size_t len;
	char *path;
	int fd;

	len = sysconf(_SC_PAGESIZE) * 1024;
	path = "/tmp/test_cgroup_iter_memcg";

	/*
	 * Increase memcg file usage by creating and writing
	 * to a mapped file.
	 */
	fd = open(path, O_CREAT | O_RDWR, 0644);
	if (!ASSERT_OK_FD(fd, "open fd"))
		return;
	if (!ASSERT_OK(ftruncate(fd, len), "ftruncate"))
		goto cleanup_fd;

	map = mmap(NULL, len, PROT_WRITE, MAP_SHARED, fd, 0);
	if (!ASSERT_NEQ(map, MAP_FAILED, "mmap file"))
		goto cleanup_fd;

	memset(map, 1, len);

	if (!ASSERT_OK(read_stats(link), "read stats"))
		goto cleanup_map;

	ASSERT_GT(memcg_query->nr_file_pages, 0, "final file value");
	ASSERT_GT(memcg_query->nr_file_mapped, 0, "final file mapped value");

cleanup_map:
	munmap(map, len);
cleanup_fd:
	close(fd);
	unlink(path);
}

static void test_shmem(struct bpf_link *link, struct memcg_query *memcg_query)
{
	size_t len;
	int fd;

	len = sysconf(_SC_PAGESIZE) * 1024;

	/*
	 * Increase memcg shmem usage by creating a shmem object and
	 * preallocating pages in it.
	 */
	fd = shm_open("/tmp_shmem", O_CREAT | O_RDWR, 0644);
	if (!ASSERT_OK_FD(fd, "shm_open"))
		return;

	if (!ASSERT_OK(fallocate(fd, 0, 0, len), "fallocate"))
		goto cleanup;

	if (!ASSERT_OK(read_stats(link), "read stats"))
		goto cleanup;

	ASSERT_GT(memcg_query->nr_shmem, 0, "final shmem value");

cleanup:
	close(fd);
	shm_unlink("/tmp_shmem");
}

#define NR_PIPES 64
static void test_kmem(struct bpf_link *link, struct memcg_query *memcg_query)
{
	int fds[NR_PIPES][2], i;

	/*
	 * Increase kmem value by creating pipes which will allocate some
	 * kernel buffers.
	 */
	for (i = 0; i < NR_PIPES; i++) {
		if (!ASSERT_OK(pipe(fds[i]), "pipe"))
			goto cleanup;
	}

	if (!ASSERT_OK(read_stats(link), "read stats"))
		goto cleanup;

	ASSERT_GT(memcg_query->memcg_kmem, 0, "kmem value");

cleanup:
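	/*
	 * Close only the pipes created so far: on failure, i is the index of
	 * the pipe() call that failed; on success, i == NR_PIPES.
	 */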
	for (i = i - 1; i >= 0; i--) {
		close(fds[i][0]);
		close(fds[i][1]);
	}
}

static void test_pgfault(struct bpf_link *link, struct memcg_query *memcg_query)
{
	void *map;
	size_t len;

	len = sysconf(_SC_PAGESIZE) * 1024;

	/* Create region to use for triggering a page fault. */
	map = mmap(NULL, len, PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (!ASSERT_NEQ(map, MAP_FAILED, "mmap anon"))
		return;

	/* Trigger page fault. */
	memset(map, 1, len);

	if (!ASSERT_OK(read_stats(link), "read stats"))
		goto cleanup;

	ASSERT_GT(memcg_query->pgfault, 0, "final pgfault val");

cleanup:
	munmap(map, len);
}

void test_cgroup_iter_memcg(void)
{
	char *cgroup_rel_path = "/cgroup_iter_memcg_test";
	struct cgroup_iter_memcg *skel;
	struct bpf_link *link;
	int cgroup_fd;

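	/*
	 * Create a dedicated cgroup and move this process into it so that
	 * memory allocated by the subtests is charged to that cgroup.
	 */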
	cgroup_fd = cgroup_setup_and_join(cgroup_rel_path);
	if (!ASSERT_OK_FD(cgroup_fd, "cgroup_setup_and_join"))
		return;

	skel = cgroup_iter_memcg__open_and_load();
	if (!ASSERT_OK_PTR(skel, "cgroup_iter_memcg__open_and_load"))
		goto cleanup_cgroup_fd;

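	/*
	 * Attach the iterator to the test cgroup only
	 * (BPF_CGROUP_ITER_SELF_ONLY), so each iterator instance visits
	 * just this cgroup.
	 */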
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo = {
		.cgroup.cgroup_fd = cgroup_fd,
		.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY,
	};
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(skel->progs.cgroup_memcg_query, &opts);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_iter"))
		goto cleanup_skel;

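	/*
	 * Each subtest generates one kind of memory usage, triggers the
	 * iterator via read_stats(), and checks the values published through
	 * the skeleton's data_query section.
	 */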
	if (test__start_subtest("cgroup_iter_memcg__anon"))
		test_anon(link, &skel->data_query->memcg_query);
	if (test__start_subtest("cgroup_iter_memcg__shmem"))
		test_shmem(link, &skel->data_query->memcg_query);
	if (test__start_subtest("cgroup_iter_memcg__file"))
		test_file(link, &skel->data_query->memcg_query);
	if (test__start_subtest("cgroup_iter_memcg__kmem"))
		test_kmem(link, &skel->data_query->memcg_query);
	if (test__start_subtest("cgroup_iter_memcg__pgfault"))
		test_pgfault(link, &skel->data_query->memcg_query);

	bpf_link__destroy(link);
cleanup_skel:
	cgroup_iter_memcg__destroy(skel);
cleanup_cgroup_fd:
	close(cgroup_fd);
	cleanup_cgroup_environment();
}