xref: /linux/tools/testing/selftests/cgroup/test_zswap.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0
2 #define _GNU_SOURCE
3 
4 #include <linux/limits.h>
5 #include <unistd.h>
6 #include <stdio.h>
7 #include <signal.h>
8 #include <errno.h>
9 #include <fcntl.h>
10 #include <sys/sysinfo.h>
11 #include <string.h>
12 #include <sys/wait.h>
13 #include <sys/mman.h>
14 
15 #include "kselftest.h"
16 #include "cgroup_util.h"
17 
/*
 * Read a single unsigned integer from @path into *@value.
 * Returns 0 on success, -1 if the file can't be opened or parsed.
 */
static int read_int(const char *path, size_t *value)
{
	FILE *file;
	int ret = 0;

	file = fopen(path, "r");
	if (!file)
		return -1;
	/* %zu matches size_t; %ld was UB on targets where size_t != long */
	if (fscanf(file, "%zu", value) != 1)
		ret = -1;
	fclose(file);
	return ret;
}
31 
/*
 * Write @value to the vm.min_free_kbytes sysctl.
 * Returns the (positive) fprintf count on success, -1 on failure.
 */
static int set_min_free_kb(size_t value)
{
	FILE *file;
	int ret;

	file = fopen("/proc/sys/vm/min_free_kbytes", "w");
	if (!file)
		return -1;
	/* %zu matches size_t; %ld was UB on targets where size_t != long */
	ret = fprintf(file, "%zu\n", value);
	/* procfs writes may only fail at flush time; report that too */
	if (fclose(file) || ret < 0)
		ret = -1;
	return ret;
}
44 
/* Read the current vm.min_free_kbytes sysctl value. 0 on success, -1 on error. */
static int read_min_free_kb(size_t *value)
{
	return read_int("/proc/sys/vm/min_free_kbytes", value);
}
49 
/*
 * Global count of pages currently stored in zswap, from debugfs.
 * Requires debugfs to be mounted; returns -1 if it can't be read.
 */
static int get_zswap_stored_pages(size_t *value)
{
	return read_int("/sys/kernel/debug/zswap/stored_pages", value);
}
54 
/* Per-cgroup zswap writeback event count (zswpwb in memory.stat); negative on error. */
static long get_cg_wb_count(const char *cg)
{
	return cg_read_key_long(cg, "memory.stat", "zswpwb");
}
59 
/*
 * Per-cgroup zswpout event count from memory.stat; the trailing space in
 * the key keeps the prefix match exact. Negative on error.
 */
static long get_zswpout(const char *cgroup)
{
	return cg_read_key_long(cgroup, "memory.stat", "zswpout ");
}
64 
/*
 * cg_run() helper: allocate @arg bytes, dirty one byte per page, then read
 * everything back so any pages that were (z)swapped out are faulted back in.
 * @cgroup is unused (cg_run callback signature). Returns 0 on success, -1 on
 * allocation failure or data corruption.
 */
static int allocate_and_read_bytes(const char *cgroup, void *arg)
{
	size_t size = (size_t)arg;
	char *mem = malloc(size);
	int ret = 0;

	/* size_t index: 'int' sign-compares with size and overflows past 2G */
	if (!mem)
		return -1;
	for (size_t i = 0; i < size; i += 4095)
		mem[i] = 'a';

	/* Go through the allocated memory to (z)swap in and out pages */
	for (size_t i = 0; i < size; i += 4095) {
		if (mem[i] != 'a')
			ret = -1;
	}

	free(mem);
	return ret;
}
85 
/*
 * cg_run() helper: allocate @arg bytes and dirty one byte per page so the
 * whole range is faulted in and charged to the cgroup. @cgroup is unused
 * (cg_run callback signature). Returns 0 on success, -1 on failure.
 */
static int allocate_bytes(const char *cgroup, void *arg)
{
	size_t size = (size_t)arg;
	char *mem = malloc(size);

	if (!mem)
		return -1;
	/* size_t index: 'int' sign-compares with size and overflows past 2G */
	for (size_t i = 0; i < size; i += 4095)
		mem[i] = 'a';
	free(mem);
	return 0;
}
98 
99 static char *setup_test_group_1M(const char *root, const char *name)
100 {
101 	char *group_name = cg_name(root, name);
102 
103 	if (!group_name)
104 		return NULL;
105 	if (cg_create(group_name))
106 		goto fail;
107 	if (cg_write(group_name, "memory.max", "1M")) {
108 		cg_destroy(group_name);
109 		goto fail;
110 	}
111 	return group_name;
112 fail:
113 	free(group_name);
114 	return NULL;
115 }
116 
117 /*
118  * Sanity test to check that pages are written into zswap.
119  */
120 static int test_zswap_usage(const char *root)
121 {
122 	long zswpout_before, zswpout_after;
123 	int ret = KSFT_FAIL;
124 	char *test_group;
125 
126 	test_group = cg_name(root, "no_shrink_test");
127 	if (!test_group)
128 		goto out;
129 	if (cg_create(test_group))
130 		goto out;
131 	if (cg_write(test_group, "memory.max", "1M"))
132 		goto out;
133 
134 	zswpout_before = get_zswpout(test_group);
135 	if (zswpout_before < 0) {
136 		ksft_print_msg("Failed to get zswpout\n");
137 		goto out;
138 	}
139 
140 	/* Allocate more than memory.max to push memory into zswap */
141 	if (cg_run(test_group, allocate_bytes, (void *)MB(4)))
142 		goto out;
143 
144 	/* Verify that pages come into zswap */
145 	zswpout_after = get_zswpout(test_group);
146 	if (zswpout_after <= zswpout_before) {
147 		ksft_print_msg("zswpout does not increase after test program\n");
148 		goto out;
149 	}
150 	ret = KSFT_PASS;
151 
152 out:
153 	cg_destroy(test_group);
154 	free(test_group);
155 	return ret;
156 }
157 
158 /*
159  * Check that when memory.zswap.max = 0, no pages can go to the zswap pool for
160  * the cgroup.
161  */
162 static int test_swapin_nozswap(const char *root)
163 {
164 	int ret = KSFT_FAIL;
165 	char *test_group;
166 	long swap_peak, zswpout;
167 
168 	test_group = cg_name(root, "no_zswap_test");
169 	if (!test_group)
170 		goto out;
171 	if (cg_create(test_group))
172 		goto out;
173 	if (cg_write(test_group, "memory.max", "8M"))
174 		goto out;
175 	if (cg_write(test_group, "memory.zswap.max", "0"))
176 		goto out;
177 
178 	/* Allocate and read more than memory.max to trigger swapin */
179 	if (cg_run(test_group, allocate_and_read_bytes, (void *)MB(32)))
180 		goto out;
181 
182 	/* Verify that pages are swapped out, but no zswap happened */
183 	swap_peak = cg_read_long(test_group, "memory.swap.peak");
184 	if (swap_peak < 0) {
185 		ksft_print_msg("failed to get cgroup's swap_peak\n");
186 		goto out;
187 	}
188 
189 	if (swap_peak < MB(24)) {
190 		ksft_print_msg("at least 24MB of memory should be swapped out\n");
191 		goto out;
192 	}
193 
194 	zswpout = get_zswpout(test_group);
195 	if (zswpout < 0) {
196 		ksft_print_msg("failed to get zswpout\n");
197 		goto out;
198 	}
199 
200 	if (zswpout > 0) {
201 		ksft_print_msg("zswapout > 0 when memory.zswap.max = 0\n");
202 		goto out;
203 	}
204 
205 	ret = KSFT_PASS;
206 
207 out:
208 	cg_destroy(test_group);
209 	free(test_group);
210 	return ret;
211 }
212 
213 /* Simple test to verify the (z)swapin code paths */
214 static int test_zswapin(const char *root)
215 {
216 	int ret = KSFT_FAIL;
217 	char *test_group;
218 	long zswpin;
219 
220 	test_group = cg_name(root, "zswapin_test");
221 	if (!test_group)
222 		goto out;
223 	if (cg_create(test_group))
224 		goto out;
225 	if (cg_write(test_group, "memory.max", "8M"))
226 		goto out;
227 	if (cg_write(test_group, "memory.zswap.max", "max"))
228 		goto out;
229 
230 	/* Allocate and read more than memory.max to trigger (z)swap in */
231 	if (cg_run(test_group, allocate_and_read_bytes, (void *)MB(32)))
232 		goto out;
233 
234 	zswpin = cg_read_key_long(test_group, "memory.stat", "zswpin ");
235 	if (zswpin < 0) {
236 		ksft_print_msg("failed to get zswpin\n");
237 		goto out;
238 	}
239 
240 	if (zswpin < MB(24) / PAGE_SIZE) {
241 		ksft_print_msg("at least 24MB should be brought back from zswap\n");
242 		goto out;
243 	}
244 
245 	ret = KSFT_PASS;
246 
247 out:
248 	cg_destroy(test_group);
249 	free(test_group);
250 	return ret;
251 }
252 
253 /*
254  * Attempt writeback with the following steps:
255  * 1. Allocate memory.
256  * 2. Reclaim memory equal to the amount that was allocated in step 1.
257       This will move it into zswap.
258  * 3. Save current zswap usage.
259  * 4. Move the memory allocated in step 1 back in from zswap.
260  * 5. Set zswap.max to half the amount that was recorded in step 3.
261  * 6. Attempt to reclaim memory equal to the amount that was allocated,
262       this will either trigger writeback if it's enabled, or reclamation
263       will fail if writeback is disabled as there isn't enough zswap space.
264  */
265 static int attempt_writeback(const char *cgroup, void *arg)
266 {
267 	long pagesize = sysconf(_SC_PAGESIZE);
268 	size_t memsize = MB(4);
269 	char buf[pagesize];
270 	long zswap_usage;
271 	bool wb_enabled = *(bool *) arg;
272 	int ret = -1;
273 	char *mem;
274 
275 	mem = (char *)malloc(memsize);
276 	if (!mem)
277 		return ret;
278 
279 	/*
280 	 * Fill half of each page with increasing data, and keep other
281 	 * half empty, this will result in data that is still compressible
282 	 * and ends up in zswap, with material zswap usage.
283 	 */
284 	for (int i = 0; i < pagesize; i++)
285 		buf[i] = i < pagesize/2 ? (char) i : 0;
286 
287 	for (int i = 0; i < memsize; i += pagesize)
288 		memcpy(&mem[i], buf, pagesize);
289 
290 	/* Try and reclaim allocated memory */
291 	if (cg_write_numeric(cgroup, "memory.reclaim", memsize)) {
292 		ksft_print_msg("Failed to reclaim all of the requested memory\n");
293 		goto out;
294 	}
295 
296 	zswap_usage = cg_read_long(cgroup, "memory.zswap.current");
297 
298 	/* zswpin */
299 	for (int i = 0; i < memsize; i += pagesize) {
300 		if (memcmp(&mem[i], buf, pagesize)) {
301 			ksft_print_msg("invalid memory\n");
302 			goto out;
303 		}
304 	}
305 
306 	if (cg_write_numeric(cgroup, "memory.zswap.max", zswap_usage/2))
307 		goto out;
308 
309 	/*
310 	 * If writeback is enabled, trying to reclaim memory now will trigger a
311 	 * writeback as zswap.max is half of what was needed when reclaim ran the first time.
312 	 * If writeback is disabled, memory reclaim will fail as zswap is limited and
313 	 * it can't writeback to swap.
314 	 */
315 	ret = cg_write_numeric(cgroup, "memory.reclaim", memsize);
316 	if (!wb_enabled)
317 		ret = (ret == -EAGAIN) ? 0 : -1;
318 
319 out:
320 	free(mem);
321 	return ret;
322 }
323 
/*
 * Run one writeback attempt in @cgroup and check that the zswpwb counter
 * moved iff writeback (@wb) is enabled. Expects a freshly created cgroup
 * whose zswpwb count is still zero. Returns 0 on success, -1 on failure.
 */
static int test_zswap_writeback_one(const char *cgroup, bool wb)
{
	long zswpwb_before, zswpwb_after;

	zswpwb_before = get_cg_wb_count(cgroup);
	if (zswpwb_before != 0) {
		ksft_print_msg("zswpwb_before = %ld instead of 0\n", zswpwb_before);
		return -1;
	}

	if (cg_run(cgroup, attempt_writeback, (void *) &wb))
		return -1;

	/* Verify that zswap writeback occurred only if writeback was enabled */
	zswpwb_after = get_cg_wb_count(cgroup);
	if (zswpwb_after < 0)
		return -1;

	/* !! collapses the count to a boolean for comparison with the flag */
	if (wb != !!zswpwb_after) {
		ksft_print_msg("zswpwb_after is %ld while wb is %s\n",
				zswpwb_after, wb ? "enabled" : "disabled");
		return -1;
	}

	return 0;
}
350 
/* Test to verify the zswap writeback path */
static int test_zswap_writeback(const char *root, bool wb)
{
	int ret = KSFT_FAIL;
	char *test_group, *test_group_child = NULL;

	/* Skip when the kernel has no memory.zswap.writeback knob */
	if (cg_read_strcmp(root, "memory.zswap.writeback", "1"))
		return KSFT_SKIP;

	test_group = cg_name(root, "zswap_writeback_test");
	if (!test_group)
		goto out;
	if (cg_create(test_group))
		goto out;
	if (cg_write(test_group, "memory.zswap.writeback", wb ? "1" : "0"))
		goto out;

	/* First pass: writeback setting applied directly on the cgroup */
	if (test_zswap_writeback_one(test_group, wb))
		goto out;

	/*
	 * Reset memory.zswap.max to max (modified by attempt_writeback), and
	 * set up child cgroup, whose memory.zswap.writeback is hardcoded to 1.
	 * Thus, the parent's setting shall be what's in effect.
	 */
	if (cg_write(test_group, "memory.zswap.max", "max"))
		goto out;
	if (cg_write(test_group, "cgroup.subtree_control", "+memory"))
		goto out;

	test_group_child = cg_name(test_group, "zswap_writeback_test_child");
	if (!test_group_child)
		goto out;
	if (cg_create(test_group_child))
		goto out;
	if (cg_write(test_group_child, "memory.zswap.writeback", "1"))
		goto out;

	/* Second pass: parent's setting must override the child's "1" */
	if (test_zswap_writeback_one(test_group_child, wb))
		goto out;

	ret = KSFT_PASS;

out:
	if (test_group_child) {
		cg_destroy(test_group_child);
		free(test_group_child);
	}
	cg_destroy(test_group);
	free(test_group);
	return ret;
}
401 
/* Table wrapper: run the writeback test with writeback enabled. */
static int test_zswap_writeback_enabled(const char *root)
{
	return test_zswap_writeback(root, true);
}
406 
/* Table wrapper: run the writeback test with writeback disabled. */
static int test_zswap_writeback_disabled(const char *root)
{
	return test_zswap_writeback(root, false);
}
411 
412 /*
413  * When trying to store a memcg page in zswap, if the memcg hits its memory
414  * limit in zswap, writeback should affect only the zswapped pages of that
415  * memcg.
416  */
417 static int test_no_invasive_cgroup_shrink(const char *root)
418 {
419 	int ret = KSFT_FAIL;
420 	size_t control_allocation_size = MB(10);
421 	char *control_allocation = NULL, *wb_group = NULL, *control_group = NULL;
422 
423 	wb_group = setup_test_group_1M(root, "per_memcg_wb_test1");
424 	if (!wb_group)
425 		return KSFT_FAIL;
426 	if (cg_write(wb_group, "memory.zswap.max", "10K"))
427 		goto out;
428 	control_group = setup_test_group_1M(root, "per_memcg_wb_test2");
429 	if (!control_group)
430 		goto out;
431 
432 	/* Push some test_group2 memory into zswap */
433 	if (cg_enter_current(control_group))
434 		goto out;
435 	control_allocation = malloc(control_allocation_size);
436 	for (int i = 0; i < control_allocation_size; i += 4095)
437 		control_allocation[i] = 'a';
438 	if (cg_read_key_long(control_group, "memory.stat", "zswapped") < 1)
439 		goto out;
440 
441 	/* Allocate 10x memory.max to push wb_group memory into zswap and trigger wb */
442 	if (cg_run(wb_group, allocate_bytes, (void *)MB(10)))
443 		goto out;
444 
445 	/* Verify that only zswapped memory from gwb_group has been written back */
446 	if (get_cg_wb_count(wb_group) > 0 && get_cg_wb_count(control_group) == 0)
447 		ret = KSFT_PASS;
448 out:
449 	cg_enter_current(root);
450 	if (control_group) {
451 		cg_destroy(control_group);
452 		free(control_group);
453 	}
454 	cg_destroy(wb_group);
455 	free(wb_group);
456 	if (control_allocation)
457 		free(control_allocation);
458 	return ret;
459 }
460 
/* Shared (MAP_SHARED) parent/child state for test_no_kmem_bypass(). */
struct no_kmem_bypass_child_args {
	size_t target_alloc_bytes;	/* bytes the child should allocate */
	size_t child_allocated;		/* used as a bool: set once done */
};
465 
/*
 * Child body for test_no_kmem_bypass(): allocate and dirty
 * target_alloc_bytes, flag completion through the shared mapping, then
 * pause() so the memory stays charged until the parent sends SIGTERM.
 */
static int no_kmem_bypass_child(const char *cgroup, void *arg)
{
	struct no_kmem_bypass_child_args *values = arg;
	void *allocation;

	allocation = malloc(values->target_alloc_bytes);
	if (!allocation) {
		/*
		 * Still set the flag so the parent doesn't spin for its
		 * full 10s timeout waiting on child_allocated.
		 */
		values->child_allocated = true;
		return -1;
	}
	/* Touch one byte per page (4095 stride) to fault everything in */
	for (long i = 0; i < values->target_alloc_bytes; i += 4095)
		((char *)allocation)[i] = 'a';
	values->child_allocated = true;
	pause();	/* wait here until the parent kills us */
	free(allocation);
	return 0;
}
483 
484 /*
485  * When pages owned by a memcg are pushed to zswap by kswapd, they should be
486  * charged to that cgroup. This wasn't the case before commit
487  * cd08d80ecdac("mm: correctly charge compressed memory to its memcg").
488  *
489  * The test first allocates memory in a memcg, then raises min_free_kbytes to
490  * a very high value so that the allocation falls below low wm, then makes
491  * another allocation to trigger kswapd that should push the memcg-owned pages
492  * to zswap and verifies that the zswap pages are correctly charged.
493  *
494  * To be run on a VM with at most 4G of memory.
495  */
496 static int test_no_kmem_bypass(const char *root)
497 {
498 	size_t min_free_kb_high, min_free_kb_low, min_free_kb_original;
499 	struct no_kmem_bypass_child_args *values;
500 	size_t trigger_allocation_size;
501 	int wait_child_iteration = 0;
502 	long stored_pages_threshold;
503 	struct sysinfo sys_info;
504 	int ret = KSFT_FAIL;
505 	int child_status;
506 	char *test_group = NULL;
507 	pid_t child_pid;
508 
509 	/* Read sys info and compute test values accordingly */
510 	if (sysinfo(&sys_info) != 0)
511 		return KSFT_FAIL;
512 	if (sys_info.totalram > 5000000000)
513 		return KSFT_SKIP;
514 	values = mmap(0, sizeof(struct no_kmem_bypass_child_args), PROT_READ |
515 			PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
516 	if (values == MAP_FAILED)
517 		return KSFT_FAIL;
518 	if (read_min_free_kb(&min_free_kb_original))
519 		return KSFT_FAIL;
520 	min_free_kb_high = sys_info.totalram / 2000;
521 	min_free_kb_low = sys_info.totalram / 500000;
522 	values->target_alloc_bytes = (sys_info.totalram - min_free_kb_high * 1000) +
523 		sys_info.totalram * 5 / 100;
524 	stored_pages_threshold = sys_info.totalram / 5 / 4096;
525 	trigger_allocation_size = sys_info.totalram / 20;
526 
527 	/* Set up test memcg */
528 	test_group = cg_name(root, "kmem_bypass_test");
529 	if (!test_group)
530 		goto out;
531 
532 	/* Spawn memcg child and wait for it to allocate */
533 	set_min_free_kb(min_free_kb_low);
534 	if (cg_create(test_group))
535 		goto out;
536 	values->child_allocated = false;
537 	child_pid = cg_run_nowait(test_group, no_kmem_bypass_child, values);
538 	if (child_pid < 0)
539 		goto out;
540 	while (!values->child_allocated && wait_child_iteration++ < 10000)
541 		usleep(1000);
542 
543 	/* Try to wakeup kswapd and let it push child memory to zswap */
544 	set_min_free_kb(min_free_kb_high);
545 	for (int i = 0; i < 20; i++) {
546 		size_t stored_pages;
547 		char *trigger_allocation = malloc(trigger_allocation_size);
548 
549 		if (!trigger_allocation)
550 			break;
551 		for (int i = 0; i < trigger_allocation_size; i += 4095)
552 			trigger_allocation[i] = 'b';
553 		usleep(100000);
554 		free(trigger_allocation);
555 		if (get_zswap_stored_pages(&stored_pages))
556 			break;
557 		if (stored_pages < 0)
558 			break;
559 		/* If memory was pushed to zswap, verify it belongs to memcg */
560 		if (stored_pages > stored_pages_threshold) {
561 			int zswapped = cg_read_key_long(test_group, "memory.stat", "zswapped ");
562 			int delta = stored_pages * 4096 - zswapped;
563 			int result_ok = delta < stored_pages * 4096 / 4;
564 
565 			ret = result_ok ? KSFT_PASS : KSFT_FAIL;
566 			break;
567 		}
568 	}
569 
570 	kill(child_pid, SIGTERM);
571 	waitpid(child_pid, &child_status, 0);
572 out:
573 	set_min_free_kb(min_free_kb_original);
574 	cg_destroy(test_group);
575 	free(test_group);
576 	return ret;
577 }
578 
/* Shared (MAP_SHARED) parent/child state for test_zswap_incompressible(). */
struct incomp_child_args {
	size_t size;		/* bytes of random data the child maps */
	int pipefd[2];		/* child signals completion over this pipe */
	int madvise_ret;	/* result of MADV_PAGEOUT in the child */
	int madvise_errno;	/* errno captured right after madvise() */
};
585 
/*
 * Child body for test_zswap_incompressible(): mmap @values->size bytes,
 * fill them with urandom data (incompressible), push them toward zswap
 * with MADV_PAGEOUT, signal the parent over the pipe, then pause() so the
 * pages stay accounted while the parent inspects memory.stat.
 */
static int allocate_random_and_wait(const char *cgroup, void *arg)
{
	struct incomp_child_args *values = arg;
	size_t size = values->size;
	char *mem;
	int fd;
	ssize_t n;

	/* Child only writes; drop the read end inherited from the parent */
	close(values->pipefd[0]);

	mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mem == MAP_FAILED)
		return -1;

	/* Fill with random data from /dev/urandom - incompressible */
	fd = open("/dev/urandom", O_RDONLY);
	if (fd < 0) {
		munmap(mem, size);
		return -1;
	}

	/* Loop because read() may return short counts */
	for (size_t i = 0; i < size; ) {
		n = read(fd, mem + i, size - i);
		if (n <= 0)
			break;
		i += n;
	}
	close(fd);

	/* Touch all pages to ensure they're faulted in */
	for (size_t i = 0; i < size; i += PAGE_SIZE)
		mem[i] = mem[i];

	/* Use MADV_PAGEOUT to push pages into zswap */
	values->madvise_ret = madvise(mem, size, MADV_PAGEOUT);
	values->madvise_errno = errno;

	/* Notify parent that allocation and pageout are done */
	/* NOTE(review): write() result is ignored — best-effort wakeup */
	write(values->pipefd[1], "x", 1);
	close(values->pipefd[1]);

	/* Keep memory alive for parent to check stats */
	pause();
	munmap(mem, size);
	return 0;
}
633 
/*
 * Per-cgroup count of incompressible pages currently in zswap, from the
 * zswap_incomp key in memory.stat; negative on error. Trailing space in
 * the key keeps the prefix match exact.
 */
static long get_zswap_incomp(const char *cgroup)
{
	return cg_read_key_long(cgroup, "memory.stat", "zswap_incomp ");
}
638 
/*
 * Test that incompressible pages (random data) are tracked by zswap_incomp.
 *
 * The child process allocates random data within memory.max, then uses
 * MADV_PAGEOUT to push pages into zswap. The parent waits on a pipe for
 * the child to finish, then checks the zswap_incomp stat before the child
 * exits (zswap_incomp is a gauge that decreases on free).
 */
static int test_zswap_incompressible(const char *root)
{
	int ret = KSFT_FAIL;
	struct incomp_child_args *values;
	char *test_group;
	long zswap_incomp;
	pid_t child_pid;
	int child_status;
	char buf;

	/* Shared mapping so the child's madvise result is visible here */
	values = mmap(0, sizeof(struct incomp_child_args), PROT_READ |
			PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (values == MAP_FAILED)
		return KSFT_FAIL;

	if (pipe(values->pipefd)) {
		munmap(values, sizeof(struct incomp_child_args));
		return KSFT_FAIL;
	}

	test_group = cg_name(root, "zswap_incompressible_test");
	if (!test_group)
		goto out;
	if (cg_create(test_group))
		goto out;
	if (cg_write(test_group, "memory.max", "32M"))
		goto out;

	values->size = MB(4);
	child_pid = cg_run_nowait(test_group, allocate_random_and_wait, values);
	if (child_pid < 0)
		goto out;

	/* Parent only reads; the write end belongs to the child */
	close(values->pipefd[1]);

	/* Wait for child to finish allocating and pageout */
	read(values->pipefd[0], &buf, 1);
	close(values->pipefd[0]);

	/* Child is still paused, so its zswap pages are still accounted */
	zswap_incomp = get_zswap_incomp(test_group);
	if (zswap_incomp <= 0) {
		long zswpout = get_zswpout(test_group);
		long zswapped = cg_read_key_long(test_group, "memory.stat", "zswapped ");
		long zswap_b = cg_read_key_long(test_group, "memory.stat", "zswap ");

		ksft_print_msg("zswap_incomp not increased: %ld\n", zswap_incomp);
		ksft_print_msg("debug: zswpout=%ld zswapped=%ld zswap_b=%ld\n",
			       zswpout, zswapped, zswap_b);
		ksft_print_msg("debug: madvise ret=%d errno=%d\n",
			       values->madvise_ret, values->madvise_errno);
		goto out_kill;
	}

	ret = KSFT_PASS;

out_kill:
	kill(child_pid, SIGTERM);
	waitpid(child_pid, &child_status, 0);
out:
	cg_destroy(test_group);
	free(test_group);
	munmap(values, sizeof(struct incomp_child_args));
	return ret;
}
711 
#define T(x) { x, #x }
/* Test registry: each entry pairs a test function with its printable name. */
struct zswap_test {
	int (*fn)(const char *root);	/* returns KSFT_PASS/SKIP/FAIL */
	const char *name;
} tests[] = {
	T(test_zswap_usage),
	T(test_swapin_nozswap),
	T(test_zswapin),
	T(test_zswap_writeback_enabled),
	T(test_zswap_writeback_disabled),
	T(test_no_kmem_bypass),
	T(test_no_invasive_cgroup_shrink),
	T(test_zswap_incompressible),
};
#undef T
727 
/* zswap is built in iff its module directory is visible in sysfs. */
static bool zswap_configured(void)
{
	return !access("/sys/module/zswap", F_OK);
}
732 
/*
 * Entry point: verify the environment (cgroup v2 mounted, zswap built in,
 * memory controller usable), then run every registered test and report
 * each result through the kselftest TAP helpers.
 */
int main(int argc, char **argv)
{
	char root[PATH_MAX];
	int i;

	ksft_print_header();
	ksft_set_plan(ARRAY_SIZE(tests));
	if (cg_find_unified_root(root, sizeof(root), NULL))
		ksft_exit_skip("cgroup v2 isn't mounted\n");

	if (!zswap_configured())
		ksft_exit_skip("zswap isn't configured\n");

	/*
	 * Check that memory controller is available:
	 * memory is listed in cgroup.controllers
	 */
	if (cg_read_strstr(root, "cgroup.controllers", "memory"))
		ksft_exit_skip("memory controller isn't available\n");

	/* Enable the memory controller for children if it isn't already */
	if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
		if (cg_write(root, "cgroup.subtree_control", "+memory"))
			ksft_exit_skip("Failed to set memory controller\n");

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		switch (tests[i].fn(root)) {
		case KSFT_PASS:
			ksft_test_result_pass("%s\n", tests[i].name);
			break;
		case KSFT_SKIP:
			ksft_test_result_skip("%s\n", tests[i].name);
			break;
		default:
			ksft_test_result_fail("%s\n", tests[i].name);
			break;
		}
	}

	ksft_finished();
}
773