xref: /linux/tools/testing/selftests/mm/prctl_thp_disable.c (revision 681f45deca1c7f517299d032783f655e5f2c36b4)
// SPDX-License-Identifier: GPL-2.0
/*
 * Basic tests for PR_GET/SET_THP_DISABLE prctl calls
 *
 * Author(s): Usama Arif <usamaarif642@gmail.com>
 */
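/*
 * PR_SET_THP_DISABLE(1) disables Transparent Hugepage allocations for the
 * calling process and is carried across fork(); PR_GET_THP_DISABLE reports
 * the current setting. A minimal sketch of the sequence exercised below:
 *
 *	prctl(PR_SET_THP_DISABLE, 1, NULL, NULL, NULL);    // no THPs for this mm
 *	prctl(PR_GET_THP_DISABLE, NULL, NULL, NULL, NULL); // returns 1
 *	prctl(PR_SET_THP_DISABLE, 0, NULL, NULL, NULL);    // back to global policy
 */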
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <sys/prctl.h>
#include <sys/wait.h>

#include "../kselftest_harness.h"
#include "thp_settings.h"
#include "vm_util.h"

enum thp_collapse_type {
	THP_COLLAPSE_NONE,
	THP_COLLAPSE_MADV_NOHUGEPAGE,
	THP_COLLAPSE_MADV_HUGEPAGE,	/* MADV_HUGEPAGE before access */
	THP_COLLAPSE_MADV_COLLAPSE,	/* MADV_COLLAPSE after access */
};

/*
 * mmap a buffer, fault it in, madvise it appropriately (before the page fault
 * for MADV_HUGEPAGE, after it for MADV_COLLAPSE), and check whether the
 * mapped region is backed by a hugepage.
 * Returns:
 * 0 if the test does not result in a hugepage
 * 1 if the test results in a hugepage
 * -errno if mmap fails
 */
static int test_mmap_thp(enum thp_collapse_type madvise_buf, size_t pmdsize)
{
	char *mem, *mmap_mem;
	size_t mmap_size;
	int ret;

	/* For alignment purposes, we need twice the THP size. */
	mmap_size = 2 * pmdsize;
	mmap_mem = (char *)mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
				    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mmap_mem == MAP_FAILED)
		return -errno;

	/* We need a THP-aligned memory area. */
	mem = (char *)(((uintptr_t)mmap_mem + pmdsize) & ~(pmdsize - 1));

	if (madvise_buf == THP_COLLAPSE_MADV_HUGEPAGE)
		madvise(mem, pmdsize, MADV_HUGEPAGE);
	else if (madvise_buf == THP_COLLAPSE_MADV_NOHUGEPAGE)
		madvise(mem, pmdsize, MADV_NOHUGEPAGE);

	/* Ensure memory is allocated */
	memset(mem, 1, pmdsize);

	if (madvise_buf == THP_COLLAPSE_MADV_COLLAPSE)
		madvise(mem, pmdsize, MADV_COLLAPSE);

	/* HACK: make sure we have a separate VMA that we can check reliably. */
	mprotect(mem, pmdsize, PROT_READ);

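	/*
	 * check_huge_anon() (vm_util.c) is expected to parse the AnonHugePages
	 * field of the range's /proc/self/smaps entry and report whether it is
	 * backed by the requested number of hugepages of the given size.
	 */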
	ret = check_huge_anon(mem, 1, pmdsize);
	munmap(mmap_mem, mmap_size);
	return ret;
}

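/*
 * Expected outcome of test_mmap_thp() once the prctl override is cleared and
 * only the global THP policy applies (second half of the helper below).
 * While PR_SET_THP_DISABLE is active, all four cases are expected to be 0.
 *
 *                     never  madvise  always
 *  no madvise           0       0       1
 *  MADV_NOHUGEPAGE      0       0       0
 *  MADV_HUGEPAGE        0       1       1
 *  MADV_COLLAPSE        1       1       1
 */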
static void prctl_thp_disable_completely_test(struct __test_metadata *const _metadata,
					      size_t pmdsize,
					      enum thp_enabled thp_policy)
{
	ASSERT_EQ(prctl(PR_GET_THP_DISABLE, NULL, NULL, NULL, NULL), 1);

	/* While the prctl override is active, no case should get a hugepage. */
	ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_NONE, pmdsize), 0);

	ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_NOHUGEPAGE, pmdsize), 0);

	ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_HUGEPAGE, pmdsize), 0);

	ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_COLLAPSE, pmdsize), 0);

	/* Reset to global policy */
	ASSERT_EQ(prctl(PR_SET_THP_DISABLE, 0, NULL, NULL, NULL), 0);

	/* With the prctl cleared, only the global policy is in effect. */
	ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_NONE, pmdsize),
		  thp_policy == THP_ALWAYS ? 1 : 0);

	ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_NOHUGEPAGE, pmdsize), 0);

	ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_HUGEPAGE, pmdsize),
		  thp_policy == THP_NEVER ? 0 : 1);

	ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_COLLAPSE, pmdsize), 1);
}

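/*
 * Each TEST_F() below runs once per FIXTURE_VARIANT_ADD() entry, i.e. with
 * the global THP policy set to "never", "madvise" and "always" in turn.
 */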
FIXTURE(prctl_thp_disable_completely)
{
	struct thp_settings settings;
	size_t pmdsize;
};

FIXTURE_VARIANT(prctl_thp_disable_completely)
{
	enum thp_enabled thp_policy;
};

FIXTURE_VARIANT_ADD(prctl_thp_disable_completely, never)
{
	.thp_policy = THP_NEVER,
};

FIXTURE_VARIANT_ADD(prctl_thp_disable_completely, madvise)
{
	.thp_policy = THP_MADVISE,
};

FIXTURE_VARIANT_ADD(prctl_thp_disable_completely, always)
{
	.thp_policy = THP_ALWAYS,
};

FIXTURE_SETUP(prctl_thp_disable_completely)
{
	if (!thp_available())
		SKIP(return, "Transparent Hugepages not available\n");

	self->pmdsize = read_pmd_pagesize();
	if (!self->pmdsize)
		SKIP(return, "Unable to read PMD size\n");

	if (prctl(PR_SET_THP_DISABLE, 1, NULL, NULL, NULL))
		SKIP(return, "Unable to disable THPs completely for the process\n");

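	/*
	 * The thp_settings helpers below adjust the global policy via sysfs,
	 * presumably /sys/kernel/mm/transparent_hugepage/enabled plus the
	 * per-size hugepages-<size>kB/enabled knob, which is set to "inherit"
	 * so the PMD-sized order follows the variant's top-level policy.
	 */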
	thp_save_settings();
	thp_read_settings(&self->settings);
	self->settings.thp_enabled = variant->thp_policy;
	self->settings.hugepages[sz2ord(self->pmdsize, getpagesize())].enabled = THP_INHERIT;
	thp_write_settings(&self->settings);
}

FIXTURE_TEARDOWN(prctl_thp_disable_completely)
{
	thp_restore_settings();
}

TEST_F(prctl_thp_disable_completely, nofork)
{
	prctl_thp_disable_completely_test(_metadata, self->pmdsize, variant->thp_policy);
}

TEST_F(prctl_thp_disable_completely, fork)
{
	int ret = 0;
	pid_t pid;

	/* Make sure prctl changes are carried across fork */
	pid = fork();
	ASSERT_GE(pid, 0);

	if (!pid) {
		prctl_thp_disable_completely_test(_metadata, self->pmdsize, variant->thp_policy);
		return;
	}

	wait(&ret);
	if (WIFEXITED(ret))
		ret = WEXITSTATUS(ret);
	else
		ret = -EINVAL;
	ASSERT_EQ(ret, 0);
}

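/*
 * Typically built and run as part of the mm selftests, e.g. (paths assume an
 * in-tree build):
 *
 *	make -C tools/testing/selftests TARGETS=mm
 *	./tools/testing/selftests/mm/prctl_thp_disable
 *
 * Changing the global THP sysfs policy in FIXTURE_SETUP() requires root.
 */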
TEST_HARNESS_MAIN