xref: /linux/tools/testing/selftests/mm/migration.c (revision de008c9ba5684f14e83bcf86cd45fb0e4e6c4d82)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * The main purpose of the tests here is to exercise the migration entry code
4  * paths in the kernel.
5  */
6 
7 #include "kselftest_harness.h"
8 #include "thp_settings.h"
9 
10 #include <strings.h>
11 #include <pthread.h>
12 #include <numa.h>
13 #include <numaif.h>
14 #include <sys/mman.h>
15 #include <sys/prctl.h>
16 #include <sys/types.h>
17 #include <signal.h>
18 #include <time.h>
19 #include "vm_util.h"
20 
21 #define TWOMEG		(2<<20)
22 #define RUNTIME		(20)
23 #define MAX_RETRIES	100
24 #define ALIGN(x, a)	(((x) + (a - 1)) & (~((a) - 1)))
25 
FIXTURE(migration)
{
	pthread_t *threads;	/* worker threads for the pthread-based tests */
	pid_t *pids;		/* child PIDs for the fork-based tests */
	int nthreads;		/* numa_num_task_cpus() - 1, workers to spawn */
	int n1;			/* first NUMA node to migrate between, -1 if none */
	int n2;			/* second NUMA node to migrate between, -1 if none */
};
34 
35 FIXTURE_SETUP(migration)
36 {
37 	int n;
38 
39 	if (numa_available() < 0)
40 		SKIP(return, "NUMA not available");
41 	self->nthreads = numa_num_task_cpus() - 1;
42 	self->n1 = -1;
43 	self->n2 = -1;
44 
45 	for (n = 0; n < numa_max_possible_node(); n++)
46 		if (numa_bitmask_isbitset(numa_all_nodes_ptr, n)) {
47 			if (self->n1 == -1) {
48 				self->n1 = n;
49 			} else {
50 				self->n2 = n;
51 				break;
52 			}
53 		}
54 
55 	self->threads = malloc(self->nthreads * sizeof(*self->threads));
56 	ASSERT_NE(self->threads, NULL);
57 	self->pids = malloc(self->nthreads * sizeof(*self->pids));
58 	ASSERT_NE(self->pids, NULL);
59 };
60 
61 FIXTURE_TEARDOWN(migration)
62 {
63 	free(self->threads);
64 	free(self->pids);
65 }
66 
67 int migrate(uint64_t *ptr, int n1, int n2)
68 {
69 	int ret, tmp;
70 	int status = 0;
71 	struct timespec ts1, ts2;
72 	int failures = 0;
73 
74 	if (clock_gettime(CLOCK_MONOTONIC, &ts1))
75 		return -1;
76 
77 	while (1) {
78 		if (clock_gettime(CLOCK_MONOTONIC, &ts2))
79 			return -1;
80 
81 		if (ts2.tv_sec - ts1.tv_sec >= RUNTIME)
82 			return 0;
83 
84 		ret = move_pages(0, 1, (void **) &ptr, &n2, &status,
85 				MPOL_MF_MOVE_ALL);
86 		if (ret) {
87 			if (ret > 0) {
88 				/* Migration is best effort; try again */
89 				if (++failures < MAX_RETRIES)
90 					continue;
91 				printf("Didn't migrate %d pages\n", ret);
92 			}
93 			else
94 				perror("Couldn't migrate pages");
95 			return -2;
96 		}
97 		failures = 0;
98 		tmp = n2;
99 		n2 = n1;
100 		n1 = tmp;
101 	}
102 
103 	return 0;
104 }
105 
/*
 * Worker body (pthread start routine and fork()ed child loop): spin
 * reading the first word of the mapping so the CPU keeps hitting (and
 * waiting on) migration entries installed by the migrating thread.
 */
void *access_mem(void *ptr)
{
	uint64_t *word = ptr;

	for (;;) {
		/* Let pthread_cancel() from the test body terminate us. */
		pthread_testcancel();
		/*
		 * FORCE_READ defeats the optimizer so the load is really
		 * issued on every iteration rather than hoisted away.
		 */
		FORCE_READ(*word);
	}

	return NULL;
}
119 
120 /*
121  * Basic migration entry testing. One thread will move pages back and forth
122  * between nodes whilst other threads try and access them triggering the
123  * migration entry wait paths in the kernel.
124  */
125 TEST_F_TIMEOUT(migration, private_anon, 2*RUNTIME)
126 {
127 	uint64_t *ptr;
128 	int i;
129 
130 	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
131 		SKIP(return, "Not enough threads or NUMA nodes available");
132 
133 	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
134 		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
135 	ASSERT_NE(ptr, MAP_FAILED);
136 
137 	memset(ptr, 0xde, TWOMEG);
138 	for (i = 0; i < self->nthreads - 1; i++)
139 		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
140 			perror("Couldn't create thread");
141 
142 	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
143 	for (i = 0; i < self->nthreads - 1; i++)
144 		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
145 }
146 
147 /*
148  * Same as the previous test but with shared memory.
149  */
150 TEST_F_TIMEOUT(migration, shared_anon, 2*RUNTIME)
151 {
152 	pid_t pid;
153 	uint64_t *ptr;
154 	int i;
155 
156 	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
157 		SKIP(return, "Not enough threads or NUMA nodes available");
158 
159 	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
160 		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
161 	ASSERT_NE(ptr, MAP_FAILED);
162 
163 	memset(ptr, 0xde, TWOMEG);
164 	for (i = 0; i < self->nthreads - 1; i++) {
165 		pid = fork();
166 		if (!pid) {
167 			prctl(PR_SET_PDEATHSIG, SIGHUP);
168 			/* Parent may have died before prctl so check now. */
169 			if (getppid() == 1)
170 				kill(getpid(), SIGHUP);
171 			access_mem(ptr);
172 		} else {
173 			self->pids[i] = pid;
174 		}
175 	}
176 
177 	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
178 	for (i = 0; i < self->nthreads - 1; i++)
179 		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
180 }
181 
182 /*
183  * Tests the pmd migration entry paths.
184  */
185 TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME)
186 {
187 	uint64_t *ptr;
188 	int i;
189 
190 	if (!thp_is_enabled())
191 		SKIP(return, "Transparent Hugepages not available");
192 
193 	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
194 		SKIP(return, "Not enough threads or NUMA nodes available");
195 
196 	ptr = mmap(NULL, 2*TWOMEG, PROT_READ | PROT_WRITE,
197 		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
198 	ASSERT_NE(ptr, MAP_FAILED);
199 
200 	ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
201 	ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
202 	memset(ptr, 0xde, TWOMEG);
203 	for (i = 0; i < self->nthreads - 1; i++)
204 		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
205 			perror("Couldn't create thread");
206 
207 	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
208 	for (i = 0; i < self->nthreads - 1; i++)
209 		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
210 }
211 
212 /*
213  * migration test with shared anon THP page
214  */
215 
216 TEST_F_TIMEOUT(migration, shared_anon_thp, 2*RUNTIME)
217 {
218 	pid_t pid;
219 	uint64_t *ptr;
220 	int i;
221 
222 	if (!thp_is_enabled())
223 		SKIP(return, "Transparent Hugepages not available");
224 
225 	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
226 		SKIP(return, "Not enough threads or NUMA nodes available");
227 
228 	ptr = mmap(NULL, 2 * TWOMEG, PROT_READ | PROT_WRITE,
229 		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
230 	ASSERT_NE(ptr, MAP_FAILED);
231 
232 	ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
233 	ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
234 
235 	memset(ptr, 0xde, TWOMEG);
236 	for (i = 0; i < self->nthreads - 1; i++) {
237 		pid = fork();
238 		if (!pid) {
239 			prctl(PR_SET_PDEATHSIG, SIGHUP);
240 			/* Parent may have died before prctl so check now. */
241 			if (getppid() == 1)
242 				kill(getpid(), SIGHUP);
243 			access_mem(ptr);
244 		} else {
245 			self->pids[i] = pid;
246 		}
247 	}
248 
249 	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
250 	for (i = 0; i < self->nthreads - 1; i++)
251 		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
252 }
253 
254 /*
255  * migration test with private anon hugetlb page
256  */
257 TEST_F_TIMEOUT(migration, private_anon_htlb, 2*RUNTIME)
258 {
259 	uint64_t *ptr;
260 	int i;
261 
262 	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
263 		SKIP(return, "Not enough threads or NUMA nodes available");
264 
265 	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
266 		MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
267 	ASSERT_NE(ptr, MAP_FAILED);
268 
269 	memset(ptr, 0xde, TWOMEG);
270 	for (i = 0; i < self->nthreads - 1; i++)
271 		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
272 			perror("Couldn't create thread");
273 
274 	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
275 	for (i = 0; i < self->nthreads - 1; i++)
276 		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
277 }
278 
279 /*
280  * migration test with shared anon hugetlb page
281  */
282 TEST_F_TIMEOUT(migration, shared_anon_htlb, 2*RUNTIME)
283 {
284 	pid_t pid;
285 	uint64_t *ptr;
286 	int i;
287 
288 	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
289 		SKIP(return, "Not enough threads or NUMA nodes available");
290 
291 	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
292 		MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
293 	ASSERT_NE(ptr, MAP_FAILED);
294 
295 	memset(ptr, 0xde, TWOMEG);
296 	for (i = 0; i < self->nthreads - 1; i++) {
297 		pid = fork();
298 		if (!pid) {
299 			prctl(PR_SET_PDEATHSIG, SIGHUP);
300 			/* Parent may have died before prctl so check now. */
301 			if (getppid() == 1)
302 				kill(getpid(), SIGHUP);
303 			access_mem(ptr);
304 		} else {
305 			self->pids[i] = pid;
306 		}
307 	}
308 
309 	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
310 	for (i = 0; i < self->nthreads - 1; i++)
311 		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
312 }
313 
314 TEST_HARNESS_MAIN
315