xref: /linux/tools/testing/selftests/mm/migration.c (revision c5e67d40a10234541e220750297304df79aaedd0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * The main purpose of the tests here is to exercise the migration entry code
4  * paths in the kernel.
5  */
6 
7 #include "../kselftest_harness.h"
8 #include "thp_settings.h"
9 
10 #include <strings.h>
11 #include <pthread.h>
12 #include <numa.h>
13 #include <numaif.h>
14 #include <sys/mman.h>
15 #include <sys/prctl.h>
16 #include <sys/types.h>
17 #include <signal.h>
18 #include <time.h>
19 
#define TWOMEG		(2<<20)		/* 2 MiB: one PMD-sized (THP/hugetlb) region */
#define RUNTIME		(20)		/* seconds each test keeps migrating for */
#define MAX_RETRIES	100		/* consecutive best-effort move_pages() retries */
/*
 * Round x up to the next multiple of a (a must be a power of two).
 * Every use of a macro argument must be parenthesized: with the
 * previous "(a - 1)" form an argument such as "1 << 3" mis-expanded
 * to "1 << 3 - 1" because of operator precedence.
 */
#define ALIGN(x, a)	(((x) + ((a) - 1)) & (~((a) - 1)))
24 
/*
 * Per-test state shared by all migration tests: worker bookkeeping plus
 * the two NUMA nodes that pages will be ping-ponged between.
 */
FIXTURE(migration)
{
	pthread_t *threads;	/* worker threads (private-mapping tests) */
	pid_t *pids;		/* forked workers (shared-mapping tests) */
	int nthreads;		/* number of workers: task CPUs - 1 */
	int n1;			/* first node found in numa_all_nodes_ptr, or -1 */
	int n2;			/* second such node, or -1 if only one exists */
};
33 
34 FIXTURE_SETUP(migration)
35 {
36 	int n;
37 
38 	ASSERT_EQ(numa_available(), 0);
39 	self->nthreads = numa_num_task_cpus() - 1;
40 	self->n1 = -1;
41 	self->n2 = -1;
42 
43 	for (n = 0; n < numa_max_possible_node(); n++)
44 		if (numa_bitmask_isbitset(numa_all_nodes_ptr, n)) {
45 			if (self->n1 == -1) {
46 				self->n1 = n;
47 			} else {
48 				self->n2 = n;
49 				break;
50 			}
51 		}
52 
53 	self->threads = malloc(self->nthreads * sizeof(*self->threads));
54 	ASSERT_NE(self->threads, NULL);
55 	self->pids = malloc(self->nthreads * sizeof(*self->pids));
56 	ASSERT_NE(self->pids, NULL);
57 };
58 
59 FIXTURE_TEARDOWN(migration)
60 {
61 	free(self->threads);
62 	free(self->pids);
63 }
64 
65 int migrate(uint64_t *ptr, int n1, int n2)
66 {
67 	int ret, tmp;
68 	int status = 0;
69 	struct timespec ts1, ts2;
70 	int failures = 0;
71 
72 	if (clock_gettime(CLOCK_MONOTONIC, &ts1))
73 		return -1;
74 
75 	while (1) {
76 		if (clock_gettime(CLOCK_MONOTONIC, &ts2))
77 			return -1;
78 
79 		if (ts2.tv_sec - ts1.tv_sec >= RUNTIME)
80 			return 0;
81 
82 		ret = move_pages(0, 1, (void **) &ptr, &n2, &status,
83 				MPOL_MF_MOVE_ALL);
84 		if (ret) {
85 			if (ret > 0) {
86 				/* Migration is best effort; try again */
87 				if (++failures < MAX_RETRIES)
88 					continue;
89 				printf("Didn't migrate %d pages\n", ret);
90 			}
91 			else
92 				perror("Couldn't migrate pages");
93 			return -2;
94 		}
95 		failures = 0;
96 		tmp = n2;
97 		n2 = n1;
98 		n1 = tmp;
99 	}
100 
101 	return 0;
102 }
103 
104 void *access_mem(void *ptr)
105 {
106 	volatile uint64_t y = 0;
107 	volatile uint64_t *x = ptr;
108 
109 	while (1) {
110 		pthread_testcancel();
111 		y += *x;
112 
113 		/* Prevent the compiler from optimizing out the writes to y: */
114 		asm volatile("" : "+r" (y));
115 	}
116 
117 	return NULL;
118 }
119 
120 /*
121  * Basic migration entry testing. One thread will move pages back and forth
122  * between nodes whilst other threads try and access them triggering the
123  * migration entry wait paths in the kernel.
124  */
125 TEST_F_TIMEOUT(migration, private_anon, 2*RUNTIME)
126 {
127 	uint64_t *ptr;
128 	int i;
129 
130 	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
131 		SKIP(return, "Not enough threads or NUMA nodes available");
132 
133 	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
134 		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
135 	ASSERT_NE(ptr, MAP_FAILED);
136 
137 	memset(ptr, 0xde, TWOMEG);
138 	for (i = 0; i < self->nthreads - 1; i++)
139 		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
140 			perror("Couldn't create thread");
141 
142 	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
143 	for (i = 0; i < self->nthreads - 1; i++)
144 		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
145 }
146 
147 /*
148  * Same as the previous test but with shared memory.
149  */
150 TEST_F_TIMEOUT(migration, shared_anon, 2*RUNTIME)
151 {
152 	pid_t pid;
153 	uint64_t *ptr;
154 	int i;
155 
156 	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
157 		SKIP(return, "Not enough threads or NUMA nodes available");
158 
159 	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
160 		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
161 	ASSERT_NE(ptr, MAP_FAILED);
162 
163 	memset(ptr, 0xde, TWOMEG);
164 	for (i = 0; i < self->nthreads - 1; i++) {
165 		pid = fork();
166 		if (!pid) {
167 			prctl(PR_SET_PDEATHSIG, SIGHUP);
168 			/* Parent may have died before prctl so check now. */
169 			if (getppid() == 1)
170 				kill(getpid(), SIGHUP);
171 			access_mem(ptr);
172 		} else {
173 			self->pids[i] = pid;
174 		}
175 	}
176 
177 	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
178 	for (i = 0; i < self->nthreads - 1; i++)
179 		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
180 }
181 
182 /*
183  * Tests the pmd migration entry paths.
184  */
185 TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME)
186 {
187 	uint64_t *ptr;
188 	int i;
189 
190 	if (!thp_is_enabled())
191 		SKIP(return, "Transparent Hugepages not available");
192 
193 	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
194 		SKIP(return, "Not enough threads or NUMA nodes available");
195 
196 	ptr = mmap(NULL, 2*TWOMEG, PROT_READ | PROT_WRITE,
197 		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
198 	ASSERT_NE(ptr, MAP_FAILED);
199 
200 	ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
201 	ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
202 	memset(ptr, 0xde, TWOMEG);
203 	for (i = 0; i < self->nthreads - 1; i++)
204 		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
205 			perror("Couldn't create thread");
206 
207 	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
208 	for (i = 0; i < self->nthreads - 1; i++)
209 		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
210 }
211 
212 /*
213  * migration test with shared anon THP page
214  */
215 
216 TEST_F_TIMEOUT(migration, shared_anon_thp, 2*RUNTIME)
217 {
218 	pid_t pid;
219 	uint64_t *ptr;
220 	int i;
221 
222 	if (!thp_is_enabled())
223 		SKIP(return, "Transparent Hugepages not available");
224 
225 	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
226 		SKIP(return, "Not enough threads or NUMA nodes available");
227 
228 	ptr = mmap(NULL, 2 * TWOMEG, PROT_READ | PROT_WRITE,
229 		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
230 	ASSERT_NE(ptr, MAP_FAILED);
231 
232 	ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
233 	ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
234 
235 	memset(ptr, 0xde, TWOMEG);
236 	for (i = 0; i < self->nthreads - 1; i++) {
237 		pid = fork();
238 		if (!pid) {
239 			prctl(PR_SET_PDEATHSIG, SIGHUP);
240 			/* Parent may have died before prctl so check now. */
241 			if (getppid() == 1)
242 				kill(getpid(), SIGHUP);
243 			access_mem(ptr);
244 		} else {
245 			self->pids[i] = pid;
246 		}
247 	}
248 
249 	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
250 	for (i = 0; i < self->nthreads - 1; i++)
251 		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
252 }
253 
254 /*
255  * migration test with private anon hugetlb page
256  */
257 TEST_F_TIMEOUT(migration, private_anon_htlb, 2*RUNTIME)
258 {
259 	uint64_t *ptr;
260 	int i;
261 
262 	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
263 		SKIP(return, "Not enough threads or NUMA nodes available");
264 
265 	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
266 		MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
267 	ASSERT_NE(ptr, MAP_FAILED);
268 
269 	memset(ptr, 0xde, TWOMEG);
270 	for (i = 0; i < self->nthreads - 1; i++)
271 		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
272 			perror("Couldn't create thread");
273 
274 	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
275 	for (i = 0; i < self->nthreads - 1; i++)
276 		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
277 }
278 
279 /*
280  * migration test with shared anon hugetlb page
281  */
282 TEST_F_TIMEOUT(migration, shared_anon_htlb, 2*RUNTIME)
283 {
284 	pid_t pid;
285 	uint64_t *ptr;
286 	int i;
287 
288 	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
289 		SKIP(return, "Not enough threads or NUMA nodes available");
290 
291 	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
292 		MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
293 	ASSERT_NE(ptr, MAP_FAILED);
294 
295 	memset(ptr, 0xde, TWOMEG);
296 	for (i = 0; i < self->nthreads - 1; i++) {
297 		pid = fork();
298 		if (!pid) {
299 			prctl(PR_SET_PDEATHSIG, SIGHUP);
300 			/* Parent may have died before prctl so check now. */
301 			if (getppid() == 1)
302 				kill(getpid(), SIGHUP);
303 			access_mem(ptr);
304 		} else {
305 			self->pids[i] = pid;
306 		}
307 	}
308 
309 	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
310 	for (i = 0; i < self->nthreads - 1; i++)
311 		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
312 }
313 
/* Provided by kselftest_harness.h: expands to the program's main(). */
TEST_HARNESS_MAIN
315