xref: /linux/tools/testing/selftests/mm/merge.c (revision cfc4ca8986bb1f6182da6cd7bb57f228590b4643)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #define _GNU_SOURCE
4 #include "../kselftest_harness.h"
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <unistd.h>
8 #include <sys/mman.h>
9 #include <sys/wait.h>
10 #include "vm_util.h"
11 
/*
 * Shared state for every merge test: a PROT_NONE carveout region that the
 * tests MAP_FIXED over, and a PROCMAP_QUERY handle used to verify whether
 * VMAs merged.
 */
FIXTURE(merge)
{
	unsigned int page_size;		/* System page size, cached from psize(). */
	char *carveout;			/* 12-page PROT_NONE reservation mapped over by tests. */
	struct procmap_fd procmap;	/* PROCMAP_QUERY handle (see vm_util.h). */
};
18 
/*
 * Per-test setup: reserve a 12-page PROT_NONE region for the tests to map
 * within, and open the PROCMAP_QUERY interface on this process.
 */
FIXTURE_SETUP(merge)
{
	self->page_size = psize();
	/* Carve out PROT_NONE region to map over. */
	self->carveout = mmap(NULL, 12 * self->page_size, PROT_NONE,
			      MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(self->carveout, MAP_FAILED);
	/* Setup PROCMAP_QUERY interface. */
	ASSERT_EQ(open_self_procmap(&self->procmap), 0);
}
29 
/* Per-test teardown: release the carveout and close the PROCMAP_QUERY fd. */
FIXTURE_TEARDOWN(merge)
{
	ASSERT_EQ(munmap(self->carveout, 12 * self->page_size), 0);
	ASSERT_EQ(close_procmap(&self->procmap), 0);
}
35 
36 TEST_F(merge, mprotect_unfaulted_left)
37 {
38 	unsigned int page_size = self->page_size;
39 	char *carveout = self->carveout;
40 	struct procmap_fd *procmap = &self->procmap;
41 	char *ptr;
42 
43 	/*
44 	 * Map 10 pages of R/W memory within. MAP_NORESERVE so we don't hit
45 	 * merge failure due to lack of VM_ACCOUNT flag by mistake.
46 	 *
47 	 * |-----------------------|
48 	 * |       unfaulted       |
49 	 * |-----------------------|
50 	 */
51 	ptr = mmap(&carveout[page_size], 10 * page_size, PROT_READ | PROT_WRITE,
52 		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
53 	ASSERT_NE(ptr, MAP_FAILED);
54 	/*
55 	 * Now make the first 5 pages read-only, splitting the VMA:
56 	 *
57 	 *      RO          RW
58 	 * |-----------|-----------|
59 	 * | unfaulted | unfaulted |
60 	 * |-----------|-----------|
61 	 */
62 	ASSERT_EQ(mprotect(ptr, 5 * page_size, PROT_READ), 0);
63 	/*
64 	 * Fault in the first of the last 5 pages so it gets an anon_vma and
65 	 * thus the whole VMA becomes 'faulted':
66 	 *
67 	 *      RO          RW
68 	 * |-----------|-----------|
69 	 * | unfaulted |  faulted  |
70 	 * |-----------|-----------|
71 	 */
72 	ptr[5 * page_size] = 'x';
73 	/*
74 	 * Now mprotect() the RW region read-only, we should merge (though for
75 	 * ~15 years we did not! :):
76 	 *
77 	 *             RO
78 	 * |-----------------------|
79 	 * |        faulted        |
80 	 * |-----------------------|
81 	 */
82 	ASSERT_EQ(mprotect(&ptr[5 * page_size], 5 * page_size, PROT_READ), 0);
83 
84 	/* Assert that the merge succeeded using PROCMAP_QUERY. */
85 	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
86 	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
87 	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
88 }
89 
90 TEST_F(merge, mprotect_unfaulted_right)
91 {
92 	unsigned int page_size = self->page_size;
93 	char *carveout = self->carveout;
94 	struct procmap_fd *procmap = &self->procmap;
95 	char *ptr;
96 
97 	/*
98 	 * |-----------------------|
99 	 * |       unfaulted       |
100 	 * |-----------------------|
101 	 */
102 	ptr = mmap(&carveout[page_size], 10 * page_size, PROT_READ | PROT_WRITE,
103 		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
104 	ASSERT_NE(ptr, MAP_FAILED);
105 	/*
106 	 * Now make the last 5 pages read-only, splitting the VMA:
107 	 *
108 	 *      RW          RO
109 	 * |-----------|-----------|
110 	 * | unfaulted | unfaulted |
111 	 * |-----------|-----------|
112 	 */
113 	ASSERT_EQ(mprotect(&ptr[5 * page_size], 5 * page_size, PROT_READ), 0);
114 	/*
115 	 * Fault in the first of the first 5 pages so it gets an anon_vma and
116 	 * thus the whole VMA becomes 'faulted':
117 	 *
118 	 *      RW          RO
119 	 * |-----------|-----------|
120 	 * |  faulted  | unfaulted |
121 	 * |-----------|-----------|
122 	 */
123 	ptr[0] = 'x';
124 	/*
125 	 * Now mprotect() the RW region read-only, we should merge:
126 	 *
127 	 *             RO
128 	 * |-----------------------|
129 	 * |        faulted        |
130 	 * |-----------------------|
131 	 */
132 	ASSERT_EQ(mprotect(ptr, 5 * page_size, PROT_READ), 0);
133 
134 	/* Assert that the merge succeeded using PROCMAP_QUERY. */
135 	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
136 	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
137 	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
138 }
139 
140 TEST_F(merge, mprotect_unfaulted_both)
141 {
142 	unsigned int page_size = self->page_size;
143 	char *carveout = self->carveout;
144 	struct procmap_fd *procmap = &self->procmap;
145 	char *ptr;
146 
147 	/*
148 	 * |-----------------------|
149 	 * |       unfaulted       |
150 	 * |-----------------------|
151 	 */
152 	ptr = mmap(&carveout[2 * page_size], 9 * page_size, PROT_READ | PROT_WRITE,
153 		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
154 	ASSERT_NE(ptr, MAP_FAILED);
155 	/*
156 	 * Now make the first and last 3 pages read-only, splitting the VMA:
157 	 *
158 	 *      RO          RW          RO
159 	 * |-----------|-----------|-----------|
160 	 * | unfaulted | unfaulted | unfaulted |
161 	 * |-----------|-----------|-----------|
162 	 */
163 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
164 	ASSERT_EQ(mprotect(&ptr[6 * page_size], 3 * page_size, PROT_READ), 0);
165 	/*
166 	 * Fault in the first of the middle 3 pages so it gets an anon_vma and
167 	 * thus the whole VMA becomes 'faulted':
168 	 *
169 	 *      RO          RW          RO
170 	 * |-----------|-----------|-----------|
171 	 * | unfaulted |  faulted  | unfaulted |
172 	 * |-----------|-----------|-----------|
173 	 */
174 	ptr[3 * page_size] = 'x';
175 	/*
176 	 * Now mprotect() the RW region read-only, we should merge:
177 	 *
178 	 *             RO
179 	 * |-----------------------|
180 	 * |        faulted        |
181 	 * |-----------------------|
182 	 */
183 	ASSERT_EQ(mprotect(&ptr[3 * page_size], 3 * page_size, PROT_READ), 0);
184 
185 	/* Assert that the merge succeeded using PROCMAP_QUERY. */
186 	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
187 	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
188 	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 9 * page_size);
189 }
190 
TEST_F(merge, mprotect_faulted_left_unfaulted_right)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[2 * page_size], 9 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the last 3 pages read-only, splitting the VMA:
	 *
	 *             RW               RO
	 * |-----------------------|-----------|
	 * |       unfaulted       | unfaulted |
	 * |-----------------------|-----------|
	 */
	ASSERT_EQ(mprotect(&ptr[6 * page_size], 3 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the first 6 pages so it gets an anon_vma and
	 * thus the whole RW VMA becomes 'faulted':
	 *
	 *             RW               RO
	 * |-----------------------|-----------|
	 * |        faulted        | unfaulted |
	 * |-----------------------|-----------|
	 */
	ptr[0] = 'x';
	/*
	 * Now make the first 3 pages read-only, splitting the VMA:
	 *
	 *      RO          RW          RO
	 * |-----------|-----------|-----------|
	 * |  faulted  |  faulted  | unfaulted |
	 * |-----------|-----------|-----------|
	 */
	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
	/*
	 * Now mprotect() the RW region read-only, we should merge:
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(&ptr[3 * page_size], 3 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 9 * page_size);
}
249 
250 TEST_F(merge, mprotect_unfaulted_left_faulted_right)
251 {
252 	unsigned int page_size = self->page_size;
253 	char *carveout = self->carveout;
254 	struct procmap_fd *procmap = &self->procmap;
255 	char *ptr;
256 
257 	/*
258 	 * |-----------------------|
259 	 * |       unfaulted       |
260 	 * |-----------------------|
261 	 */
262 	ptr = mmap(&carveout[2 * page_size], 9 * page_size, PROT_READ | PROT_WRITE,
263 		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
264 	ASSERT_NE(ptr, MAP_FAILED);
265 	/*
266 	 * Now make the first 3 pages read-only, splitting the VMA:
267 	 *
268 	 *      RO                RW
269 	 * |-----------|-----------------------|
270 	 * | unfaulted |       unfaulted       |
271 	 * |-----------|-----------------------|
272 	 */
273 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
274 	/*
275 	 * Fault in the first of the last 6 pages so it gets an anon_vma and
276 	 * thus the whole VMA becomes 'faulted':
277 	 *
278 	 *      RO                RW
279 	 * |-----------|-----------------------|
280 	 * | unfaulted |        faulted        |
281 	 * |-----------|-----------------------|
282 	 */
283 	ptr[3 * page_size] = 'x';
284 	/*
285 	 * Now make the last 3 pages read-only, splitting the VMA:
286 	 *
287 	 *      RO          RW          RO
288 	 * |-----------|-----------|-----------|
289 	 * | unfaulted |  faulted  |  faulted  |
290 	 * |-----------|-----------|-----------|
291 	 */
292 	ASSERT_EQ(mprotect(&ptr[6 * page_size], 3 * page_size, PROT_READ), 0);
293 	/*
294 	 * Now mprotect() the RW region read-only, we should merge:
295 	 *
296 	 *             RO
297 	 * |-----------------------|
298 	 * |        faulted        |
299 	 * |-----------------------|
300 	 */
301 	ASSERT_EQ(mprotect(&ptr[3 * page_size], 3 * page_size, PROT_READ), 0);
302 
303 	/* Assert that the merge succeeded using PROCMAP_QUERY. */
304 	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
305 	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
306 	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 9 * page_size);
307 }
308 
/*
 * A VMA copied into a child by fork() retains anon_vma state from the parent
 * (via its anon_vma_chain). Such a forked VMA must NOT merge with a newly
 * mapped, unfaulted neighbour — this test asserts the merge does not happen.
 */
TEST_F(merge, forked_target_vma)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	pid_t pid;
	char *ptr, *ptr2;
	int i;

	/*
	 * |-----------|
	 * | unfaulted |
	 * |-----------|
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Fault in process.
	 *
	 * |-----------|
	 * |  faulted  |
	 * |-----------|
	 */
	ptr[0] = 'x';

	pid = fork();
	ASSERT_NE(pid, -1);

	/* Parent: just reap the child; all assertions below run in the child. */
	if (pid != 0) {
		wait(NULL);
		return;
	}

	/* Child process below: */

	/* Reopen for child. */
	ASSERT_EQ(close_procmap(&self->procmap), 0);
	ASSERT_EQ(open_self_procmap(&self->procmap), 0);

	/* unCOWing everything does not cause the AVC to go away. */
	for (i = 0; i < 5 * page_size; i += page_size)
		ptr[i] = 'x';

	/*
	 * Map in adjacent VMA in child.
	 *
	 *     forked
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 *
	 * ptr2 is prot/flag-compatible with ptr, but ptr is a forked VMA so
	 * the two must remain separate.
	 */
	ptr2 = mmap(&ptr[5 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Make sure not merged: ptr's VMA still ends after exactly 5 pages. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 5 * page_size);
}
372 
/*
 * Same as forked_target_vma, but here the forked VMA would be the SOURCE of
 * the merge (expanding rightwards into an unfaulted neighbour). The retained
 * anon_vma state from fork() must prevent this merge too.
 */
TEST_F(merge, forked_source_vma)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	pid_t pid;
	char *ptr, *ptr2;
	int i;

	/*
	 * |-----------|------------|
	 * | unfaulted | <unmapped> |
	 * |-----------|------------|
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Fault in process.
	 *
	 * |-----------|------------|
	 * |  faulted  | <unmapped> |
	 * |-----------|------------|
	 */
	ptr[0] = 'x';

	pid = fork();
	ASSERT_NE(pid, -1);

	/* Parent: just reap the child; all assertions below run in the child. */
	if (pid != 0) {
		wait(NULL);
		return;
	}

	/* Child process below: */

	/* Reopen for child. */
	ASSERT_EQ(close_procmap(&self->procmap), 0);
	ASSERT_EQ(open_self_procmap(&self->procmap), 0);

	/* unCOWing everything does not cause the AVC to go away. */
	for (i = 0; i < 5 * page_size; i += page_size)
		ptr[i] = 'x';

	/*
	 * Map in adjacent VMA in child, ptr2 after ptr, but incompatible
	 * (differing protection, RWX vs. RW), so no merge can happen yet.
	 *
	 *   forked RW      RWX
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 *      ptr        ptr2
	 */
	ptr2 = mmap(&carveout[6 * page_size], 5 * page_size, PROT_READ | PROT_WRITE | PROT_EXEC,
		   MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Make sure not merged. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 5 * page_size);

	/*
	 * Now mprotect forked region to RWX so it becomes the source for the
	 * merge to unfaulted region:
	 *
	 *  forked RWX      RWX
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 *
	 * This should NOT result in a merge, as ptr was forked.
	 */
	ASSERT_EQ(mprotect(ptr, 5 * page_size, PROT_READ | PROT_WRITE | PROT_EXEC), 0);
	/* Again, make sure not merged: ptr2 remains a distinct 5-page VMA. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 5 * page_size);
}
454 
455 TEST_HARNESS_MAIN
456