// SPDX-License-Identifier: GPL-2.0
/*
 * A test case that uses hugepage memory in a user application via the
 * mmap system call with the MAP_HUGETLB flag.  Before running this
 * program, make sure the administrator has reserved enough default-sized
 * huge pages to cover the 2 MB allocation.
 */
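/*
 * Huge pages can typically be reserved in advance with, e.g.:
 *
 *   echo 1 > /proc/sys/vm/nr_hugepages
 *
 * (this assumes a 2 MB default hugepage size, so one page covers the
 * whole mapping).
 */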
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>

#define MAP_LENGTH		(2UL * 1024 * 1024)

#ifndef MAP_HUGETLB
#define MAP_HUGETLB		0x40000	/* arch specific */
#endif

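/* The test assumes a 4 KB base page size (and a 2 MB default hugepage size). */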
#define PAGE_SIZE		4096

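/*
 * Bit positions of the relevant page flags in /proc/kpageflags entries;
 * see Documentation/admin-guide/mm/pagemap.rst (KPF_COMPOUND_HEAD,
 * KPF_COMPOUND_TAIL, KPF_HUGE).
 */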
#define PAGE_COMPOUND_HEAD	(1UL << 15)
#define PAGE_COMPOUND_TAIL	(1UL << 16)
#define PAGE_HUGE		(1UL << 17)

#define HEAD_PAGE_FLAGS		(PAGE_COMPOUND_HEAD | PAGE_HUGE)
#define TAIL_PAGE_FLAGS		(PAGE_COMPOUND_TAIL | PAGE_HUGE)

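/* Bits 0-54 of a /proc/self/pagemap entry hold the page frame number. */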
#define PM_PFRAME_BITS		55
#define PM_PFRAME_MASK		~((1UL << PM_PFRAME_BITS) - 1)

/*
 * For the ia64 architecture, the Linux kernel reserves region number 4 for
 * hugepages.  That means addresses starting with 0x800000... will need to
 * be specified.  Specifying a fixed address is not required on ppc64, i386
 * or x86_64.
 */
#ifdef __ia64__
#define MAP_ADDR		(void *)(0x8000000000000000UL)
#define MAP_FLAGS		(MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_FIXED)
#else
#define MAP_ADDR		NULL
#define MAP_FLAGS		(MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB)
#endif

static void write_bytes(char *addr, size_t length)
{
	unsigned long i;

	for (i = 0; i < length; i++)
		*(addr + i) = (char)i;
}

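/*
 * Translate a virtual address to its page frame number by reading the
 * corresponding /proc/self/pagemap entry.  On recent kernels the PFN
 * field is reported as zero to readers without CAP_SYS_ADMIN, so the
 * test is expected to run as root.
 */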
static unsigned long virt_to_pfn(void *addr)
{
	int fd;
	unsigned long pagemap;

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		return -1UL;

	/* Each 8-byte pagemap entry is indexed by virtual page number. */
	if (lseek(fd, (unsigned long)addr / PAGE_SIZE * sizeof(pagemap), SEEK_SET) < 0 ||
	    read(fd, &pagemap, sizeof(pagemap)) != sizeof(pagemap)) {
		close(fd);
		return -1UL;
	}
	close(fd);

	return pagemap & ~PM_PFRAME_MASK;
}

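/*
 * Read one /proc/kpageflags entry per base page of the mapping and verify
 * that the first page is a compound head and all remaining pages are
 * compound tails.  /proc/kpageflags is typically readable only by root.
 */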
static int check_page_flags(unsigned long pfn)
{
	int fd, i;
	unsigned long pageflags;

	fd = open("/proc/kpageflags", O_RDONLY);
	if (fd < 0)
		return -1;

	if (lseek(fd, pfn * sizeof(pageflags), SEEK_SET) < 0) {
		close(fd);
		return -1;
	}

	/* The first entry describes the head page of the hugepage. */
	if (read(fd, &pageflags, sizeof(pageflags)) != sizeof(pageflags)) {
		close(fd);
		return -1;
	}
	if ((pageflags & HEAD_PAGE_FLAGS) != HEAD_PAGE_FLAGS) {
		close(fd);
		printf("Head page flags (%lx) are invalid\n", pageflags);
		return -1;
	}

	/*
	 * Pages other than the first one must be tail pages and must not be
	 * head pages; this also verifies that the kernel has correctly set
	 * the fake page head to tail while hugetlb_free_vmemmap is enabled.
	 */
	for (i = 1; i < MAP_LENGTH / PAGE_SIZE; i++) {
		if (read(fd, &pageflags, sizeof(pageflags)) != sizeof(pageflags)) {
			close(fd);
			return -1;
		}
		if ((pageflags & TAIL_PAGE_FLAGS) != TAIL_PAGE_FLAGS ||
		    (pageflags & HEAD_PAGE_FLAGS) == HEAD_PAGE_FLAGS) {
			close(fd);
			printf("Tail page flags (%lx) are invalid\n", pageflags);
			return -1;
		}
	}

	close(fd);

	return 0;
}

int main(int argc, char **argv)
{
	void *addr;
	unsigned long pfn;

	addr = mmap(MAP_ADDR, MAP_LENGTH, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}

	/* Trigger allocation of HugeTLB page. */
	write_bytes(addr, MAP_LENGTH);

	pfn = virt_to_pfn(addr);
	if (pfn == -1UL) {
		munmap(addr, MAP_LENGTH);
		perror("virt_to_pfn");
		exit(1);
	}

	printf("Returned address is %p whose pfn is %lx\n", addr, pfn);

	if (check_page_flags(pfn) < 0) {
		munmap(addr, MAP_LENGTH);
		perror("check_page_flags");
		exit(1);
	}

	/* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
	if (munmap(addr, MAP_LENGTH)) {
		perror("munmap");
		exit(1);
	}

	return 0;
}