xref: /linux/tools/testing/selftests/mm/hugepage-vmemmap.c (revision 9e56ff53b4115875667760445b028357848b4748)
// SPDX-License-Identifier: GPL-2.0
/*
 * A test case of using hugepage memory in a user application via the
 * mmap system call with the MAP_HUGETLB flag.  Before running this program
 * make sure the administrator has reserved enough default-sized huge
 * pages to cover the allocation (one default-sized huge page).
 */
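/*
 * Default-sized huge pages can typically be reserved at run time with, e.g.,
 * "echo 1 > /proc/sys/vm/nr_hugepages", or at boot with the "hugepages="
 * kernel command-line parameter.
 */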
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
#include "vm_util.h"

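/*
 * Bit positions reported in /proc/kpageflags, matching KPF_COMPOUND_HEAD,
 * KPF_COMPOUND_TAIL and KPF_HUGE as described in
 * Documentation/admin-guide/mm/pagemap.rst.
 */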
#define PAGE_COMPOUND_HEAD	(1UL << 15)
#define PAGE_COMPOUND_TAIL	(1UL << 16)
#define PAGE_HUGE		(1UL << 17)

#define HEAD_PAGE_FLAGS		(PAGE_COMPOUND_HEAD | PAGE_HUGE)
#define TAIL_PAGE_FLAGS		(PAGE_COMPOUND_TAIL | PAGE_HUGE)

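/* Bits 0-54 of a /proc/self/pagemap entry hold the page frame number. */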
#define PM_PFRAME_BITS		55
#define PM_PFRAME_MASK		~((1UL << PM_PFRAME_BITS) - 1)

/*
 * On the ia64 architecture, the Linux kernel reserves region number 4 for
 * hugepages.  That means addresses starting with 0x800000... need to be
 * specified explicitly.  Specifying a fixed address is not required on
 * ppc64, i386 or x86_64.
 */
#ifdef __ia64__
#define MAP_ADDR		(void *)(0x8000000000000000UL)
#define MAP_FLAGS		(MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_FIXED)
#else
#define MAP_ADDR		NULL
#define MAP_FLAGS		(MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB)
#endif

static size_t pagesize;
static size_t maplength;

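/* Touch every byte so the kernel actually allocates the huge page. */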
static void write_bytes(char *addr, size_t length)
{
	unsigned long i;

	for (i = 0; i < length; i++)
		*(addr + i) = (char)i;
}

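/*
 * Look up the PFN backing a virtual address by reading its 8-byte entry
 * from /proc/self/pagemap, which is indexed by virtual page number.
 */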
static unsigned long virt_to_pfn(void *addr)
{
	int fd;
	unsigned long pagemap;

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		return -1UL;

	lseek(fd, (unsigned long)addr / pagesize * sizeof(pagemap), SEEK_SET);
	if (read(fd, &pagemap, sizeof(pagemap)) != sizeof(pagemap)) {
		close(fd);
		return -1UL;
	}
	close(fd);

	return pagemap & ~PM_PFRAME_MASK;
}

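/*
 * Read the per-page flags from /proc/kpageflags, which is indexed by PFN,
 * and verify that the first page of the mapping looks like a compound head
 * while the remaining pages look like compound tails.
 */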
static int check_page_flags(unsigned long pfn)
{
	int fd, i;
	unsigned long pageflags;

	fd = open("/proc/kpageflags", O_RDONLY);
	if (fd < 0)
		return -1;

	lseek(fd, pfn * sizeof(pageflags), SEEK_SET);

	read(fd, &pageflags, sizeof(pageflags));
	if ((pageflags & HEAD_PAGE_FLAGS) != HEAD_PAGE_FLAGS) {
		close(fd);
		printf("Head page flags (%lx) are invalid\n", pageflags);
		return -1;
	}

	/*
	 * Pages other than the first one must be tail pages and must not be
	 * head pages; this also verifies that the kernel correctly marks the
	 * faked head pages as tails when hugetlb_free_vmemmap is enabled.
	 */
	for (i = 1; i < maplength / pagesize; i++) {
		read(fd, &pageflags, sizeof(pageflags));
		if ((pageflags & TAIL_PAGE_FLAGS) != TAIL_PAGE_FLAGS ||
		    (pageflags & HEAD_PAGE_FLAGS) == HEAD_PAGE_FLAGS) {
			close(fd);
			printf("Tail page flags (%lx) are invalid\n", pageflags);
			return -1;
		}
	}

	close(fd);

	return 0;
}

int main(int argc, char **argv)
{
	void *addr;
	unsigned long pfn;

	pagesize  = psize();
	maplength = default_huge_page_size();
	if (!maplength) {
		printf("Unable to determine huge page size\n");
		exit(1);
	}

	addr = mmap(MAP_ADDR, maplength, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}

	/* Trigger allocation of HugeTLB page. */
	write_bytes(addr, maplength);

	pfn = virt_to_pfn(addr);
	if (pfn == -1UL) {
		munmap(addr, maplength);
		perror("virt_to_pfn");
		exit(1);
	}

	printf("Returned address is %p whose pfn is %lx\n", addr, pfn);

	if (check_page_flags(pfn) < 0) {
		munmap(addr, maplength);
		perror("check_page_flags");
		exit(1);
	}

	/* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
	if (munmap(addr, maplength)) {
		perror("munmap");
		exit(1);
	}

	return 0;
}