// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for the kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/elf.h>

#ifdef CONFIG_HUGETLB_PAGE
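/*
 * Bottom-up search: find a free, huge-page-aligned range of @len bytes,
 * scanning upward from the legacy mmap base.
 */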
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If the hint address is above DEFAULT_MAP_WINDOW, look for an
	 * unmapped area in the full address space.
	 */
	info.high_limit = in_32bit_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

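	/* The returned address must be aligned to the huge page size. */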
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

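/*
 * Top-down search: scan downward from the mmap base toward PAGE_SIZE,
 * falling back to a bottom-up search if that window is exhausted.
 */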
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If the hint address is above DEFAULT_MAP_WINDOW, look for an
	 * unmapped area in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
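	/* Error codes are negative and therefore never page-aligned. */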
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

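/*
 * Arch-specific get_unmapped_area() for hugetlb mappings: validate the
 * length and the hint address, then search bottom-up or top-down to
 * match the process's mmap layout.
 */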
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

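	/*
	 * Try the hint first: align it down to the huge page boundary and
	 * use it directly if the range there is still unmapped.
	 */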
	if (addr) {
		addr &= huge_page_mask(h);
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}

get_unmapped_area:
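	/* The legacy mmap layout allocates bottom-up, the default top-down. */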
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
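/*
 * 2MB (PMD) pages are always supported; 1GB (PUD) pages additionally
 * require the CPU's GBPAGES feature.
 */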
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (size == PMD_SIZE)
		return true;
	else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))
		return true;
	else
		return false;
}

#ifdef CONFIG_CONTIG_ALLOC
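/*
 * Register the 1GB hstate at boot so that gigantic pages can be
 * allocated later at runtime.
 */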
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif /* CONFIG_CONTIG_ALLOC */
#endif /* CONFIG_X86_64 */