/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share even if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;

	/*
	 * Match the virtual addresses, permissions and the alignment of
	 * the page-table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}
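
/*
 * Worked example (hypothetical numbers): suppose svma maps the file
 * from vm_pgoff 0 at svma->vm_start == 0x40000000 and idx == 0x300.
 * Then saddr == 0x40000000 + (0x300 << PAGE_SHIFT) == 0x40300000.
 * That candidate is usable only if it sits at the same pmd slot as
 * addr, the vm_flags match modulo VM_LOCKED, and svma spans the whole
 * PUD_SIZE region around saddr, so a pmd page borrowed from svma
 * cannot map anything outside either vma.
 */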

static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * Check for proper vm_flags and page-table alignment.
	 */
	if (vma->vm_flags & VM_MAYSHARE &&
	    vma->vm_start <= base && end <= vma->vm_end)
		return 1;
	return 0;
}
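
/*
 * Note that only VM_MAYSHARE mappings qualify, and the vma must cover
 * the entire PUD_SIZE region around addr: a shared pmd page maps the
 * whole region, so partial coverage would leak mappings beyond the vma.
 */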

/*
 * Search for a shareable pmd page for hugetlb. If one is found, take a
 * reference on it and install it in our pud; otherwise leave the pud
 * untouched so the caller allocates a fresh pmd page.
 */
static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct prio_tree_iter iter;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;

	if (!vma_shareable(vma, addr))
		return;

	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr);
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	spin_lock(&mm->page_table_lock);
	if (pud_none(*pud))
		pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK));
	else
		/* Raced: the pud was populated meanwhile, drop our ref. */
		put_page(virt_to_page(spte));
	spin_unlock(&mm->page_table_lock);
out:
	mutex_unlock(&mapping->i_mmap_mutex);
}
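
/*
 * Sharing scenario (illustrative): task A already has a pmd page
 * mapping huge pages for a PUD_SIZE slice of the file. When task B
 * faults in the same slice through its own VM_MAYSHARE vma, the loop
 * above finds A's vma in the prio tree, get_page() pins A's pmd page,
 * and pud_populate() wires it into B's page tables; B's pud entry now
 * points at the very same pmd page, and the page's refcount tracks the
 * number of sharers.
 */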

/*
 * Unmap a huge page backed by a shared pte.
 *
 * The hugetlb pte page is refcounted at the time of mapping. If the pte
 * is shared, as indicated by page_count > 1, unmapping is achieved by
 * clearing the pud and decrementing the refcount. If the count == 1,
 * the pte page is not shared.
 *
 * Called with vma->vm_mm->page_table_lock held.
 *
 * Returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	pud_t *pud = pud_offset(pgd, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	/*
	 * Bump *addr to the last huge page of this PUD range, so that the
	 * caller's loop, after its own += HPAGE_SIZE step, continues at
	 * the next PUD boundary: the whole shared range is gone at once.
	 */
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}
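
#if 0	/* Illustrative caller sketch (not built): roughly how the unmap
	 * path in mm/hugetlb.c is expected to drive huge_pmd_unshare();
	 * names and locking are simplified. When a shared pmd page is
	 * dropped, the whole PUD range is unmapped at once and *address
	 * is bumped so the loop resumes at the next PUD boundary.
	 */
static void unmap_hugerange_sketch(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	unsigned long address;
	pte_t *ptep;

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;	/* entire shared PUD range gone */
		/* ... clear the pte and release the page as usual ... */
	}
	spin_unlock(&mm->page_table_lock);
}
#endif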

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (pud_none(*pud))
				huge_pmd_share(mm, addr, pud);
			pte = (pte_t *) pmd_alloc(mm, pud, addr);
		}
	}
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}
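
/*
 * For a 1GB (PUD_SIZE) page the pud entry itself acts as the huge pte,
 * so the pud is returned directly; for a 2MB (PMD_SIZE) page we first
 * try to share an existing pmd page via huge_pmd_share() and only then
 * fall back to pmd_alloc().
 */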

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud)) {
			if (pud_large(*pud))
				return (pte_t *)pud;
			pmd = pmd_offset(pud, addr);
		}
	}
	return (pte_t *) pmd;
}
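
/*
 * huge_pte_offset() mirrors huge_pte_alloc() without allocating: a
 * large pud (1GB page) is returned as the pte itself, otherwise the
 * pmd entry is returned, or NULL if the pgd or pud is not present.
 */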

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address / PAGE_SIZE;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
	return page;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pud);
	if (page)
		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
	return page;
}
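
/*
 * Worked example (hypothetical address): for a 2MB page mapped at a
 * PMD boundary, address 0x40123000 gives
 * (address & ~PMD_MASK) >> PAGE_SHIFT == 0x123, so the head page plus
 * a 0x123-page offset is returned, i.e. the 4KB subpage that the
 * address actually falls in.
 */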

#endif

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > mm->cached_hole_size) {
		start_addr = mm->free_area_cache;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, huge_page_size(h));

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, huge_page_size(h));
	}
}
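
/*
 * The bottom-up walk above relies on two mm-wide hints:
 * free_area_cache, where the last search left off, and
 * cached_hole_size, the largest hole seen so far. If a cached start
 * yields no fit, the search restarts once from TASK_UNMAPPED_BASE in
 * case an earlier hole was skipped.
 */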

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & huge_page_mask(h);
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		vma = find_vma_prev(mm, addr, &prev_vma);
		if (!vma)
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & huge_page_mask(h);
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
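
#if 0	/* Illustrative userspace sketch (not built): a hugetlbfs-backed
	 * mmap() whose placement ends up being chosen by
	 * hugetlb_get_unmapped_area() above. "/mnt/huge/file" is a
	 * hypothetical hugetlbfs mount path.
	 */
#include <sys/mman.h>
#include <fcntl.h>

static void *map_huge_file(size_t len)
{
	int fd = open("/mnt/huge/file", O_CREAT | O_RDWR, 0600);

	if (fd < 0)
		return MAP_FAILED;
	/* len must be a multiple of the huge page size, or mmap() fails
	 * with EINVAL (the len & ~huge_page_mask(h) check above). */
	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}
#endif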

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
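
/*
 * Example (boot command line, assuming gbpages support):
 *   hugepagesz=1G hugepages=4
 * registers the 1GB hstate and, via the separate hugepages= parameter,
 * reserves four such pages; "hugepagesz=2M" selects the pmd-sized pool.
 */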
#endif