// SPDX-License-Identifier: GPL-2.0
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because, by
 * definition, we don't have to worry about any page coloring.
 */

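/*
 * Bottom-up search: try [TASK_UNMAPPED_BASE, min(task_size, VA_EXCLUDE_START))
 * first; a 64-bit task whose address space extends past the sparc64 VA hole
 * gets a second try in [VA_EXCLUDE_END, task_size) if the low range is full.
 */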
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info = {};

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info = {};

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

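/*
 * Top-level hugetlb get_unmapped_area hook: validate the length, honour
 * MAP_FIXED and any address hint, then dispatch to the bottom-up or
 * top-down search depending on the mm's layout.
 */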
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (!test_bit(MMF_TOPDOWN, &mm->flags))
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

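/*
 * Encode the huge page size into the sz bits of the TTE.  On sun4u the
 * default size bits have already been applied by pte_mkhuge() and no other
 * sizes are set up here, so the entry is returned unchanged.
 */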
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}

static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	pte_val(entry) &= ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_16GB_SHIFT:
		hugepage_size = _PAGE_SZ16GB_4V;
		pte_val(entry) |= _PAGE_PUD_HUGE;
		break;
	case HPAGE_2GB_SHIFT:
		hugepage_size = _PAGE_SZ2GB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) |= hugepage_size;
	return entry;
}

static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);
	else
		return sun4u_hugepage_shift_to_tte(entry, shift);
}

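/*
 * Arch hook used by the hugetlb core: mark the PTE huge, fold in the
 * size bits, and propagate the vma's ADI state to TTE.mcd.
 */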
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	pte_t pte;

	entry = pte_mkhuge(entry);
	pte = hugepage_shift_to_tte(entry, shift);

#ifdef CONFIG_SPARC64
	/* If this vma has ADI enabled on it, turn on TTE.mcd. */
	if (flags & VM_SPARC_ADI)
		return pte_mkmcd(pte);
	else
		return pte_mknotmcd(pte);
#else
	return pte;
#endif
}

static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ16GB_4V:
		shift = HPAGE_16GB_SHIFT;
		break;
	case _PAGE_SZ2GB_4V:
		shift = HPAGE_2GB_SHIFT;
		break;
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned long tte_to_shift(pte_t entry)
{
	if (tlb_type == hypervisor)
		return sun4v_huge_tte_to_shift(entry);

	return sun4u_huge_tte_to_shift(entry);
}

static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned long shift = tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}

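/*
 * An HPAGE_SIZE mapping is backed by two REAL_HPAGE_SIZE hardware TTEs,
 * so a TTE carrying the 4MB size bits is reported as HPAGE_SIZE here.
 */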
static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}

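/*
 * Leaf-size helpers for generic page table walkers; unlike
 * huge_tte_to_size() these report the raw hardware mapping size.
 */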
unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }

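/*
 * For sizes of PUD_SIZE or PMD_SIZE and up, the huge "pte" lives directly
 * in the pud/pmd entry; only smaller sizes allocate a last-level table.
 */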
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	if (sz >= PUD_SIZE)
		return (pte_t *)pud;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	if (sz >= PMD_SIZE)
		return (pte_t *)pmd;
	return pte_alloc_huge(mm, pmd, addr);
}

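/*
 * Lookup-only counterpart of huge_pte_alloc(): walk the tree and stop at
 * whatever level holds the huge mapping, without allocating anything.
 */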
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (is_hugetlb_pud(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (is_hugetlb_pmd(*pmd))
		return (pte_t *)pmd;
	return pte_offset_huge(pmd, addr);
}

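/*
 * A huge mapping may span several page table entries at its level
 * (nptes of them): replicate the TTE into each slot, advancing the
 * physical address by 1 << shift per entry.
 */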
void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned int nptes, orig_shift, shift;
	unsigned long i, size;
	pte_t orig;

	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, entry);
}

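/*
 * Tear-down counterpart of __set_huge_pte_at(): clear all nptes slots
 * and queue the same TLB flushes the setter would.
 */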
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;
	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    orig_shift);

	return entry;
}

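/*
 * The hugetlb_free_*_range() helpers below mirror the generic
 * free_{pte,pmd,pud,pgd}_range() logic in mm/memory.c, with extra
 * is_hugetlb_pmd()/is_hugetlb_pud() checks: a huge leaf entry has no
 * lower-level table to free, so it is simply cleared.
 */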
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (is_hugetlb_pud(*pud))
			pud_clear(pud);
		else
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);
}
525