xref: /linux/arch/powerpc/mm/book3s64/radix_hugetlbpage.c (revision 95298d63c67673c654c08952672d016212b26054)
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/security.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>
#include <asm/mman.h>
#include <asm/tlb.h>

/* Invalidate the TLB entries for one huge page, on all CPUs. */
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}
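
/*
 * For context (not part of this file): on book3s64, the generic
 * flush_hugetlb_page() hook in arch/powerpc/include/asm/hugetlb.h
 * dispatches to the helper above only when the MMU runs in radix mode,
 * roughly as in this sketch (the exact body may differ by tree):
 *
 *	static inline void flush_hugetlb_page(struct vm_area_struct *vma,
 *					      unsigned long vmaddr)
 *	{
 *		if (radix_enabled())
 *			return radix__flush_hugetlb_page(vma, vmaddr);
 *	}
 */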

/* Like radix__flush_hugetlb_page(), but flush only the current CPU's TLB. */
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}

/* Invalidate the TLB entries for a huge-page-backed address range. */
void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
}
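
/*
 * All three helpers above key the flush on the huge page size.
 * hstate_get_psize() (asm/book3s/64/hugetlb.h) maps the hstate's page
 * shift to an MMU page-size index, along these lines (a sketch; the
 * exact set of cases varies by tree):
 *
 *	shift = huge_page_shift(hstate);
 *	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
 *		return MMU_PAGE_2M;
 *	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
 *		return MMU_PAGE_1G;
 *	...
 */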

/*
 * A variant of hugetlb_get_unmapped_area() that does a topdown search.
 * FIXME!! should we do as x86 does, or as the non-hugetlb area does,
 * i.e. use topdown or not based on a mmap_is_legacy() check?
 */
unsigned long
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	int fixed = (flags & MAP_FIXED);
	unsigned long high_limit;
	struct vm_unmapped_area_info info;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > high_limit)
		return -ENOMEM;

	if (fixed) {
		if (addr > high_limit - len)
			return -ENOMEM;
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (high_limit - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	/*
	 * We always do a topdown search here; the slice code does the
	 * same.
	 */
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;

	return vm_unmapped_area(&info);
}
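
/*
 * Worked example for the alignment math above, assuming 4K base pages
 * and a 2M huge page: huge_page_mask(h) == ~(2M - 1), so
 * info.align_mask == PAGE_MASK & 0x1fffff == 0x1ff000. With
 * align_offset 0, vm_unmapped_area() clears exactly those bits in the
 * address it returns, i.e. the mapping comes back 2M-aligned.
 */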

void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * To avoid a nest MMU (NMMU) hang while relaxing access
	 * permissions, we need to flush the TLB before we set the new
	 * value.
	 */
	if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
	    (atomic_read(&mm->context.copros) > 0))
		radix__flush_hugetlb_page(vma, addr);

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
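
/*
 * For reference, the caller side: mm/hugetlb.c drives the prot-change
 * start/commit pair roughly as sketched below (details vary by kernel
 * version), and the commit hook lands in the helper above when the MMU
 * runs in radix mode:
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 */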