// SPDX-License-Identifier: GPL-2.0
/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>


unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;

	if (addr)
		addr = ALIGN(addr, huge_page_size(h));

	/* we need to make sure the colouring is OK */
	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}


pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					pte = pte_offset_map(pmd, addr);
			}
		}
	}
	return pte;
}

/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}
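/*
 * Illustrative sketch only (compiled out, not part of the original file):
 * with the example sizes named in the comment above -- 1 MB physical huge
 * pages (REAL_HPAGE_SHIFT == 20) emulating 2 MB Linux huge pages
 * (HPAGE_SHIFT == 21) -- purge_tlb_entries_huge() issues
 * 1 << (21 - 20) == 2 purges, one per physical huge page backing the
 * Linux huge page. The helper name below is hypothetical.
 */
#if 0
static unsigned int example_purges_per_hugepage(void)
{
	/* Number of REAL_HPAGE-sized TLB purges needed per HPAGE. */
	return 1U << (HPAGE_SHIFT - REAL_HPAGE_SHIFT);
}
#endif
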
/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long flags;

	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
	__set_huge_pte_at(mm, addr, ptep, entry);
	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
}


pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long flags;
	pte_t entry;

	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));
	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);

	return entry;
}


void huge_ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	unsigned long flags;
	pte_t old_pte;

	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t pte, int dirty)
{
	unsigned long flags;
	int changed;
	struct mm_struct *mm = vma->vm_mm;

	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(mm, addr, ptep, pte);
	}
	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
	return changed;
}


int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}
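
/*
 * Hedged usage sketch (illustrative only, compiled out): roughly how the
 * generic hugetlb code drives the helpers above -- the real callers live
 * in mm/hugetlb.c. The function name example_remap_hugepage() and its
 * calling convention are hypothetical.
 */
#if 0
static void example_remap_hugepage(struct mm_struct *mm, unsigned long addr,
				   pte_t entry, unsigned long sz)
{
	/* Find or allocate the first sub-pte of the HPAGE-aligned range. */
	pte_t *ptep = huge_pte_alloc(mm, addr, sz);

	if (!ptep)
		return;

	/* Drop any old mapping, then install the new one; both helpers
	 * take the pgd spinlock and purge the stale TLB entries. */
	huge_ptep_get_and_clear(mm, addr, ptep);
	set_huge_pte_at(mm, addr, ptep, entry);
}
#endif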