/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>


unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;

	if (addr)
		addr = ALIGN(addr, huge_page_size(h));

	/* we need to make sure the colouring is OK */
	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}


pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range. So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

/* Purge data and instruction TLB entries. Must be called holding
 * the pa_tlb_lock. The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}
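/*
 * Worked example (hedged; the actual shift values depend on the kernel
 * configuration): taking the "2x1 MB emulates 2 MB" case from the comment
 * above, HPAGE_SHIFT would be 21 and REAL_HPAGE_SHIFT 20, so the loop
 * issues 1 << (21 - 20) == 2 purges, one per hardware-sized huge page,
 * stepping the address by 1 MB (1UL << REAL_HPAGE_SHIFT) per iteration.
 * When HPAGE_SHIFT == REAL_HPAGE_SHIFT it degenerates to a single
 * purge_tlb_entries() call.
 */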
/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long flags;

	purge_tlb_start(flags);
	__set_huge_pte_at(mm, addr, ptep, entry);
	purge_tlb_end(flags);
}


pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long flags;
	pte_t entry;

	purge_tlb_start(flags);
	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));
	purge_tlb_end(flags);

	return entry;
}


void huge_ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	unsigned long flags;
	pte_t old_pte;

	purge_tlb_start(flags);
	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
	purge_tlb_end(flags);
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t pte, int dirty)
{
	unsigned long flags;
	int changed;

	purge_tlb_start(flags);
	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	}
	purge_tlb_end(flags);
	return changed;
}


int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}
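/*
 * Userspace illustration (a hedged sketch, not part of this file): the
 * hooks above are typically exercised by an anonymous MAP_HUGETLB
 * mapping. mmap() reaches hugetlb_get_unmapped_area(), and the first
 * write fault populates the mapping through huge_pte_alloc() and
 * set_huge_pte_at(). The 2 MB length below assumes it matches the
 * kernel's default huge page size, and huge pages must have been
 * reserved beforehand (e.g. via /proc/sys/vm/nr_hugepages).
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 2 * 1024 * 1024;
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
 *			       -1, 0);
 *
 *		if (p == MAP_FAILED) {
 *			perror("mmap");
 *			return 1;
 *		}
 *		memset(p, 0, len);
 *		munmap(p, len);
 *		return 0;
 *	}
 */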