// SPDX-License-Identifier: GPL-2.0
/*
 * PPC Huge TLB Page Support for Book3E MMU
 *
 * Copyright (C) 2009 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/mmu.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>

/* Round-robin through the per-core range of TLB1 entries reserved in
 * the tlb_core_data, from esel_first up to (but not including) esel_max. */
static inline int tlb1_next(void)
{
	struct paca_struct *paca = get_paca();
	struct tlb_core_data *tcd;
	int this, next;

	tcd = paca->tcd_ptr;
	this = tcd->esel_next;

	next = this + 1;
	if (next >= tcd->esel_max)
		next = tcd->esel_first;

	tcd->esel_next = next;
	return this;
}

static inline void book3e_tlb_lock(void)
{
	struct paca_struct *paca = get_paca();
	unsigned long tmp;
	int token = smp_processor_id() + 1;

	/*
	 * Besides being unnecessary in the absence of SMT, this
	 * check prevents trying to do lbarx/stbcx. on e5500 which
	 * doesn't implement either feature.
	 */
	if (!cpu_has_feature(CPU_FTR_SMT))
		return;

	/*
	 * Test-and-set byte lock: lbarx reserves the lock byte; if it
	 * reads as zero, stbcx. atomically claims it with our nonzero
	 * token (cpu id + 1), retrying if the reservation is lost.
	 * While it reads as nonzero, spin with plain lbzx loads.
	 */
	asm volatile(".machine push;"
		     ".machine e6500;"
		     "1: lbarx %0, 0, %1;"
		     "cmpwi %0, 0;"
		     "bne 2f;"
		     "stbcx. %2, 0, %1;"
		     "bne 1b;"
		     "b 3f;"
		     "2: lbzx %0, 0, %1;"
		     "cmpwi %0, 0;"
		     "bne 2b;"
		     "b 1b;"
		     "3:"
		     ".machine pop;"
		     : "=&r" (tmp)
		     : "r" (&paca->tcd_ptr->lock), "r" (token)
		     : "memory");
}

static inline void book3e_tlb_unlock(void)
{
	struct paca_struct *paca = get_paca();

	if (!cpu_has_feature(CPU_FTR_SMT))
		return;

	isync();
	paca->tcd_ptr->lock = 0;
}
#else
static inline int tlb1_next(void)
{
	int index, ncams;

	ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	index = this_cpu_read(next_tlbcam_idx);

	/* Just round-robin the entries and wrap when we hit the end */
	if (unlikely(index == ncams - 1))
		__this_cpu_write(next_tlbcam_idx, tlbcam_index);
	else
		__this_cpu_inc(next_tlbcam_idx);

	return index;
}

static inline void book3e_tlb_lock(void)
{
}

static inline void book3e_tlb_unlock(void)
{
}
#endif

static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
{
	int found = 0;

	/*
	 * Probe for a translation of @ea under search PID @pid (MAS6).
	 * SPR 0x271 is MAS1; srwi by 31 extracts MAS1_VALID, which the
	 * hardware sets on a tlbsx hit.
	 */
	mtspr(SPRN_MAS6, pid << 16);
	asm volatile(
		"tlbsx	0,%1\n"
		"mfspr	%0,0x271\n"
		"srwi	%0,%0,31\n"
		: "=&r"(found) : "r"(ea));

	return found;
}
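
/*
 * Illustrative sketch only (not called anywhere): how the helpers above
 * fit together. book3e_hugetlb_preload() below follows exactly this
 * shape; interrupts stay off so nothing can clobber the MAS registers
 * between the existence check and the tlbwe:
 *
 *	local_irq_save(flags);
 *	book3e_tlb_lock();
 *	if (!book3e_tlb_exists(ea, pid)) {
 *		index = tlb1_next();
 *		... program MAS0..MAS7, then "tlbwe" ...
 *	}
 *	book3e_tlb_unlock();
 *	local_irq_restore(flags);
 */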

static void
book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
{
	unsigned long mas1, mas2;
	u64 mas7_3;
	unsigned long psize, tsize, shift;
	unsigned long flags;
	struct mm_struct *mm;
	int index;

	if (unlikely(is_kernel_addr(ea)))
		return;

	mm = vma->vm_mm;

	psize = vma_mmu_pagesize(vma);
	shift = __ilog2(psize);
	tsize = shift - 10;

	/*
	 * We can't be interrupted while we're setting up the MAS
	 * registers or after we've confirmed that no tlb exists.
	 */
	local_irq_save(flags);

	book3e_tlb_lock();

	if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
		book3e_tlb_unlock();
		local_irq_restore(flags);
		return;
	}

	/* We have to use the CAM(TLB1) on FSL parts for hugepages */
	index = tlb1_next();
	mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));

	mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
	mas2 = ea & ~((1UL << shift) - 1);
	mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
	mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT;
	mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK;
	if (!pte_dirty(pte))
		mas7_3 &= ~(MAS3_SW|MAS3_UW);

	mtspr(SPRN_MAS1, mas1);
	mtspr(SPRN_MAS2, mas2);

	if (mmu_has_feature(MMU_FTR_BIG_PHYS))
		mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
	mtspr(SPRN_MAS3, lower_32_bits(mas7_3));

	asm volatile ("tlbwe");

	book3e_tlb_unlock();
	local_irq_restore(flags);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 *
 * This must always be called with the pte lock held.
 */
void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
}

void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct hstate *hstate = hstate_file(vma->vm_file);
	unsigned long tsize = huge_page_shift(hstate) - 10;

	__flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
}
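
/*
 * Worked example of the TSIZE encoding used above (a sketch derived
 * from the tsize = shift - 10 computation, i.e. TSIZE = log2(page size
 * in KB)): for a 4M huge page,
 *
 *	psize = 4UL << 20;		// 4M
 *	shift = __ilog2(psize);		// 22
 *	tsize = shift - 10;		// 12 -> MAS1_TSIZE(12)
 *	mas2 = ea & ~((1UL << 22) - 1);	// EPN = 4M-aligned EA
 *
 * flush_hugetlb_page() computes the same encoding from the hstate so
 * that the invalidation targets entries of the matching size.
 */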