// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

static DEFINE_IDA(mmu_context_ida);

static int alloc_context_id(int min_id, int max_id)
{
	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
}

void hash__reserve_context_id(int id)
{
	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);

void slb_setup_new_exec(void);

static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	index = hash__alloc_context_id();
	if (index < 0)
		return index;

	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
					   GFP_KERNEL);
	if (!mm->context.hash_context) {
		ida_free(&mmu_context_ida, index);
		return -ENOMEM;
	}

	/*
	 * The old code would re-promote on fork, we don't do that when using
	 * slices as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which
	 * will have id == 0) and don't alter context slices inherited via
	 * fork (which will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0) {
		memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
		slice_init_new_context_exec(mm);
	} else {
		/* This is a fork. Copy hash_context details from current->mm. */
		memcpy(mm->context.hash_context, current->mm->context.hash_context,
		       sizeof(struct hash_mm_context));
#ifdef CONFIG_PPC_SUBPAGE_PROT
		/* Inherit subpage prot details if the parent has a table. */
		if (current->mm->context.hash_context->spt) {
			mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
								GFP_KERNEL);
			if (!mm->context.hash_context->spt) {
				ida_free(&mmu_context_ida, index);
				kfree(mm->context.hash_context);
				return -ENOMEM;
			}
		}
#endif
	}

	pkey_mm_init(mm);
	return index;
}

void hash__setup_new_exec(void)
{
	slice_setup_new_exec();

	slb_setup_new_exec();
}

static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * Set the process table entry.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with the subsequent update of the PID
	 * register (at which point the HW can start loading/caching
	 * the entry) and the corresponding load by the MMU from
	 * the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

	mm->context.npu_context = NULL;
	mm->context.hash_context = NULL;

	return index;
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;

	mm->context.pte_frag = NULL;
	mm->context.pmd_frag = NULL;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	atomic_set(&mm->context.active_cpus, 0);
	atomic_set(&mm->context.copros, 0);

	return 0;
}

void __destroy_context(int context_id)
{
	ida_free(&mmu_context_ida, context_id);
}
EXPORT_SYMBOL_GPL(__destroy_context);

static void destroy_contexts(mm_context_t *ctx)
{
	int index, context_id;

	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
		context_id = ctx->extended_id[index];
		if (context_id)
			ida_free(&mmu_context_ida, context_id);
	}
	kfree(ctx->hash_context);
}

static void pmd_frag_destroy(void *pmd_frag)
{
	int count;
	struct page *page;

	page = virt_to_page(pmd_frag);
	/* drop all the pending references */
	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
	/* We allow PMD_FRAG_NR fragments from a PMD page */
	if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}

static void destroy_pagetable_cache(struct mm_struct *mm)
{
	void *frag;

	frag = mm->context.pte_frag;
	if (frag)
		pte_frag_destroy(frag);

	frag = mm->context.pmd_frag;
	if (frag)
		pmd_frag_destroy(frag);
}

void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
	if (radix_enabled())
		WARN_ON(process_tb[mm->context.id].prtb0 != 0);
	else
		subpage_prot_free(mm);
	destroy_contexts(&mm->context);
	mm->context.id = MMU_NO_CONTEXT;
}

void arch_exit_mmap(struct mm_struct *mm)
{
	destroy_pagetable_cache(mm);

	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries.
		 * However, we know that at least the P9 implementation
		 * will avoid caching an entry with an invalid RTS field,
		 * and 0 is invalid. So this will do.
		 *
		 * This runs before the "fullmm" tlb flush in exit_mmap,
		 * which does a RIC=2 tlbie to clear the process table
		 * entry. See the "fullmm" comments in tlb-radix.c.
		 *
		 * No barrier is required here after the store because
		 * this process will do the invalidate, which starts with
		 * ptesync.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	}
}

#ifdef CONFIG_PPC_RADIX_MMU
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	mtspr(SPRN_PID, next->context.id);
	isync();
}
#endif