// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  MMU context allocation for 64-bit kernels.
 */

#include <linux/mm.h>
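/*
 * For context: the context-id allocation used by the functions below comes
 * from an IDA defined in this file. A minimal sketch of those helpers as
 * found in current kernels (treat the exact bounds and feature test as
 * assumptions, not part of this excerpt):
 */
static DEFINE_IDA(mmu_context_ida);

static int alloc_context_id(int min_id, int max_id)
{
	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
}

int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}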
static int realloc_context_ids(mm_context_t *ctx)
{
	int i, id;

	/*
	 * id 0 (aka. ctx->id) is special, we always allocate a new one, even if
	 * there wasn't one allocated previously (the exec case, where ctx is
	 * newly allocated). Any existing ids must be kept in
	 * the array, so that we can test if they're non-zero to decide if we
	 * need to allocate a new one for that slot.
	 */
	for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
		if (i == 0 || ctx->extended_id[i]) {
			id = hash__alloc_context_id();
			if (id < 0)
				goto error;
			ctx->extended_id[i] = id;
		}
	}
	return ctx->id;

error:
	/* Free only the ids allocated above. */
	for (i--; i >= 0; i--) {
		if (ctx->extended_id[i])
			ida_free(&mmu_context_ida, ctx->extended_id[i]);
	}
	return id;
}
static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
					   GFP_KERNEL);
	if (!mm->context.hash_context)
		return -ENOMEM;
	/*
	 * The old code would re-promote on fork, we don't do that when using
	 * slices, as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm, so a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0) {
		memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
		slice_init_new_context_exec(mm);
	} else {
		/* This is fork. Copy hash_context details from current->mm */
		memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
		/* Inherit the subpage protection table, if the parent has one. */
		if (current->mm->context.hash_context->spt) {
			mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
								GFP_KERNEL);
			if (!mm->context.hash_context->spt) {
				kfree(mm->context.hash_context);
				return -ENOMEM;
			}
		}
	}
	index = realloc_context_ids(&mm->context);
	if (index < 0) {
		kfree(mm->context.hash_context->spt);
		kfree(mm->context.hash_context);
		return index;
	}

	pkey_mm_init(mm);
	return index;
}
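/*
 * When the kernel is built without hash MMU support, a stub is used instead;
 * in the full file the real function and the stub below sit on opposite sides
 * of an #ifdef (presumably CONFIG_PPC_64S_HASH_MMU), so only one is compiled.
 */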
static inline int hash__init_new_context(struct mm_struct *mm)
{
	return 0;
}
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/* Point this PID's process table entry at the new page table. */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	mm->context.hash_context = NULL;
	return index;
}
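/*
 * In the full function, the store to process_tb[] above is followed by a
 * "ptesync; isync" sequence that orders it against the later update of the
 * PID register, after which the hardware may start loading and caching the
 * entry.
 */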
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;
	mm->context.pte_frag = NULL;
	mm->context.pmd_frag = NULL;
	mm_iommu_init(mm);
	atomic_set(&mm->context.active_cpus, 0);
	atomic_set(&mm->context.copros, 0);

	return 0;
}
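/*
 * init_new_context() is the arch hook called from the generic mm setup code
 * on fork and exec. On success, mm->context.id holds the hash context id or,
 * on radix, the hardware PID that indexes the process table.
 */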
static void destroy_contexts(mm_context_t *ctx)
{
	if (radix_enabled()) {
		ida_free(&mmu_context_ida, ctx->id);
	} else {
		int index, context_id;

		for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
			context_id = ctx->extended_id[index];
			if (context_id)
				ida_free(&mmu_context_ida, context_id);
		}
		kfree(ctx->hash_context);
	}
}
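/*
 * With the hash MMU a single mm can own several context ids: extended_id[]
 * (whose entry 0 aliases ctx->id via a union) holds one id per chunk of the
 * large user address space, so every non-zero entry is returned to the IDA.
 */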
	/* pmd_frag_destroy(): free the PMD page once its last fragment is dropped. */
	if (atomic_sub_and_test(PMD_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {
		pagetable_pmd_dtor(ptdesc);
		pagetable_free(ptdesc);
	}
static void destroy_pagetable_cache(struct mm_struct *mm)
{
	void *frag;

	frag = mm->context.pte_frag;
	if (frag)
		pte_frag_destroy(frag);

	frag = mm->context.pmd_frag;
	if (frag)
		pmd_frag_destroy(frag);
}
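/*
 * pte_frag/pmd_frag are the mm's cached, partially used page-table pages:
 * book3s64 carves several PTE/PMD tables out of a single page, and whatever
 * fragment is still cached in the context is released here at teardown.
 */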
void destroy_context(struct mm_struct *mm)
{
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));

	if (radix_enabled())
		process_tb[mm->context.id].prtb0 = 0;
	else
		subpage_prot_free(mm);
	destroy_contexts(&mm->context);
	mm->context.id = MMU_NO_CONTEXT;
}
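/*
 * destroy_context() runs from the generic __mmdrop() path once the last
 * reference to the mm is gone. On radix, zeroing prtb0 here mainly covers the
 * error path during mm setup; for a task that actually ran, the process table
 * entry has already been cleared in arch_exit_mmap() below.
 */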
void arch_exit_mmap(struct mm_struct *mm)
{
	destroy_pagetable_cache(mm);

	if (radix_enabled()) {
		/*
		 * Radix process table entries have no valid bit, but at least
		 * the P9 implementation avoids caching an entry whose RTS
		 * field is invalid, and 0 is invalid. So this will do. The
		 * "fullmm" TLB flush that follows in exit_mmap() takes care of
		 * anything already cached for this entry. See the "fullmm"
		 * comments in tlb-radix.c.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	}
}
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	mtspr(SPRN_PID, next->context.id);
	isync();
}
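/*
 * Writing SPRN_PID selects which process table entry the MMU uses for user
 * translations from now on; the isync() makes the new PID take effect before
 * any subsequent instructions depend on it.
 */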
/**
 * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined)
 */
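/*
 * The body of the function this kernel-doc header belongs to is not part of
 * this excerpt; a sketch of what it does in current kernels (treat the exact
 * helpers as assumptions): drop the newly offlined CPU from every mm's
 * cpumask and flush the local TLB so nothing stale survives a later online.
 */
void cleanup_cpu_mmu_context(void)
{
	int cpu = smp_processor_id();

	clear_tasks_mm_cpumask(cpu);
	tlbiel_all();
}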