Lines Matching +full:zero +full:- +full:initialised
1 /* SPDX-License-Identifier: GPL-2.0-only */
24 #include <asm/proc-fns.h>
102 struct mm_struct *mm = current->active_mm; in cpu_uninstall_idmap()
109 cpu_switch_mm(mm->pgd, mm); in cpu_uninstall_idmap()
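The two hits above (lines 102 and 109) come from cpu_uninstall_idmap(). For context, a minimal sketch of the whole helper as it reads in mainline kernels; this is a reconstruction, not a copy from this exact tree, and helpers such as cpu_set_reserved_ttbr0() and cpu_set_default_tcr_t0sz() are assumed from mainline:

    static inline void cpu_uninstall_idmap(void)
    {
        struct mm_struct *mm = current->active_mm;

        /* Park TTBR0 on the reserved tables, then drop idmap TLB entries. */
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();

        /* Reinstall the caller's own page tables (line 109 above). */
        if (mm != &init_mm && !system_uses_ttbr0_pan())
            cpu_switch_mm(mm->pgd, mm);
    }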
126 * For a real hibernate/resume/kexec cycle TTBR0 currently points to a zero
127 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime
128 * services), while for a userspace-driven test_resume cycle it points to
129 * userspace page tables (and we must point it at a zero page ourselves).
140 /* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */ in cpu_install_ttbr0()
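Lines 126-140 all belong to cpu_install_ttbr0(); the comment block explains why the TLB must be cleaned before the new tables go in. A hedged sketch of the function body, reconstructed from mainline (the helper name __cpu_set_tcr_t0sz() is an assumption about this kernel version):

    static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
    {
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();      /* flush stale ASID-tagged entries */
        __cpu_set_tcr_t0sz(t0sz);

        /* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */
        write_sysreg(ttbr0, ttbr0_el1);
        isb();
    }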
177 atomic64_set(&mm->context.id, 0); in init_new_context()
178 refcount_set(&mm->context.pinned, 0); in init_new_context()
181 mm->context.pkey_allocation_map = BIT(0); in init_new_context()
190 mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map; in arch_dup_pkeys()
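Lines 177-190 are from init_new_context() and arch_dup_pkeys(). A sketch of both, assuming this tree carries the arm64 protection-key support; the lines around the hits are reconstructed from mainline:

    static inline int init_new_context(struct task_struct *tsk,
                                       struct mm_struct *mm)
    {
        atomic64_set(&mm->context.id, 0);       /* no ASID allocated yet */
        refcount_set(&mm->context.pinned, 0);

        /* pkey 0 is the default, so it is always reserved */
        mm->context.pkey_allocation_map = BIT(0);

        return 0;
    }

    static inline void arch_dup_pkeys(struct mm_struct *oldmm,
                                      struct mm_struct *mm)
    {
        /* on fork(), the child inherits the parent's pkey allocations */
        mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
    }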
221 ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48; in update_saved_ttbr0()
223 WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr); in update_saved_ttbr0()
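Lines 221-223 compute and publish the saved TTBR0 value that software PAN restores on kernel exit. A sketch of update_saved_ttbr0(), reconstructed from mainline; whether the init_mm case points at reserved_pg_dir or at the zero page depends on kernel version:

    static inline void update_saved_ttbr0(struct task_struct *tsk,
                                          struct mm_struct *mm)
    {
        u64 ttbr;

        if (!system_uses_ttbr0_pan())
            return;

        if (mm == &init_mm)
            ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
        else
            ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;

        WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
    }

The ASID lands in bits 63:48 of the stored value, matching the hardware TTBR0_EL1.ASID field.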
238 * zero page. in enter_lazy_tlb()
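The "zero page" hit at line 238 is the tail of a comment in enter_lazy_tlb(). A sketch of the helper, reconstructed from mainline: a kernel thread has no user mapping of its own, so the saved TTBR0 is pointed at init_mm's tables instead:

    static inline void
    enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
    {
        /*
         * We don't actually care about the ttbr0 mapping, so point it at
         * the zero page.
         */
        update_saved_ttbr0(tsk, &init_mm);
    }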
265 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous in switch_mm()
266 * value may not have been initialised yet (activate_mm caller) or the in switch_mm()
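Lines 265-266 annotate the tail of switch_mm(). A sketch, reconstructed from mainline; __switch_mm() is assumed to do the actual ASID check-and-switch:

    static inline void
    switch_mm(struct mm_struct *prev, struct mm_struct *next,
              struct task_struct *tsk)
    {
        if (prev != next)
            __switch_mm(next);

        /*
         * Update the saved TTBR0_EL1 of the scheduled-in task as the
         * previous value may not have been initialised yet (activate_mm
         * caller) or the ASID has changed since the last run (following
         * the context switch of another thread of the same process).
         */
        update_saved_ttbr0(tsk, next);
    }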
295 return -1UL >> 8; in mm_untag_mask()
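The mm_untag_mask() hit at line 295 returns -1UL >> 8, i.e. 0x00ffffffffffffff: a mask keeping the low 56 address bits and clearing the top byte, which Top Byte Ignore (TBI) lets tagged-pointer userspace (MTE, HWASan) store a tag in. A minimal usage sketch; the variable names here are illustrative, not from this file:

    /* strip a user pointer's tag byte before doing range checks */
    unsigned long untagged = addr & mm_untag_mask(current->mm);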
323 #include <asm-generic/mmu_context.h>