/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
#include <asm/sysreg.h>

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS	TCR_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif

#define TCR_SMP_FLAGS	TCR_SHARED

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_SW_FLAGS	0
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS	TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
#elif defined(CONFIG_ARM64_MTE)
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_MTE_FLAGS	0
#endif

/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
 */
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU register context
 *
 * x0: virtual address of context pointer
 *
 * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
 */
SYM_FUNC_START(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, osdlr_el1
	mrs	x6, cpacr_el1
	mrs	x7, tcr_el1
	mrs	x8, vbar_el1
	mrs	x9, mdscr_el1
	mrs	x10, oslsr_el1
	mrs	x11, sctlr_el1
	get_this_cpu_offset x12
	mrs	x13, sp_el0
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	stp	x12, x13, [x0, #80]
	/*
	 * Save x18 as it may be used as a platform register, e.g. by shadow
	 * call stack.
	 */
	str	x18, [x0, #96]
	ret
SYM_FUNC_END(cpu_do_suspend)
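
/*
 * For orientation: the context buffer written above uses one 64-bit slot
 * per register. The authoritative layout is struct cpu_suspend_ctx in
 * <asm/suspend.h>; the map below is an illustrative summary of the stores
 * above, not a definition:
 *
 *	#0  tpidr_el0         #8  tpidrro_el0
 *	#16 contextidr_el1    #24 osdlr_el1
 *	#32 cpacr_el1         #40 tcr_el1
 *	#48 vbar_el1          #56 mdscr_el1
 *	#64 oslsr_el1         #72 sctlr_el1
 *	#80 per-CPU offset    #88 sp_el0
 *	#96 x18 (platform register, e.g. shadow call stack)
 */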

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 */
	.pushsection ".idmap.text", "awx"
SYM_FUNC_START(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x8, [x0, #32]
	ldp	x9, x10, [x0, #48]
	ldp	x11, x12, [x0, #64]
	ldp	x13, x14, [x0, #80]
	/*
	 * Restore x18, as it may be used as a platform register, and clear
	 * the buffer to minimize the risk of exposure when used for shadow
	 * call stack.
	 */
	ldr	x18, [x0, #96]
	str	xzr, [x0, #96]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	cpacr_el1, x6

	/* Don't change t0sz here, mask those bits when restoring */
	mrs	x7, tcr_el1
	bfi	x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

	msr	tcr_el1, x8
	msr	vbar_el1, x9

	/*
	 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
	 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
	 * exception. Mask them until local_daif_restore() in cpu_suspend()
	 * resets them.
	 */
	disable_daif
	msr	mdscr_el1, x10

	msr	sctlr_el1, x12
	set_this_cpu_offset x13
	msr	sp_el0, x14
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	msr	osdlr_el1, x5
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	reset_amuserenr_el0 x0			// Disable AMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
	msr_s	SYS_DISR_EL1, xzr
alternative_else_nop_endif

	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
	isb
	ret
SYM_FUNC_END(cpu_do_resume)
	.popsection
#endif

	.pushsection ".idmap.text", "awx"

.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	tlbi	vmalle1
	dsb	nsh
	isb
.endm

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0
	isb

	ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
	.popsection

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0

#define KPTI_NG_PTE_FLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)

	.pushsection ".idmap.text", "awx"

	.macro	kpti_mk_tbl_ng, type, num_entries
	add	end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
	ldr	\type, [cur_\type\()p]		// Load the entry
	tbz	\type, #0, .Lnext_\type		// Skip invalid and
	tbnz	\type, #11, .Lnext_\type	// non-global entries
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\type\()p]		// Update the entry
	.ifnc	\type, pte
	tbnz	\type, #1, .Lderef_\type
	.endif
.Lnext_\type:
	add	cur_\type\()p, cur_\type\()p, #8
	cmp	cur_\type\()p, end_\type\()p
	b.ne	.Ldo_\type
	.endm

	/*
	 * Dereference the current table entry and map it into the temporary
	 * fixmap slot associated with the current level.
	 */
	.macro	kpti_map_pgtbl, type, level
	str	xzr, [temp_pte, #8 * (\level + 1)]	// break before make
	dsb	nshst
	add	pte, temp_pte, #PAGE_SIZE * (\level + 1)
	lsr	pte, pte, #12
	tlbi	vaae1, pte
	dsb	nsh
	isb

	phys_to_pte pte, cur_\type\()p
	add	cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 1)
	orr	pte, pte, pte_flags
	str	pte, [temp_pte, #8 * (\level + 1)]
	dsb	nshst
	.endm
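
/*
 * Taken together, kpti_map_pgtbl and kpti_mk_tbl_ng walk a live table and
 * set the nG bit in every valid, global entry, descending through table
 * descriptors via the per-level fixmap slot. A rough C-style sketch of one
 * level, for readability only (walk() and entry_pa() are illustrative
 * names; the real code iterates via the .Ldo_/.Lnext_/.Lderef_ labels):
 *
 *	for (p = table; p < table + num_entries; p++) {
 *		u64 e = *p;
 *
 *		if (!(e & 1) || (e & PTE_NG))
 *			continue;			// invalid or already nG
 *		*p = e | PTE_NG;			// same bit for blocks/pages
 *		if (level < 3 && (e & 2))		// table descriptor?
 *			walk(level + 1, entry_pa(e));	// descend via fixmap slot
 *	}
 */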

/*
 * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t temp_pgd,
 *				   unsigned long temp_pte_va)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
 */
	.pushsection ".data", "aw", %progbits
SYM_DATA(__idmap_kpti_flag, .long 1)
	.popsection

SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
	cpu		.req	w0
	temp_pte	.req	x0
	num_cpus	.req	w1
	pte_flags	.req	x1
	temp_pgd_phys	.req	x2
	swapper_ttb	.req	x3
	flag_ptr	.req	x4
	cur_pgdp	.req	x5
	end_pgdp	.req	x6
	pgd		.req	x7
	cur_pudp	.req	x8
	end_pudp	.req	x9
	cur_pmdp	.req	x11
	end_pmdp	.req	x12
	cur_ptep	.req	x14
	end_ptep	.req	x15
	pte		.req	x16
	valid		.req	x17

	mov	x5, x3				// preserve temp_pte arg
	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, __idmap_kpti_flag

	cbnz	cpu, __idmap_kpti_secondary

	/* We're the boot CPU. Wait for the others to catch up */
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus
	cbnz	w17, 1b

	/* Switch to the temporary page tables on this CPU only */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	offset_ttbr1 temp_pgd_phys, x8
	msr	ttbr1_el1, temp_pgd_phys
	isb

	mov	temp_pte, x5
	mov	pte_flags, #KPTI_NG_PTE_FLAGS

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */
	/* PGD */
	adrp	cur_pgdp, swapper_pg_dir
	kpti_map_pgtbl	pgd, 0
	kpti_mk_tbl_ng	pgd, PTRS_PER_PGD

	/* Ensure all the updated entries are visible to secondary CPUs */
	dsb	ishst

	/* We're done: fire up swapper_pg_dir again */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	msr	ttbr1_el1, swapper_ttb
	isb

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
	ret

.Lderef_pgd:
	/* PUD */
	.if		CONFIG_PGTABLE_LEVELS > 3
	pud		.req	x10
	pte_to_phys	cur_pudp, pgd
	kpti_map_pgtbl	pud, 1
	kpti_mk_tbl_ng	pud, PTRS_PER_PUD
	b		.Lnext_pgd
	.else		/* CONFIG_PGTABLE_LEVELS <= 3 */
	pud		.req	pgd
	.set		.Lnext_pud, .Lnext_pgd
	.endif

.Lderef_pud:
	/* PMD */
	.if		CONFIG_PGTABLE_LEVELS > 2
	pmd		.req	x13
	pte_to_phys	cur_pmdp, pud
	kpti_map_pgtbl	pmd, 2
	kpti_mk_tbl_ng	pmd, PTRS_PER_PMD
	b		.Lnext_pud
	.else		/* CONFIG_PGTABLE_LEVELS <= 2 */
	pmd		.req	pgd
	.set		.Lnext_pmd, .Lnext_pgd
	.endif

.Lderef_pmd:
	/* PTE */
	pte_to_phys	cur_ptep, pmd
	kpti_map_pgtbl	pte, 3
	kpti_mk_tbl_ng	pte, PTRS_PER_PTE
	b		.Lnext_pmd

	.unreq	cpu
	.unreq	temp_pte
	.unreq	num_cpus
	.unreq	pte_flags
	.unreq	temp_pgd_phys
	.unreq	cur_pgdp
	.unreq	end_pgdp
	.unreq	pgd
	.unreq	cur_pudp
	.unreq	end_pudp
	.unreq	pud
	.unreq	cur_pmdp
	.unreq	end_pmdp
	.unreq	pmd
	.unreq	cur_ptep
	.unreq	end_ptep
	.unreq	pte
	.unreq	valid

	/* Secondary CPUs end up here */
__idmap_kpti_secondary:
	/* Uninstall swapper before surgery begins */
	__idmap_cpu_set_reserved_ttbr1 x16, x17

	/* Increment the flag to let the boot CPU know we're ready */
1:	ldxr	w16, [flag_ptr]
	add	w16, w16, #1
	stxr	w17, w16, [flag_ptr]
	cbnz	w17, 1b

	/* Wait for the boot CPU to finish messing around with swapper */
	sevl
1:	wfe
	ldxr	w16, [flag_ptr]
	cbnz	w16, 1b

	/* All done, act like nothing happened */
	msr	ttbr1_el1, swapper_ttb
	isb
	ret

	.unreq	swapper_ttb
	.unreq	flag_ptr
SYM_FUNC_END(idmap_kpti_install_ng_mappings)
	.popsection
#endif
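
/*
 * The __idmap_kpti_flag rendezvous above, in C-like pseudocode. This is a
 * sketch for readability only (install() and mark_swapper_ng() are
 * illustrative names); the assembly uses ldxr/stxr and ldaxr directly, and
 * the flag starts at 1 to count the boot CPU:
 *
 *	if (cpu == 0) {
 *		while (READ_ONCE(flag) != num_cpus)	// all CPUs checked in?
 *			wfe();
 *		install(temp_pgd);			// this CPU only
 *		mark_swapper_ng();			// kpti_mk_tbl_ng walk
 *		install(swapper_pg_dir);
 *		WRITE_ONCE(flag, 0);			// release the others
 *	} else {
 *		install(reserved_pg_dir);		// get out of swapper
 *		atomic_inc(&flag);			// check in
 *		while (READ_ONCE(flag) != 0)		// wait for the boot CPU
 *			wfe();
 *		install(swapper_pg_dir);
 *	}
 */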

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on.
 *
 * Input:
 *	x0 - actual number of VA bits (ignored unless VA_BITS > 48)
 * Output:
 *	Return in x0 the value of the SCTLR_EL1 register.
 */
	.pushsection ".idmap.text", "awx"
SYM_FUNC_START(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

	mov	x1, #3 << 20
	msr	cpacr_el1, x1			// Enable FP/ASIMD
	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
	msr	mdscr_el1, x1			// access to the DCC from EL0
	isb					// Unmask debug exceptions now,
	enable_dbg				// since this is per-cpu
	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
	reset_amuserenr_el0 x1			// Disable AMU access from EL0

	/*
	 * Default values for VMSA control registers. These will be adjusted
	 * below depending on detected CPU features.
	 */
	mair	.req	x17
	tcr	.req	x16
	mov_q	mair, MAIR_EL1_SET
	mov_q	tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS

	tcr_clear_errata_bits tcr, x9, x5

#ifdef CONFIG_ARM64_VA_BITS_52
	sub		x9, xzr, x0
	add		x9, x9, #64
	tcr_set_t1sz	tcr, x9
#else
	idmap_get_t0sz x9
#endif
	tcr_set_t0sz	tcr, x9

	/*
	 * Set the IPS bits in TCR_EL1.
	 */
	tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable hardware update of the Access flag bit.
	 * Hardware dirty bit management is enabled later,
	 * via capabilities.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
	and	x9, x9, #0xf
	cbz	x9, 1f
	orr	tcr, tcr, #TCR_HA		// hardware Access flag update
1:
#endif	/* CONFIG_ARM64_HW_AFDBM */
	msr	mair_el1, mair
	msr	tcr_el1, tcr
	/*
	 * Prepare SCTLR
	 */
	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
	ret					// return to head.S

	.unreq	mair
	.unreq	tcr
SYM_FUNC_END(__cpu_setup)
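
/*
 * Note on the hand-off: __cpu_setup returns with the MMU still off and the
 * chosen SCTLR_EL1 value in x0. The caller in head.S is expected to write
 * that value to SCTLR_EL1 (via __enable_mmu) once the initial page tables
 * are installed. A minimal sketch of the consuming side, under that
 * assumption (illustrative only, not a copy of head.S):
 *
 *	bl	__cpu_setup		// x0 = INIT_SCTLR_EL1_MMU_ON
 *	...				// install TTBR0/TTBR1
 *	msr	sctlr_el1, x0		// turn the MMU on
 *	isb
 */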