/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_INIT_H__
#define __ARM_KVM_INIT_H__

#ifndef __ASSEMBLY__
#error Assembly-only header
#endif

#include <asm/kvm_arm.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <linux/irqchip/arm-gic-v3.h>

.macro init_el2_hcr	val
	mov_q	x0, \val

	/*
	 * Compliant CPUs advertise their VHE-onlyness with
	 * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
	 * can reset into an UNKNOWN state and might not read as 1 until it has
	 * been initialized explicitly.
	 *
	 * Initialize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
	 * indicating whether the CPU is running in E2H mode.
	 */
	mrs_s	x1, SYS_ID_AA64MMFR4_EL1
	sbfx	x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
	cmp	x1, #0
	b.lt	.LnE2H0_\@

	/*
	 * Unfortunately, HCR_EL2.E2H can be RES1 even if not advertised
	 * as such via ID_AA64MMFR4_EL1.E2H0:
	 *
	 * - Fruity CPUs predate the !FEAT_E2H0 relaxation, and seem to
	 *   have HCR_EL2.E2H implemented as RAO/WI.
	 *
	 * - On CPUs that lack FEAT_FGT, a hypervisor can't trap guest
	 *   reads of ID_AA64MMFR4_EL1 to advertise !FEAT_E2H0. NV
	 *   guests on these hosts can write to HCR_EL2.E2H without
	 *   trapping to the hypervisor, but these writes have no
	 *   functional effect.
	 *
	 * Handle both cases by checking for an essential VHE property
	 * (system register remapping) to decide whether we're
	 * effectively VHE-only or not.
	 */
	msr_hcr_el2 x0			// Setup HCR_EL2 as nVHE
	isb
	mov	x1, #1			// Write something to FAR_EL1
	msr	far_el1, x1
	isb
	mov	x1, #2			// Try to overwrite it via FAR_EL2
	msr	far_el2, x1
	isb
	mrs	x1, far_el1		// If we see the latest write in FAR_EL1,
	cmp	x1, #2			// we can safely assume we are VHE only.
	b.ne	.LnVHE_\@		// Otherwise, we know that nVHE works.

.LnE2H0_\@:
	orr	x0, x0, #HCR_E2H
	msr_hcr_el2 x0
	isb
.LnVHE_\@:
.endm
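
/*
 * For reference, the register-remapping probe in init_el2_hcr amounts to
 * the following C-like sketch (illustrative only, not part of the build):
 *
 *	write_sysreg(val, hcr_el2);		// request nVHE (E2H == 0)
 *	write_sysreg(1, far_el1);
 *	write_sysreg(2, far_el2);
 *	if (read_sysreg(far_el1) == 2) {
 *		// E2H is stuck at 1: EL1 register accesses from EL2 were
 *		// remapped to the EL2 registers, so both writes landed in
 *		// FAR_EL2. Treat the CPU as VHE-only and set E2H.
 *	} else {
 *		// FAR_EL1 kept its own value, so nVHE really works.
 *	}
 */
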
.macro __init_el2_sctlr
	mov_q	x0, INIT_SCTLR_EL2_MMU_OFF
	msr	sctlr_el2, x0
	isb
.endm

.macro __init_el2_hcrx
	mrs	x0, id_aa64mmfr1_el1
	ubfx	x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
	cbz	x0, .Lskip_hcrx_\@
	mov_q	x0, (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)

	/* Enable GCS if supported */
	mrs_s	x1, SYS_ID_AA64PFR1_EL1
	ubfx	x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
	cbz	x1, .Lset_hcrx_\@
	orr	x0, x0, #HCRX_EL2_GCSEn

.Lset_hcrx_\@:
	msr_s	SYS_HCRX_EL2, x0
.Lskip_hcrx_\@:
.endm

/* Check if running in host at EL2 mode, i.e., (h)VHE. Jump to fail if not. */
.macro __check_hvhe fail, tmp
	mrs	\tmp, hcr_el2
	and	\tmp, \tmp, #HCR_E2H
	cbz	\tmp, \fail
.endm

/*
 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
 * This is not necessary for VHE, since the host kernel runs in EL2,
 * and EL0 accesses are configured at a later stage of the boot process.
 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
 * EL2.
 */
.macro __init_el2_timers
	mov	x0, #3				// Enable EL1 physical timers
	__check_hvhe .LnVHE_\@, x1
	lsl	x0, x0, #10
.LnVHE_\@:
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset
.endm
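
/*
 * For reference (illustrative, based on the two CNTHCTL_EL2 layouts):
 * with E2H == 0 the value 3 sets EL1PCTEN/EL1PCEN (bits [1:0]), while
 * with E2H == 1 the equivalent EL1 controls are EL1PCTEN/EL1PTEN at
 * bits [11:10], hence the shift by 10 in the (h)VHE case above.
 */
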
/* Branch to skip_label if the SPE version is less than the given version */
.macro __spe_vers_imp skip_label, version, tmp
	mrs	\tmp, id_aa64dfr0_el1
	ubfx	\tmp, \tmp, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
	cmp	\tmp, \version
	b.lt	\skip_label
.endm

.macro __init_el2_debug
	mrs	x1, id_aa64dfr0_el1
	ubfx	x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	x0, #ID_AA64DFR0_EL1_PMUVer_NI
	ccmp	x0, #ID_AA64DFR0_EL1_PMUVer_IMP_DEF, #4, ne
	b.eq	.Lskip_pmu_\@			// Skip if no PMU present or IMP_DEF
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
.Lskip_pmu_\@:
	csel	x2, xzr, x0, eq			// all PMU counters from EL1

	/* Statistical profiling */
	__spe_vers_imp .Lskip_spe_\@, #ID_AA64DFR0_EL1_PMSVer_IMP, x0	// Skip if SPE not present

	mrs_s	x0, SYS_PMBIDR_EL1		// If SPE available at EL2,
	and	x0, x0, #(1 << PMBIDR_EL1_P_SHIFT)
	cbnz	x0, .Lskip_spe_el2_\@		// then permit sampling of physical
	mov	x0, #(1 << PMSCR_EL2_PCT_SHIFT | \
		      1 << PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x0		// addresses and physical counter
.Lskip_spe_el2_\@:
	mov	x0, #MDCR_EL2_E2PB_MASK
	orr	x2, x2, x0			// If we don't have VHE, then
						// use EL1&0 translation.

.Lskip_spe_\@:
	/* Trace buffer */
	ubfx	x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4
	cbz	x0, .Lskip_trace_\@		// Skip if TraceBuffer is not present

	mrs_s	x0, SYS_TRBIDR_EL1
	and	x0, x0, TRBIDR_EL1_P
	cbnz	x0, .Lskip_trace_\@		// If TRBE is available at EL2

	mov	x0, #MDCR_EL2_E2TB_MASK
	orr	x2, x2, x0			// allow the EL1&0 translation
						// to own it.

.Lskip_trace_\@:
	msr	mdcr_el2, x2			// Configure debug traps
.endm

/* LORegions */
.macro __init_el2_lor
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_EL1_LO_SHIFT, #4
	cbz	x0, .Lskip_lor_\@
	msr_s	SYS_LORC_EL1, xzr
.Lskip_lor_\@:
.endm

/* Stage-2 translation */
.macro __init_el2_stage2
	msr	vttbr_el2, xzr
.endm

/* GICv3 system register access */
.macro __init_el2_gicv3
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4
	cbz	x0, .Lskip_gicv3_\@

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, .Lskip_gicv3_\@		// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults
.Lskip_gicv3_\@:
.endm

/* GICv5 system register access */
.macro __init_el2_gicv5
	mrs_s	x0, SYS_ID_AA64PFR2_EL1
	ubfx	x0, x0, #ID_AA64PFR2_EL1_GCIE_SHIFT, #4
	cbz	x0, .Lskip_gicv5_\@

	mov	x0, #(ICH_HFGITR_EL2_GICRCDNMIA | \
		      ICH_HFGITR_EL2_GICRCDIA | \
		      ICH_HFGITR_EL2_GICCDDI | \
		      ICH_HFGITR_EL2_GICCDEOI | \
		      ICH_HFGITR_EL2_GICCDHM | \
		      ICH_HFGITR_EL2_GICCDRCFG | \
		      ICH_HFGITR_EL2_GICCDPEND | \
		      ICH_HFGITR_EL2_GICCDAFF | \
		      ICH_HFGITR_EL2_GICCDPRI | \
		      ICH_HFGITR_EL2_GICCDDIS | \
		      ICH_HFGITR_EL2_GICCDEN)
	msr_s	SYS_ICH_HFGITR_EL2, x0		// Disable instruction traps
	mov_q	x0, (ICH_HFGRTR_EL2_ICC_PPI_ACTIVERn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_PRIORITYRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_PENDRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_ENABLERn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_HMRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_IAFFIDR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_ICSR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PCR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_HPPIR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_HAPR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_CR0_EL1 | \
		     ICH_HFGRTR_EL2_ICC_IDRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_APR_EL1)
	msr_s	SYS_ICH_HFGRTR_EL2, x0		// Disable reg read traps
	mov_q	x0, (ICH_HFGWTR_EL2_ICC_PPI_ACTIVERn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PPI_PRIORITYRn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PPI_PENDRn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PPI_ENABLERn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_ICSR_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PCR_EL1 | \
		     ICH_HFGWTR_EL2_ICC_CR0_EL1 | \
		     ICH_HFGWTR_EL2_ICC_APR_EL1)
	msr_s	SYS_ICH_HFGWTR_EL2, x0		// Disable reg write traps
.Lskip_gicv5_\@:
.endm

.macro __init_el2_hstr
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
.endm

/* Virtual CPU ID registers */
.macro __init_el2_nvhe_idregs
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1
.endm

/* Coprocessor traps */
.macro __init_el2_cptr
	__check_hvhe .LnVHE_\@, x1
	mov	x0, #CPACR_EL1_FPEN
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_\@
.LnVHE_\@:
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2
.Lskip_set_cptr_\@:
.endm
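
/*
 * For reference (illustrative): with HCR_EL2.E2H == 1, the cpacr_el1
 * encoding used above actually accesses CPTR_EL2, which then takes the
 * CPACR_EL1 format, so setting CPACR_EL1_FPEN is enough to disable
 * FP/SIMD traps. In the nVHE case, 0x33ff is believed to be the
 * traditional CPTR_EL2 value: the architectural RES1 bits plus TZ set
 * (SVE stays trapped until finalise_el2_state clears it), with TFP
 * clear so FP/SIMD is not trapped.
 */
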
/*
 * Configure BRBE to permit recording cycle counts and branch mispredicts.
 *
 * At any EL, to record cycle counts BRBE requires that both
 * BRBCR_EL2.CC=1 and BRBCR_EL1.CC=1.
 *
 * At any EL, to record branch mispredicts BRBE requires that both
 * BRBCR_EL2.MPRED=1 and BRBCR_EL1.MPRED=1.
 *
 * Set {CC,MPRED} in BRBCR_EL2 in case nVHE mode is used and we are
 * executing in EL1.
 */
.macro __init_el2_brbe
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
	cbz	x1, .Lskip_brbe_\@

	mov_q	x0, BRBCR_ELx_CC | BRBCR_ELx_MPRED
	msr_s	SYS_BRBCR_EL2, x0
.Lskip_brbe_\@:
.endm

/* Disable any fine grained traps */
.macro __init_el2_fgt
	mrs	x1, id_aa64mmfr0_el1
	ubfx	x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
	cbz	x1, .Lskip_fgt_\@

	mov	x0, xzr
	mov	x2, xzr

	/* If SPEv1p2 is implemented, */
	__spe_vers_imp .Lskip_spe_fgt_\@, #ID_AA64DFR0_EL1_PMSVer_V1P2, x1
	/* Disable PMSNEVFR_EL1 read and write traps */
	orr	x0, x0, #HDFGRTR_EL2_nPMSNEVFR_EL1_MASK
	orr	x2, x2, #HDFGWTR_EL2_nPMSNEVFR_EL1_MASK

.Lskip_spe_fgt_\@:
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
	cbz	x1, .Lskip_brbe_fgt_\@

	/*
	 * Disable read traps for the following registers
	 *
	 * [BRBSRC|BRBTGT|BRBINF]_EL1
	 * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1
	 */
	orr	x0, x0, #HDFGRTR_EL2_nBRBDATA_MASK

	/*
	 * Disable write traps for the following registers
	 *
	 * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1
	 */
	orr	x2, x2, #HDFGWTR_EL2_nBRBDATA_MASK

	/* Disable read and write traps for [BRBCR|BRBFCR]_EL1 */
	orr	x0, x0, #HDFGRTR_EL2_nBRBCTL_MASK
	orr	x2, x2, #HDFGWTR_EL2_nBRBCTL_MASK

	/* Disable read traps for BRBIDR_EL1 */
	orr	x0, x0, #HDFGRTR_EL2_nBRBIDR_MASK

.Lskip_brbe_fgt_\@:

.Lset_debug_fgt_\@:
	msr_s	SYS_HDFGRTR_EL2, x0
	msr_s	SYS_HDFGWTR_EL2, x2

	mov	x0, xzr
	mov	x2, xzr

	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
	cbz	x1, .Lskip_brbe_insn_fgt_\@

	/* Disable traps for the BRBIALL instruction */
	orr	x2, x2, #HFGITR_EL2_nBRBIALL_MASK

	/* Disable traps for the BRBINJ instruction */
	orr	x2, x2, #HFGITR_EL2_nBRBINJ_MASK

.Lskip_brbe_insn_fgt_\@:
	mrs	x1, id_aa64pfr1_el1
	ubfx	x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
	cbz	x1, .Lskip_sme_fgt_\@

	/* Disable nVHE traps of TPIDR2 and SMPRI */
	orr	x0, x0, #HFGRTR_EL2_nSMPRI_EL1_MASK
	orr	x0, x0, #HFGRTR_EL2_nTPIDR2_EL0_MASK

.Lskip_sme_fgt_\@:
	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
	cbz	x1, .Lskip_pie_fgt_\@

	/* Disable trapping of PIR_EL1 / PIRE0_EL1 */
	orr	x0, x0, #HFGRTR_EL2_nPIR_EL1
	orr	x0, x0, #HFGRTR_EL2_nPIRE0_EL1

.Lskip_pie_fgt_\@:
	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1POE_SHIFT, #4
	cbz	x1, .Lskip_poe_fgt_\@

	/* Disable trapping of POR_EL0 */
	orr	x0, x0, #HFGRTR_EL2_nPOR_EL0

.Lskip_poe_fgt_\@:
	/* GCS depends on PIE so we don't check it if PIE is absent */
	mrs_s	x1, SYS_ID_AA64PFR1_EL1
	ubfx	x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
	cbz	x1, .Lskip_gcs_fgt_\@

	/* Disable traps of access to GCS registers at EL0 and EL1 */
	orr	x0, x0, #HFGRTR_EL2_nGCS_EL1_MASK
	orr	x0, x0, #HFGRTR_EL2_nGCS_EL0_MASK

.Lskip_gcs_fgt_\@:

.Lset_fgt_\@:
	msr_s	SYS_HFGRTR_EL2, x0
	msr_s	SYS_HFGWTR_EL2, x0
	msr_s	SYS_HFGITR_EL2, x2

	mrs	x1, id_aa64pfr0_el1		// AMU traps UNDEF without AMU
	ubfx	x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
	cbz	x1, .Lskip_amu_fgt_\@

	msr_s	SYS_HAFGRTR_EL2, xzr

.Lskip_amu_fgt_\@:

.Lskip_fgt_\@:
.endm
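
/*
 * For reference (illustrative): the FGT bits accumulated above are all
 * negative-polarity ("nFOO") controls, so a set bit *disables* the
 * corresponding fine grained trap, while a clear bit leaves it enabled.
 * Bits for absent features are deliberately left at 0; the trapped
 * registers cannot be exercised from EL1/EL0 in that case anyway.
 */
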
.macro __init_el2_fgt2
	mrs	x1, id_aa64mmfr0_el1
	ubfx	x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
	cmp	x1, #ID_AA64MMFR0_EL1_FGT_FGT2
	b.lt	.Lskip_fgt2_\@

	mov	x0, xzr
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	x1, #ID_AA64DFR0_EL1_PMUVer_V3P9
	b.lt	.Lskip_pmuv3p9_\@

	orr	x0, x0, #HDFGRTR2_EL2_nPMICNTR_EL0
	orr	x0, x0, #HDFGRTR2_EL2_nPMICFILTR_EL0
	orr	x0, x0, #HDFGRTR2_EL2_nPMUACR_EL1
.Lskip_pmuv3p9_\@:
	/* If SPE is implemented, */
	__spe_vers_imp .Lskip_spefds_\@, #ID_AA64DFR0_EL1_PMSVer_IMP, x1
	/* we can read PMSIDR and */
	mrs_s	x1, SYS_PMSIDR_EL1
	and	x1, x1, #PMSIDR_EL1_FDS
	/* if FEAT_SPE_FDS is implemented, */
	cbz	x1, .Lskip_spefds_\@
	/* disable traps of PMSDSFR to EL2. */
	orr	x0, x0, #HDFGRTR2_EL2_nPMSDSFR_EL1

.Lskip_spefds_\@:
	msr_s	SYS_HDFGRTR2_EL2, x0
	msr_s	SYS_HDFGWTR2_EL2, x0
	msr_s	SYS_HFGRTR2_EL2, xzr
	msr_s	SYS_HFGWTR2_EL2, xzr
	msr_s	SYS_HFGITR2_EL2, xzr
.Lskip_fgt2_\@:
.endm

/**
 * Initialize EL2 registers to sane values. This should be called early
 * on all cores that were booted in EL2. Note that everything gets
 * initialized as if VHE was not available. The kernel context will be
 * upgraded to VHE if possible later on in the boot process.
 *
 * Regs: x0, x1 and x2 are clobbered.
 */
.macro init_el2_state
	__init_el2_sctlr
	__init_el2_hcrx
	__init_el2_timers
	__init_el2_debug
	__init_el2_brbe
	__init_el2_lor
	__init_el2_stage2
	__init_el2_gicv3
	__init_el2_gicv5
	__init_el2_hstr
	__init_el2_nvhe_idregs
	__init_el2_cptr
	__init_el2_fgt
	__init_el2_fgt2
.endm

#ifndef __KVM_NVHE_HYPERVISOR__
// This will clobber tmp1 and tmp2, and expects tmp1 to contain
// the id register value as read from the HW
.macro __check_override idreg, fld, width, pass, fail, tmp1, tmp2
	ubfx	\tmp1, \tmp1, #\fld, #\width
	cbz	\tmp1, \fail

	adr_l	\tmp1, \idreg\()_override
	ldr	\tmp2, [\tmp1, FTR_OVR_VAL_OFFSET]
	ldr	\tmp1, [\tmp1, FTR_OVR_MASK_OFFSET]
	ubfx	\tmp2, \tmp2, #\fld, #\width
	ubfx	\tmp1, \tmp1, #\fld, #\width
	cmp	\tmp1, xzr
	and	\tmp2, \tmp2, \tmp1
	csinv	\tmp2, \tmp2, xzr, ne
	cbnz	\tmp2, \pass
	b	\fail
.endm

// This will clobber tmp1 and tmp2
.macro check_override idreg, fld, pass, fail, tmp1, tmp2
	mrs	\tmp1, \idreg\()_el1
	__check_override \idreg \fld 4 \pass \fail \tmp1 \tmp2
.endm
#else
// This will clobber tmp
.macro __check_override idreg, fld, width, pass, fail, tmp, ignore
	ldr_l	\tmp, \idreg\()_el1_sys_val
	ubfx	\tmp, \tmp, #\fld, #\width
	cbnz	\tmp, \pass
	b	\fail
.endm

.macro check_override idreg, fld, pass, fail, tmp, ignore
	__check_override \idreg \fld 4 \pass \fail \tmp \ignore
.endm
#endif
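
/*
 * For reference (illustrative): outside of the nVHE hypervisor,
 * __check_override branches to "pass" only when the ID register field
 * is non-zero in hardware *and* any early idreg override either leaves
 * the field alone (the override mask for the field is zero) or forces
 * it to a non-zero value; it branches to "fail" otherwise. In the nVHE
 * hypervisor, only the snapshotted system value is consulted.
 */
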
.macro finalise_el2_state
	check_override id_aa64pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT, .Linit_mpam_\@, .Lskip_mpam_\@, x1, x2

.Linit_mpam_\@:
	msr_s	SYS_MPAM2_EL2, xzr		// use the default partition
						// and disable lower traps
	mrs_s	x0, SYS_MPAMIDR_EL1
	tbz	x0, #MPAMIDR_EL1_HAS_HCR_SHIFT, .Lskip_mpam_\@	// skip if no MPAMHCR reg
	msr_s	SYS_MPAMHCR_EL2, xzr		// clear TRAP_MPAMIDR_EL1 -> EL2

.Lskip_mpam_\@:
	check_override id_aa64pfr1, ID_AA64PFR1_EL1_GCS_SHIFT, .Linit_gcs_\@, .Lskip_gcs_\@, x1, x2

.Linit_gcs_\@:
	msr_s	SYS_GCSCR_EL1, xzr
	msr_s	SYS_GCSCRE0_EL1, xzr

.Lskip_gcs_\@:
	check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2

.Linit_sve_\@:	/* SVE register access */
	__check_hvhe .Lcptr_nvhe_\@, x1

	// (h)VHE case
	mrs	x0, cpacr_el1			// Disable SVE traps
	orr	x0, x0, #CPACR_EL1_ZEN
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_\@

.Lcptr_nvhe_\@:	// nVHE case
	mrs	x0, cptr_el2			// Disable SVE traps
	bic	x0, x0, #CPTR_EL2_TZ
	msr	cptr_el2, x0
.Lskip_set_cptr_\@:
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

.Lskip_sve_\@:
	check_override id_aa64pfr1, ID_AA64PFR1_EL1_SME_SHIFT, .Linit_sme_\@, .Lskip_sme_\@, x1, x2

.Linit_sme_\@:	/* SME register access and priority mapping */
	__check_hvhe .Lcptr_nvhe_sme_\@, x1

	// (h)VHE case
	mrs	x0, cpacr_el1			// Disable SME traps
	orr	x0, x0, #CPACR_EL1_SMEN
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_sme_\@

.Lcptr_nvhe_sme_\@:	// nVHE case
	mrs	x0, cptr_el2			// Disable SME traps
	bic	x0, x0, #CPTR_EL2_TSM
	msr	cptr_el2, x0
.Lskip_set_cptr_sme_\@:
	isb

	mrs	x1, sctlr_el2
	orr	x1, x1, #SCTLR_ELx_ENTP2	// Disable TPIDR2 traps
	msr	sctlr_el2, x1
	isb

	mov	x0, #0				// SMCR controls

	// Full FP in SM?
	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
	__check_override id_aa64smfr0, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, .Linit_sme_fa64_\@, .Lskip_sme_fa64_\@, x1, x2

.Linit_sme_fa64_\@:
	orr	x0, x0, SMCR_ELx_FA64_MASK
.Lskip_sme_fa64_\@:

	// ZT0 available?
	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
	__check_override id_aa64smfr0, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, .Linit_sme_zt0_\@, .Lskip_sme_zt0_\@, x1, x2
.Linit_sme_zt0_\@:
	orr	x0, x0, SMCR_ELx_EZT0_MASK
.Lskip_sme_zt0_\@:

	orr	x0, x0, #SMCR_ELx_LEN_MASK	// Enable full SME vector
	msr_s	SYS_SMCR_EL2, x0		// length for EL1.

	mrs_s	x1, SYS_SMIDR_EL1		// Priority mapping supported?
	ubfx	x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
	cbz	x1, .Lskip_sme_\@

	msr_s	SYS_SMPRIMAP_EL2, xzr		// Make all priorities equal
.Lskip_sme_\@:
.endm

#endif /* __ARM_KVM_INIT_H__ */