/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_INIT_H__
#define __ARM_KVM_INIT_H__

#ifndef __ASSEMBLER__
#error Assembly-only header
#endif

#include <asm/kvm_arm.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <linux/irqchip/arm-gic-v3.h>

/*
 * Initialise HCR_EL2 to \val, additionally setting HCR_EL2.E2H when the
 * CPU turns out to be (effectively) VHE-only. Clobbers x0, x1.
 */
.macro init_el2_hcr	val
	mov_q	x0, \val

	/*
	 * Compliant CPUs advertise their VHE-onlyness with
	 * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
	 * can reset into an UNKNOWN state and might not read as 1 until it has
	 * been initialized explicitly.
	 *
	 * Initialize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
	 * indicating whether the CPU is running in E2H mode.
	 */
	mrs_s	x1, SYS_ID_AA64MMFR4_EL1
	sbfx	x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
	cmp	x1, #0
	b.lt	.LnE2H0_\@

	/*
	 * Unfortunately, HCR_EL2.E2H can be RES1 even if not advertised
	 * as such via ID_AA64MMFR4_EL1.E2H0:
	 *
	 * - Fruity CPUs predate the !FEAT_E2H0 relaxation, and seem to
	 *   have HCR_EL2.E2H implemented as RAO/WI.
	 *
	 * - On CPUs that lack FEAT_FGT, a hypervisor can't trap guest
	 *   reads of ID_AA64MMFR4_EL1 to advertise !FEAT_E2H0. NV
	 *   guests on these hosts can write to HCR_EL2.E2H without
	 *   trapping to the hypervisor, but these writes have no
	 *   functional effect.
	 *
	 * Handle both cases by checking for an essential VHE property
	 * (system register remapping) to decide whether we're
	 * effectively VHE-only or not.
	 */
	msr_hcr_el2 x0			// Setup HCR_EL2 as nVHE
	isb
	mov	x1, #1			// Write something to FAR_EL1
	msr	far_el1, x1
	isb
	mov	x1, #2			// Try to overwrite it via FAR_EL2
	msr	far_el2, x1
	isb
	mrs	x1, far_el1		// If we see the latest write in FAR_EL1,
	cmp	x1, #2			// we can safely assume we are VHE only.
	b.ne	.LnVHE_\@		// Otherwise, we know that nVHE works.

.LnE2H0_\@:
	orr	x0, x0, #HCR_E2H
	msr_hcr_el2 x0
	isb
.LnVHE_\@:
.endm

/* Reset SCTLR_EL2 to a known state, with the MMU off. Clobbers x0. */
.macro __init_el2_sctlr
	mov_q	x0, INIT_SCTLR_EL2_MMU_OFF
	msr	sctlr_el2, x0
	isb
.endm

/* Initialise HCRX_EL2 if FEAT_HCX is implemented. Clobbers x0, x1. */
.macro __init_el2_hcrx
	mrs	x0, id_aa64mmfr1_el1
	ubfx	x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
	cbz	x0, .Lskip_hcrx_\@
	mov_q	x0, (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)

	/* Enable GCS if supported */
	mrs_s	x1, SYS_ID_AA64PFR1_EL1
	ubfx	x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
	cbz	x1, .Lskip_gcs_hcrx_\@
	orr	x0, x0, #HCRX_EL2_GCSEn

.Lskip_gcs_hcrx_\@:
	/* Enable LS64, LS64_V if supported */
	mrs_s	x1, SYS_ID_AA64ISAR1_EL1
	ubfx	x1, x1, #ID_AA64ISAR1_EL1_LS64_SHIFT, #4
	cbz	x1, .Lset_hcrx_\@
	orr	x0, x0, #HCRX_EL2_EnALS
	cmp	x1, #ID_AA64ISAR1_EL1_LS64_LS64_V
	b.lt	.Lset_hcrx_\@
	orr	x0, x0, #HCRX_EL2_EnASR

.Lset_hcrx_\@:
	msr_s	SYS_HCRX_EL2, x0
.Lskip_hcrx_\@:
.endm

/* Check if running in host at EL2 mode, i.e., (h)VHE. Jump to fail if not. */
.macro __check_hvhe fail, tmp
	mrs	\tmp, hcr_el2
	and	\tmp, \tmp, #HCR_E2H
	cbz	\tmp, \fail
.endm

/*
 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
 * This is not necessary for VHE, since the host kernel runs in EL2,
 * and EL0 accesses are configured in the later stage of boot process.
 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
 * EL2.
 *
 * Clobbers x0, x1.
 */
.macro __init_el2_timers
	mov	x0, #3				// Enable EL1 physical timers
	__check_hvhe .LnVHE_\@, x1
	lsl	x0, x0, #10
.LnVHE_\@:
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset
.endm

/* Branch to skip_label if SPE version is less than given version */
.macro __spe_vers_imp skip_label, version, tmp
	mrs	\tmp, id_aa64dfr0_el1
	ubfx	\tmp, \tmp, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
	cmp	\tmp, \version
	b.lt	\skip_label
.endm

/* Configure EL2 debug, PMU, SPE and TRBE trap behaviour. Clobbers x0, x1, x2. */
.macro __init_el2_debug
	mrs	x1, id_aa64dfr0_el1
	ubfx	x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	x0, #ID_AA64DFR0_EL1_PMUVer_NI
	ccmp	x0, #ID_AA64DFR0_EL1_PMUVer_IMP_DEF, #4, ne
	b.eq	.Lskip_pmu_\@			// Skip if no PMU present or IMP_DEF
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
.Lskip_pmu_\@:
	csel	x2, xzr, x0, eq			// all PMU counters from EL1

	/* Statistical profiling */
	__spe_vers_imp	.Lskip_spe_\@, ID_AA64DFR0_EL1_PMSVer_IMP, x0	// Skip if SPE not present

	mrs_s	x0, SYS_PMBIDR_EL1		// If SPE available at EL2,
	and	x0, x0, #(1 << PMBIDR_EL1_P_SHIFT)
	cbnz	x0, .Lskip_spe_el2_\@		// then permit sampling of physical
	mov	x0, #(1 << PMSCR_EL2_PCT_SHIFT | \
		      1 << PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x0		// addresses and physical counter
.Lskip_spe_el2_\@:
	mov	x0, #MDCR_EL2_E2PB_MASK
	orr	x2, x2, x0			// If we don't have VHE, then
						// use EL1&0 translation.

.Lskip_spe_\@:
	/* Trace buffer */
	ubfx	x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4
	cbz	x0, .Lskip_trace_\@		// Skip if TraceBuffer is not present

	mrs_s	x0, SYS_TRBIDR_EL1
	and	x0, x0, TRBIDR_EL1_P
	cbnz	x0, .Lskip_trace_\@		// If TRBE is available at EL2

	mov	x0, #MDCR_EL2_E2TB_MASK
	orr	x2, x2, x0			// allow the EL1&0 translation
						// to own it.

.Lskip_trace_\@:
	msr	mdcr_el2, x2			// Configure debug traps
.endm

/* LORegions */
.macro __init_el2_lor
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_EL1_LO_SHIFT, 4
	cbz	x0, .Lskip_lor_\@
	msr_s	SYS_LORC_EL1, xzr
.Lskip_lor_\@:
.endm

/* Stage-2 translation */
.macro __init_el2_stage2
	msr	vttbr_el2, xzr
.endm

/* GICv3 system register access */
.macro __init_el2_gicv3
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4
	cbz	x0, .Lskip_gicv3_\@

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, .Lskip_gicv3_\@		// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults
.Lskip_gicv3_\@:
.endm

/* GICv5 system register access */
.macro __init_el2_gicv5
	mrs_s	x0, SYS_ID_AA64PFR2_EL1
	ubfx	x0, x0, #ID_AA64PFR2_EL1_GCIE_SHIFT, #4
	cbz	x0, .Lskip_gicv5_\@

	mov	x0, #(ICH_HFGITR_EL2_GICRCDNMIA | \
		      ICH_HFGITR_EL2_GICRCDIA | \
		      ICH_HFGITR_EL2_GICCDDI | \
		      ICH_HFGITR_EL2_GICCDEOI | \
		      ICH_HFGITR_EL2_GICCDHM | \
		      ICH_HFGITR_EL2_GICCDRCFG | \
		      ICH_HFGITR_EL2_GICCDPEND | \
		      ICH_HFGITR_EL2_GICCDAFF | \
		      ICH_HFGITR_EL2_GICCDPRI | \
		      ICH_HFGITR_EL2_GICCDDIS | \
		      ICH_HFGITR_EL2_GICCDEN)
	msr_s	SYS_ICH_HFGITR_EL2, x0		// Disable instruction traps
	mov_q	x0, (ICH_HFGRTR_EL2_ICC_PPI_ACTIVERn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_PRIORITYRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_PENDRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_ENABLERn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_HMRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_IAFFIDR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_ICSR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PCR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_HPPIR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_CR0_EL1 | \
		     ICH_HFGRTR_EL2_ICC_IDRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_APR_EL1)
	msr_s	SYS_ICH_HFGRTR_EL2, x0		// Disable reg read traps
	mov_q	x0, (ICH_HFGWTR_EL2_ICC_PPI_ACTIVERn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PPI_PRIORITYRn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PPI_PENDRn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PPI_ENABLERn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_ICSR_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PCR_EL1 | \
		     ICH_HFGWTR_EL2_ICC_CR0_EL1 | \
		     ICH_HFGWTR_EL2_ICC_APR_EL1)
	msr_s	SYS_ICH_HFGWTR_EL2, x0		// Disable reg write traps
	mov	x0, #(ICH_VCTLR_EL2_En)
	msr_s	SYS_ICH_VCTLR_EL2, x0		// Enable vHPPI selection
.Lskip_gicv5_\@:
.endm

.macro __init_el2_hstr
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
.endm

/* Virtual CPU ID registers */
.macro __init_el2_nvhe_idregs
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1
.endm

/* Coprocessor traps */
.macro __init_el2_cptr
	__check_hvhe .LnVHE_\@, x1
	mov	x0, #CPACR_EL1_FPEN
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_\@
.LnVHE_\@:
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2
.Lskip_set_cptr_\@:
.endm

/*
 * Configure BRBE to permit recording cycle counts and branch mispredicts.
 *
 * At any EL, to record cycle counts BRBE requires that both BRBCR_EL2.CC=1 and
 * BRBCR_EL1.CC=1.
 *
 * At any EL, to record branch mispredicts BRBE requires that both
 * BRBCR_EL2.MPRED=1 and BRBCR_EL1.MPRED=1.
 *
 * Set {CC,MPRED} in BRBCR_EL2 in case nVHE mode is used and we are
 * executing in EL1.
 */
.macro __init_el2_brbe
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
	cbz	x1, .Lskip_brbe_\@

	mov_q	x0, BRBCR_ELx_CC | BRBCR_ELx_MPRED
	msr_s	SYS_BRBCR_EL2, x0
.Lskip_brbe_\@:
.endm

/* Disable any fine grained traps */
.macro __init_el2_fgt
	mrs	x1, id_aa64mmfr0_el1
	ubfx	x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
	cbz	x1, .Lskip_fgt_\@

	mov	x0, xzr
	mov	x2, xzr
	/* If SPEv1p2 is implemented, */
	__spe_vers_imp	.Lskip_spe_fgt_\@, #ID_AA64DFR0_EL1_PMSVer_V1P2, x1
	/* Disable PMSNEVFR_EL1 read and write traps */
	orr	x0, x0, #HDFGRTR_EL2_nPMSNEVFR_EL1_MASK
	orr	x2, x2, #HDFGWTR_EL2_nPMSNEVFR_EL1_MASK

.Lskip_spe_fgt_\@:
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
	cbz	x1, .Lskip_brbe_fgt_\@

	/*
	 * Disable read traps for the following registers
	 *
	 * [BRBSRC|BRBTGT|RBINF]_EL1
	 * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1
	 */
	orr	x0, x0, #HDFGRTR_EL2_nBRBDATA_MASK

	/*
	 * Disable write traps for the following registers
	 *
	 * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1
	 */
	orr	x2, x2, #HDFGWTR_EL2_nBRBDATA_MASK

	/* Disable read and write traps for [BRBCR|BRBFCR]_EL1 */
	orr	x0, x0, #HDFGRTR_EL2_nBRBCTL_MASK
	orr	x2, x2, #HDFGWTR_EL2_nBRBCTL_MASK

	/* Disable read traps for BRBIDR_EL1 */
	orr	x0, x0, #HDFGRTR_EL2_nBRBIDR_MASK

.Lskip_brbe_fgt_\@:

.Lset_debug_fgt_\@:
	msr_s	SYS_HDFGRTR_EL2, x0
	msr_s	SYS_HDFGWTR_EL2, x2

	mov	x0, xzr
	mov	x2, xzr

	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
	cbz	x1, .Lskip_brbe_insn_fgt_\@

	/* Disable traps for BRBIALL instruction */
	orr	x2, x2, #HFGITR_EL2_nBRBIALL_MASK

	/* Disable traps for BRBINJ instruction */
	orr	x2, x2, #HFGITR_EL2_nBRBINJ_MASK

.Lskip_brbe_insn_fgt_\@:
	mrs	x1, id_aa64pfr1_el1
	ubfx	x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
	cbz	x1, .Lskip_sme_fgt_\@

	/* Disable nVHE traps of TPIDR2 and SMPRI */
	orr	x0, x0, #HFGRTR_EL2_nSMPRI_EL1_MASK
	orr	x0, x0, #HFGRTR_EL2_nTPIDR2_EL0_MASK

.Lskip_sme_fgt_\@:
	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
	cbz	x1, .Lskip_pie_fgt_\@

	/* Disable trapping of PIR_EL1 / PIRE0_EL1 */
	orr	x0, x0, #HFGRTR_EL2_nPIR_EL1
	orr	x0, x0, #HFGRTR_EL2_nPIRE0_EL1

.Lskip_pie_fgt_\@:
	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1POE_SHIFT, #4
	cbz	x1, .Lskip_poe_fgt_\@

	/* Disable trapping of POR_EL0 */
	orr	x0, x0, #HFGRTR_EL2_nPOR_EL0

.Lskip_poe_fgt_\@:
	/* GCS depends on PIE so we don't check it if PIE is absent */
	mrs_s	x1, SYS_ID_AA64PFR1_EL1
	ubfx	x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
	cbz	x1, .Lskip_gce_fgt_\@

	/* Disable traps of access to GCS registers at EL0 and EL1 */
	orr	x0, x0, #HFGRTR_EL2_nGCS_EL1_MASK
	orr	x0, x0, #HFGRTR_EL2_nGCS_EL0_MASK

.Lskip_gce_fgt_\@:

.Lset_fgt_\@:
	msr_s	SYS_HFGRTR_EL2, x0
	msr_s	SYS_HFGWTR_EL2, x0
	msr_s	SYS_HFGITR_EL2, x2

	mrs	x1, id_aa64pfr0_el1		// AMU traps UNDEF without AMU
	ubfx	x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
	cbz	x1, .Lskip_amu_fgt_\@

	msr_s	SYS_HAFGRTR_EL2, xzr

.Lskip_amu_fgt_\@:

.Lskip_fgt_\@:
.endm

/* Disable FEAT_FGT2 fine grained traps, if FGT2 is implemented. Clobbers x0, x1. */
.macro __init_el2_fgt2
	mrs	x1, id_aa64mmfr0_el1
	ubfx	x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
	cmp	x1, #ID_AA64MMFR0_EL1_FGT_FGT2
	b.lt	.Lskip_fgt2_\@

	mov	x0, xzr
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	x1, #ID_AA64DFR0_EL1_PMUVer_V3P9
	b.lt	.Lskip_pmuv3p9_\@

	orr	x0, x0, #HDFGRTR2_EL2_nPMICNTR_EL0
	orr	x0, x0, #HDFGRTR2_EL2_nPMICFILTR_EL0
	orr	x0, x0, #HDFGRTR2_EL2_nPMUACR_EL1
.Lskip_pmuv3p9_\@:
	/* If SPE is implemented, */
	__spe_vers_imp	.Lskip_spefds_\@, ID_AA64DFR0_EL1_PMSVer_IMP, x1
	/* we can read PMSIDR and */
	mrs_s	x1, SYS_PMSIDR_EL1
	and	x1, x1, #PMSIDR_EL1_FDS
	/* if FEAT_SPE_FDS is implemented, */
	cbz	x1, .Lskip_spefds_\@
	/* disable traps of PMSDSFR to EL2. */
	orr	x0, x0, #HDFGRTR2_EL2_nPMSDSFR_EL1

.Lskip_spefds_\@:
	msr_s	SYS_HDFGRTR2_EL2, x0
	msr_s	SYS_HDFGWTR2_EL2, x0
	msr_s	SYS_HFGRTR2_EL2, xzr
	msr_s	SYS_HFGWTR2_EL2, xzr
	msr_s	SYS_HFGITR2_EL2, xzr
.Lskip_fgt2_\@:
.endm

/**
 * Initialize EL2 registers to sane values. This should be called early on all
 * cores that were booted in EL2. Note that everything gets initialised as
 * if VHE was not available. The kernel context will be upgraded to VHE
 * if possible later on in the boot process
 *
 * Regs: x0, x1 and x2 are clobbered.
 */
.macro init_el2_state
	__init_el2_sctlr
	__init_el2_hcrx
	__init_el2_timers
	__init_el2_debug
	__init_el2_brbe
	__init_el2_lor
	__init_el2_stage2
	__init_el2_gicv3
	__init_el2_gicv5
	__init_el2_hstr
	__init_el2_nvhe_idregs
	__init_el2_cptr
	__init_el2_fgt
	__init_el2_fgt2
.endm

#ifndef __KVM_NVHE_HYPERVISOR__
// This will clobber tmp1 and tmp2, and expect tmp1 to contain
// the id register value as read from the HW
.macro __check_override idreg, fld, width, pass, fail, tmp1, tmp2
	ubfx	\tmp1, \tmp1, #\fld, #\width
	cbz	\tmp1, \fail

	adr_l	\tmp1, \idreg\()_override
	ldr	\tmp2, [\tmp1, FTR_OVR_VAL_OFFSET]
	ldr	\tmp1, [\tmp1, FTR_OVR_MASK_OFFSET]
	ubfx	\tmp2, \tmp2, #\fld, #\width
	ubfx	\tmp1, \tmp1, #\fld, #\width
	cmp	\tmp1, xzr
	and	\tmp2, \tmp2, \tmp1
	csinv	\tmp2, \tmp2, xzr, ne
	cbnz	\tmp2, \pass
	b	\fail
.endm

// This will clobber tmp1 and tmp2
.macro check_override idreg, fld, pass, fail, tmp1, tmp2
	mrs	\tmp1, \idreg\()_el1
	__check_override \idreg \fld 4 \pass \fail \tmp1 \tmp2
.endm
#else
// This will clobber tmp
.macro __check_override idreg, fld, width, pass, fail, tmp, ignore
	ldr_l	\tmp, \idreg\()_el1_sys_val
	ubfx	\tmp, \tmp, #\fld, #\width
	cbnz	\tmp, \pass
	b	\fail
.endm

.macro check_override idreg, fld, pass, fail, tmp, ignore
	__check_override \idreg \fld 4 \pass \fail \tmp \ignore
.endm
#endif

/* Finalise MPAM/GCS/SVE/SME EL2 state based on feature overrides. Clobbers x0, x1, x2. */
.macro finalise_el2_state
	check_override id_aa64pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT, .Linit_mpam_\@, .Lskip_mpam_\@, x1, x2

.Linit_mpam_\@:
	msr_s	SYS_MPAM2_EL2, xzr		// use the default partition
						// and disable lower traps
	mrs_s	x0, SYS_MPAMIDR_EL1
	tbz	x0, #MPAMIDR_EL1_HAS_HCR_SHIFT, .Lskip_mpam_\@	// skip if no MPAMHCR reg
	msr_s	SYS_MPAMHCR_EL2, xzr		// clear TRAP_MPAMIDR_EL1 -> EL2

.Lskip_mpam_\@:
	check_override id_aa64pfr1, ID_AA64PFR1_EL1_GCS_SHIFT, .Linit_gcs_\@, .Lskip_gcs_\@, x1, x2

.Linit_gcs_\@:
	msr_s	SYS_GCSCR_EL1, xzr
	msr_s	SYS_GCSCRE0_EL1, xzr

.Lskip_gcs_\@:
	check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2

.Linit_sve_\@:	/* SVE register access */
	__check_hvhe .Lcptr_nvhe_\@, x1

	// (h)VHE case
	mrs	x0, cpacr_el1			// Disable SVE traps
	orr	x0, x0, #CPACR_EL1_ZEN
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_\@

.Lcptr_nvhe_\@: // nVHE case
	mrs	x0, cptr_el2			// Disable SVE traps
	bic	x0, x0, #CPTR_EL2_TZ
	msr	cptr_el2, x0
.Lskip_set_cptr_\@:
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

.Lskip_sve_\@:
	check_override id_aa64pfr1, ID_AA64PFR1_EL1_SME_SHIFT, .Linit_sme_\@, .Lskip_sme_\@, x1, x2

.Linit_sme_\@:	/* SME register access and priority mapping */
	__check_hvhe .Lcptr_nvhe_sme_\@, x1

	// (h)VHE case
	mrs	x0, cpacr_el1			// Disable SME traps
	orr	x0, x0, #CPACR_EL1_SMEN
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_sme_\@

.Lcptr_nvhe_sme_\@: // nVHE case
	mrs	x0, cptr_el2			// Disable SME traps
	bic	x0, x0, #CPTR_EL2_TSM
	msr	cptr_el2, x0
.Lskip_set_cptr_sme_\@:
	isb

	mrs	x1, sctlr_el2
	orr	x1, x1, #SCTLR_ELx_ENTP2	// Disable TPIDR2 traps
	msr	sctlr_el2, x1
	isb

	mov	x0, #0				// SMCR controls

	// Full FP in SM?
	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
	__check_override id_aa64smfr0, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, .Linit_sme_fa64_\@, .Lskip_sme_fa64_\@, x1, x2

.Linit_sme_fa64_\@:
	orr	x0, x0, SMCR_ELx_FA64_MASK
.Lskip_sme_fa64_\@:

	// ZT0 available?
	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
	__check_override id_aa64smfr0, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, .Linit_sme_zt0_\@, .Lskip_sme_zt0_\@, x1, x2
.Linit_sme_zt0_\@:
	orr	x0, x0, SMCR_ELx_EZT0_MASK
.Lskip_sme_zt0_\@:

	orr	x0, x0, #SMCR_ELx_LEN_MASK	// Enable full SME vector
	msr_s	SYS_SMCR_EL2, x0		// length for EL1.

	mrs_s	x1, SYS_SMIDR_EL1		// Priority mapping supported?
	ubfx	x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
	cbz	x1, .Lskip_sme_\@

	msr_s	SYS_SMPRIMAP_EL2, xzr		// Make all priorities equal
.Lskip_sme_\@:
.endm

#endif /* __ARM_KVM_INIT_H__ */