/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_INIT_H__
#define __ARM_KVM_INIT_H__

#ifndef __ASSEMBLER__
#error Assembly-only header
#endif

#include <asm/kvm_arm.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <linux/irqchip/arm-gic-v3.h>

/*
 * Program HCR_EL2 with \val, working out whether HCR_EL2.E2H must also be
 * set because the CPU is effectively VHE-only. On exit, x0 holds the value
 * that was written to HCR_EL2. Clobbers x0, x1 and FAR_EL1/FAR_EL2.
 */
.macro init_el2_hcr	val
	mov_q	x0, \val

	/*
	 * Compliant CPUs advertise their VHE-onlyness with
	 * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
	 * can reset into an UNKNOWN state and might not read as 1 until it has
	 * been initialized explicitly.
	 * Initialize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
	 * indicating whether the CPU is running in E2H mode.
	 */
	mrs_s	x1, SYS_ID_AA64MMFR4_EL1
	sbfx	x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
	cmp	x1, #0
	b.lt	.LnE2H0_\@

	/*
	 * Unfortunately, HCR_EL2.E2H can be RES1 even if not advertised
	 * as such via ID_AA64MMFR4_EL1.E2H0:
	 *
	 * - Fruity CPUs predate the !FEAT_E2H0 relaxation, and seem to
	 *   have HCR_EL2.E2H implemented as RAO/WI.
	 *
	 * - On CPUs that lack FEAT_FGT, a hypervisor can't trap guest
	 *   reads of ID_AA64MMFR4_EL1 to advertise !FEAT_E2H0. NV
	 *   guests on these hosts can write to HCR_EL2.E2H without
	 *   trapping to the hypervisor, but these writes have no
	 *   functional effect.
	 *
	 * Handle both cases by checking for an essential VHE property
	 * (system register remapping) to decide whether we're
	 * effectively VHE-only or not.
	 */
	msr_hcr_el2 x0			// Setup HCR_EL2 as nVHE
	mov	x1, #1			// Write something to FAR_EL1
	msr	far_el1, x1
	isb
	mov	x1, #2			// Try to overwrite it via FAR_EL2
	msr	far_el2, x1
	isb
	mrs	x1, far_el1		// If we see the latest write in FAR_EL1,
	cmp	x1, #2			// we can safely assume we are VHE only.
	b.ne	.LnVHE_\@		// Otherwise, we know that nVHE works.

.LnE2H0_\@:
	// VHE-only: make sure E2H is set before (re)writing HCR_EL2
	orr	x0, x0, #HCR_E2H
	msr_hcr_el2 x0
.LnVHE_\@:
.endm

/* Reset SCTLR_EL2 to a known state, with the EL2 MMU off. Clobbers x0. */
.macro __init_el2_sctlr
	mov_q	x0, INIT_SCTLR_EL2_MMU_OFF
	msr	sctlr_el2, x0
	isb
.endm

/*
 * Program HCRX_EL2 when FEAT_HCX is implemented; left untouched otherwise.
 * Enables MSCEn/TCR2En/EnFPM unconditionally, plus GCS and LS64/LS64_V
 * controls when the corresponding ID registers advertise them.
 * Clobbers x0, x1.
 */
.macro __init_el2_hcrx
	mrs	x0, id_aa64mmfr1_el1
	ubfx	x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
	cbz	x0, .Lskip_hcrx_\@
	mov_q	x0, (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)

	/* Enable GCS if supported */
	mrs_s	x1, SYS_ID_AA64PFR1_EL1
	ubfx	x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
	cbz	x1, .Lskip_gcs_hcrx_\@
	orr	x0, x0, #HCRX_EL2_GCSEn

.Lskip_gcs_hcrx_\@:
	/* Enable LS64, LS64_V if supported */
	mrs_s	x1, SYS_ID_AA64ISAR1_EL1
	ubfx	x1, x1, #ID_AA64ISAR1_EL1_LS64_SHIFT, #4
	cbz	x1, .Lset_hcrx_\@
	orr	x0, x0, #HCRX_EL2_EnALS
	cmp	x1, #ID_AA64ISAR1_EL1_LS64_LS64_V
	b.lt	.Lset_hcrx_\@
	orr	x0, x0, #HCRX_EL2_EnASR

.Lset_hcrx_\@:
	msr_s	SYS_HCRX_EL2, x0
.Lskip_hcrx_\@:
.endm

/* Check if running in host at EL2 mode, i.e., (h)VHE. Jump to fail if not. */
.macro __check_hvhe fail, tmp
	mrs	\tmp, hcr_el2
	and	\tmp, \tmp, #HCR_E2H
	cbz	\tmp, \fail
.endm

/*
 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
 * This is not necessary for VHE, since the host kernel runs in EL2,
 * and EL0 accesses are configured in the later stage of boot process.
 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
 * EL2.
 */
.macro __init_el2_timers
	mov	x0, #3				// Enable EL1 physical timers
	__check_hvhe .LnVHE_\@, x1
	// (h)VHE: the enable bits live at a different position (EL0PCTEN/
	// EL0PTEN), so shift them into place before writing CNTHCTL_EL2
	lsl	x0, x0, #10
.LnVHE_\@:
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset
.endm

/* Branch to skip_label if SPE version is less than given version */
.macro __spe_vers_imp skip_label, version, tmp
	mrs	\tmp, id_aa64dfr0_el1
	ubfx	\tmp, \tmp, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
	cmp	\tmp, \version
	b.lt	\skip_label
.endm

/*
 * Configure MDCR_EL2 so that EL1 owns the PMU, SPE and the trace buffer
 * where possible, and disable the associated traps to EL2.
 * Clobbers x0, x1 and x2 (x2 accumulates the MDCR_EL2 value).
 */
.macro __init_el2_debug
	mrs	x1, id_aa64dfr0_el1
	ubfx	x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	x0, #ID_AA64DFR0_EL1_PMUVer_NI
	ccmp	x0, #ID_AA64DFR0_EL1_PMUVer_IMP_DEF, #4, ne
	b.eq	.Lskip_pmu_\@			// Skip if no PMU present or IMP_DEF
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
.Lskip_pmu_\@:
	// NB: the flags from the ccmp above are still live here and select
	// xzr (no PMU) vs the PMCR_EL0.N counter count extracted into x0
	csel	x2, xzr, x0, eq			// all PMU counters from EL1

	/* Statistical profiling */
	__spe_vers_imp	.Lskip_spe_\@, ID_AA64DFR0_EL1_PMSVer_IMP, x0	// Skip if SPE not present

	mrs_s	x0, SYS_PMBIDR_EL1		// If SPE available at EL2,
	and	x0, x0, #(1 << PMBIDR_EL1_P_SHIFT)
	cbnz	x0, .Lskip_spe_el2_\@		// then permit sampling of physical
	mov	x0, #(1 << PMSCR_EL2_PCT_SHIFT | \
		      1 << PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x0		// addresses and physical counter
.Lskip_spe_el2_\@:
	mov	x0, #MDCR_EL2_E2PB_MASK
	orr	x2, x2, x0			// If we don't have VHE, then
						// use EL1&0 translation.

.Lskip_spe_\@:
	/* Trace buffer */
	ubfx	x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4
	cbz	x0, .Lskip_trace_\@		// Skip if TraceBuffer is not present

	mrs_s	x0, SYS_TRBIDR_EL1
	and	x0, x0, TRBIDR_EL1_P
	cbnz	x0, .Lskip_trace_\@		// If TRBE is available at EL2

	mov	x0, #MDCR_EL2_E2TB_MASK
	orr	x2, x2, x0			// allow the EL1&0 translation
						// to own it.

.Lskip_trace_\@:
	msr	mdcr_el2, x2			// Configure debug traps
.endm

/* LORegions */
.macro __init_el2_lor
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_EL1_LO_SHIFT, 4
	cbz	x0, .Lskip_lor_\@
	msr_s	SYS_LORC_EL1, xzr
.Lskip_lor_\@:
.endm

/* Stage-2 translation */
.macro __init_el2_stage2
	msr	vttbr_el2, xzr
.endm

/* GICv3 system register access */
.macro __init_el2_gicv3
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4
	cbz	x0, .Lskip_gicv3_\@

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, .Lskip_gicv3_\@		// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults
.Lskip_gicv3_\@:
.endm

/* GICv5 system register access */
.macro __init_el2_gicv5
	mrs_s	x0, SYS_ID_AA64PFR2_EL1
	ubfx	x0, x0, #ID_AA64PFR2_EL1_GCIE_SHIFT, #4
	cbz	x0, .Lskip_gicv5_\@

	mov	x0, #(ICH_HFGITR_EL2_GICRCDNMIA | \
		      ICH_HFGITR_EL2_GICRCDIA | \
		      ICH_HFGITR_EL2_GICCDDI | \
		      ICH_HFGITR_EL2_GICCDEOI | \
		      ICH_HFGITR_EL2_GICCDHM | \
		      ICH_HFGITR_EL2_GICCDRCFG | \
		      ICH_HFGITR_EL2_GICCDPEND | \
		      ICH_HFGITR_EL2_GICCDAFF | \
		      ICH_HFGITR_EL2_GICCDPRI | \
		      ICH_HFGITR_EL2_GICCDDIS | \
		      ICH_HFGITR_EL2_GICCDEN)
	msr_s	SYS_ICH_HFGITR_EL2, x0		// Disable instruction traps
	mov_q	x0, (ICH_HFGRTR_EL2_ICC_PPI_ACTIVERn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_PRIORITYRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_PENDRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_ENABLERn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_HMRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_IAFFIDR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_ICSR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PCR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_HPPIR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_CR0_EL1 | \
		     ICH_HFGRTR_EL2_ICC_IDRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_APR_EL1)
	msr_s	SYS_ICH_HFGRTR_EL2, x0		// Disable reg read traps
	mov_q	x0, (ICH_HFGWTR_EL2_ICC_PPI_ACTIVERn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PPI_PRIORITYRn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PPI_PENDRn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PPI_ENABLERn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_ICSR_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PCR_EL1 | \
		     ICH_HFGWTR_EL2_ICC_CR0_EL1 | \
		     ICH_HFGWTR_EL2_ICC_APR_EL1)
	msr_s	SYS_ICH_HFGWTR_EL2, x0		// Disable reg write traps
	mov	x0, #(ICH_VCTLR_EL2_En)
	msr_s	SYS_ICH_VCTLR_EL2, x0		// Enable vHPPI selection
.Lskip_gicv5_\@:
.endm

.macro __init_el2_hstr
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
.endm

/* Virtual CPU ID registers */
.macro __init_el2_nvhe_idregs
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1
.endm

/* Coprocessor traps */
.macro __init_el2_cptr
	__check_hvhe .LnVHE_\@, x1
	// (h)VHE: CPTR_EL2 accesses are remapped to CPACR_EL1's layout
	mov	x0, #CPACR_EL1_FPEN
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_\@
.LnVHE_\@:
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2
.Lskip_set_cptr_\@:
.endm

/*
 * Configure BRBE to permit recording cycle counts and branch mispredicts.
 *
 * At any EL, to record cycle counts BRBE requires that both BRBCR_EL2.CC=1 and
 * BRBCR_EL1.CC=1.
 *
 * At any EL, to record branch mispredicts BRBE requires that both
 * BRBCR_EL2.MPRED=1 and BRBCR_EL1.MPRED=1.
 *
 * Set {CC,MPRED} in BRBCR_EL2 in case nVHE mode is used and we are
 * executing in EL1.
 */
.macro __init_el2_brbe
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
	cbz	x1, .Lskip_brbe_\@

	mov_q	x0, BRBCR_ELx_CC | BRBCR_ELx_MPRED
	msr_s	SYS_BRBCR_EL2, x0
.Lskip_brbe_\@:
.endm

/* Disable any fine grained traps */
.macro __init_el2_fgt
	mrs	x1, id_aa64mmfr0_el1
	ubfx	x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
	cbz	x1, .Lskip_fgt_\@

	// x0 accumulates HDFGRTR_EL2 / HFGRTR_EL2 bits, x2 the
	// corresponding write/instruction trap registers
	mov	x0, xzr
	mov	x2, xzr
	/* If SPEv1p2 is implemented, */
	__spe_vers_imp	.Lskip_spe_fgt_\@, #ID_AA64DFR0_EL1_PMSVer_V1P2, x1
	/* Disable PMSNEVFR_EL1 read and write traps */
	orr	x0, x0, #HDFGRTR_EL2_nPMSNEVFR_EL1_MASK
	orr	x2, x2, #HDFGWTR_EL2_nPMSNEVFR_EL1_MASK

.Lskip_spe_fgt_\@:
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
	cbz	x1, .Lskip_brbe_fgt_\@

	/*
	 * Disable read traps for the following registers
	 *
	 * [BRBSRC|BRBTGT|RBINF]_EL1
	 * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1
	 */
	orr	x0, x0, #HDFGRTR_EL2_nBRBDATA_MASK

	/*
	 * Disable write traps for the following registers
	 *
	 * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1
	 */
	orr	x2, x2, #HDFGWTR_EL2_nBRBDATA_MASK

	/* Disable read and write traps for [BRBCR|BRBFCR]_EL1 */
	orr	x0, x0, #HDFGRTR_EL2_nBRBCTL_MASK
	orr	x2, x2, #HDFGWTR_EL2_nBRBCTL_MASK

	/* Disable read traps for BRBIDR_EL1 */
	orr	x0, x0, #HDFGRTR_EL2_nBRBIDR_MASK

.Lskip_brbe_fgt_\@:

.Lset_debug_fgt_\@:
	msr_s	SYS_HDFGRTR_EL2, x0
	msr_s	SYS_HDFGWTR_EL2, x2

	// Restart accumulation for the HFG{RT,WT,IT}R_EL2 registers
	mov	x0, xzr
	mov	x2, xzr

	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
	cbz	x1, .Lskip_brbe_insn_fgt_\@

	/* Disable traps for BRBIALL instruction */
	orr	x2, x2, #HFGITR_EL2_nBRBIALL_MASK

	/* Disable traps for BRBINJ instruction */
	orr	x2, x2, #HFGITR_EL2_nBRBINJ_MASK

.Lskip_brbe_insn_fgt_\@:
	mrs	x1, id_aa64pfr1_el1
	ubfx	x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
	cbz	x1, .Lskip_sme_fgt_\@

	/* Disable nVHE traps of TPIDR2 and SMPRI */
	orr	x0, x0, #HFGRTR_EL2_nSMPRI_EL1_MASK
	orr	x0, x0, #HFGRTR_EL2_nTPIDR2_EL0_MASK

.Lskip_sme_fgt_\@:
	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
	cbz	x1, .Lskip_pie_fgt_\@

	/* Disable trapping of PIR_EL1 / PIRE0_EL1 */
	orr	x0, x0, #HFGRTR_EL2_nPIR_EL1
	orr	x0, x0, #HFGRTR_EL2_nPIRE0_EL1

.Lskip_pie_fgt_\@:
	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1POE_SHIFT, #4
	cbz	x1, .Lskip_poe_fgt_\@

	/* Disable trapping of POR_EL0 */
	orr	x0, x0, #HFGRTR_EL2_nPOR_EL0

.Lskip_poe_fgt_\@:
	/* GCS depends on PIE so we don't check it if PIE is absent */
	mrs_s	x1, SYS_ID_AA64PFR1_EL1
	ubfx	x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
	cbz	x1, .Lskip_gce_fgt_\@

	/* Disable traps of access to GCS registers at EL0 and EL1 */
	orr	x0, x0, #HFGRTR_EL2_nGCS_EL1_MASK
	orr	x0, x0, #HFGRTR_EL2_nGCS_EL0_MASK

.Lskip_gce_fgt_\@:

.Lset_fgt_\@:
	msr_s	SYS_HFGRTR_EL2, x0
	msr_s	SYS_HFGWTR_EL2, x0
	msr_s	SYS_HFGITR_EL2, x2

	mrs	x1, id_aa64pfr0_el1		// AMU traps UNDEF without AMU
	ubfx	x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
	cbz	x1, .Lskip_amu_fgt_\@

	msr_s	SYS_HAFGRTR_EL2, xzr

.Lskip_amu_fgt_\@:

.Lskip_fgt_\@:
.endm

/*
 * Disable the second set of fine grained traps (FEAT_FGT2), keeping
 * PMUv3p9 and SPE_FDS registers untrapped where implemented.
 * Clobbers x0 and x1.
 */
.macro __init_el2_fgt2
	mrs	x1, id_aa64mmfr0_el1
	ubfx	x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
	cmp	x1, #ID_AA64MMFR0_EL1_FGT_FGT2
	b.lt	.Lskip_fgt2_\@

	mov	x0, xzr
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	x1, #ID_AA64DFR0_EL1_PMUVer_V3P9
	b.lt	.Lskip_pmuv3p9_\@

	orr	x0, x0, #HDFGRTR2_EL2_nPMICNTR_EL0
	orr	x0, x0, #HDFGRTR2_EL2_nPMICFILTR_EL0
	orr	x0, x0, #HDFGRTR2_EL2_nPMUACR_EL1
.Lskip_pmuv3p9_\@:
	/* If SPE is implemented, */
	__spe_vers_imp	.Lskip_spefds_\@, ID_AA64DFR0_EL1_PMSVer_IMP, x1
	/* we can read PMSIDR and */
	mrs_s	x1, SYS_PMSIDR_EL1
	and	x1, x1, #PMSIDR_EL1_FDS
	/* if FEAT_SPE_FDS is implemented, */
	cbz	x1, .Lskip_spefds_\@
	/* disable traps of PMSDSFR to EL2. */
	orr	x0, x0, #HDFGRTR2_EL2_nPMSDSFR_EL1

.Lskip_spefds_\@:
	msr_s	SYS_HDFGRTR2_EL2, x0
	msr_s	SYS_HDFGWTR2_EL2, x0
	msr_s	SYS_HFGRTR2_EL2, xzr
	msr_s	SYS_HFGWTR2_EL2, xzr
	msr_s	SYS_HFGITR2_EL2, xzr
.Lskip_fgt2_\@:
.endm

/**
 * Initialize EL2 registers to sane values. This should be called early on all
 * cores that were booted in EL2. Note that everything gets initialised as
 * if VHE was not available. The kernel context will be upgraded to VHE
 * if possible later on in the boot process
 *
 * Regs: x0, x1 and x2 are clobbered.
 */
.macro init_el2_state
	__init_el2_sctlr
	__init_el2_hcrx
	__init_el2_timers
	__init_el2_debug
	__init_el2_brbe
	__init_el2_lor
	__init_el2_stage2
	__init_el2_gicv3
	__init_el2_gicv5
	__init_el2_hstr
	__init_el2_nvhe_idregs
	__init_el2_cptr
	__init_el2_fgt
	__init_el2_fgt2
.endm

#ifndef __KVM_NVHE_HYPERVISOR__
// This will clobber tmp1 and tmp2, and expect tmp1 to contain
// the id register value as read from the HW
.macro __check_override idreg, fld, width, pass, fail, tmp1, tmp2
	ubfx	\tmp1, \tmp1, #\fld, #\width
	cbz	\tmp1, \fail

	// Apply any boot-time override for this ID field: the field is
	// considered present only if (val & mask) is non-zero, or if the
	// field is not covered by the override mask at all
	adr_l	\tmp1, \idreg\()_override
	ldr	\tmp2, [\tmp1, FTR_OVR_VAL_OFFSET]
	ldr	\tmp1, [\tmp1, FTR_OVR_MASK_OFFSET]
	ubfx	\tmp2, \tmp2, #\fld, #\width
	ubfx	\tmp1, \tmp1, #\fld, #\width
	cmp	\tmp1, xzr
	and	\tmp2, \tmp2, \tmp1
	csinv	\tmp2, \tmp2, xzr, ne
	cbnz	\tmp2, \pass
	b	\fail
.endm

// This will clobber tmp1 and tmp2
.macro check_override idreg, fld, pass, fail, tmp1, tmp2
	mrs	\tmp1, \idreg\()_el1
	__check_override \idreg \fld 4 \pass \fail \tmp1 \tmp2
.endm
#else
// This will clobber tmp
.macro __check_override idreg, fld, width, pass, fail, tmp, ignore
	// nVHE hypervisor: use the host-sanitised copy of the ID register;
	// overrides have already been folded in by the host
	ldr_l	\tmp, \idreg\()_el1_sys_val
	ubfx	\tmp, \tmp, #\fld, #\width
	cbnz	\tmp, \pass
	b	\fail
.endm

.macro check_override idreg, fld, pass, fail, tmp, ignore
	__check_override \idreg \fld 4 \pass \fail \tmp \ignore
.endm
#endif

/*
 * Finalise EL2 setup based on (potentially overridden) ID registers:
 * MPAM, GCS, SVE and SME controls. Clobbers x0, x1 and x2.
 */
.macro finalise_el2_state
	check_override id_aa64pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT, .Linit_mpam_\@, .Lskip_mpam_\@, x1, x2

.Linit_mpam_\@:
	mov	x0, #MPAM2_EL2_EnMPAMSM_MASK
	msr_s	SYS_MPAM2_EL2, x0		// use the default partition,
						// and disable lower traps
	mrs_s	x0, SYS_MPAMIDR_EL1
	tbz	x0, #MPAMIDR_EL1_HAS_HCR_SHIFT, .Lskip_mpam_\@	// skip if no MPAMHCR reg
	msr_s	SYS_MPAMHCR_EL2, xzr		// clear TRAP_MPAMIDR_EL1 -> EL2

.Lskip_mpam_\@:
	check_override id_aa64pfr1, ID_AA64PFR1_EL1_GCS_SHIFT, .Linit_gcs_\@, .Lskip_gcs_\@, x1, x2

.Linit_gcs_\@:
	msr_s	SYS_GCSCR_EL1, xzr
	msr_s	SYS_GCSCRE0_EL1, xzr

.Lskip_gcs_\@:
	check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2

.Linit_sve_\@:	/* SVE register access */
	__check_hvhe .Lcptr_nvhe_\@, x1

	// (h)VHE case
	mrs	x0, cpacr_el1			// Disable SVE traps
	orr	x0, x0, #CPACR_EL1_ZEN
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_\@

.Lcptr_nvhe_\@: // nVHE case
	mrs	x0, cptr_el2			// Disable SVE traps
	bic	x0, x0, #CPTR_EL2_TZ
	msr	cptr_el2, x0
.Lskip_set_cptr_\@:
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

.Lskip_sve_\@:
	check_override id_aa64pfr1, ID_AA64PFR1_EL1_SME_SHIFT, .Linit_sme_\@, .Lskip_sme_\@, x1, x2

.Linit_sme_\@:	/* SME register access and priority mapping */
	__check_hvhe .Lcptr_nvhe_sme_\@, x1

	// (h)VHE case
	mrs	x0, cpacr_el1			// Disable SME traps
	orr	x0, x0, #CPACR_EL1_SMEN
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_sme_\@

.Lcptr_nvhe_sme_\@: // nVHE case
	mrs	x0, cptr_el2			// Disable SME traps
	bic	x0, x0, #CPTR_EL2_TSM
	msr	cptr_el2, x0
.Lskip_set_cptr_sme_\@:
	isb

	mrs	x1, sctlr_el2
	orr	x1, x1, #SCTLR_ELx_ENTP2	// Disable TPIDR2 traps
	msr	sctlr_el2, x1
	isb

	mov	x0, #0				// SMCR controls

	// Full FP in SM?
	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
	__check_override id_aa64smfr0, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, .Linit_sme_fa64_\@, .Lskip_sme_fa64_\@, x1, x2

.Linit_sme_fa64_\@:
	orr	x0, x0, SMCR_ELx_FA64_MASK
.Lskip_sme_fa64_\@:

	// ZT0 available?
	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
	__check_override id_aa64smfr0, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, .Linit_sme_zt0_\@, .Lskip_sme_zt0_\@, x1, x2
.Linit_sme_zt0_\@:
	orr	x0, x0, SMCR_ELx_EZT0_MASK
.Lskip_sme_zt0_\@:

	orr	x0, x0, #SMCR_ELx_LEN_MASK	// Enable full SME vector
	msr_s	SYS_SMCR_EL2, x0		// length for EL1.

	mrs_s	x1, SYS_SMIDR_EL1		// Priority mapping supported?
	ubfx	x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
	cbz	x1, .Lskip_sme_\@

	msr_s	SYS_SMPRIMAP_EL2, xzr		// Make all priorities equal
.Lskip_sme_\@:
.endm

#endif /* __ARM_KVM_INIT_H__ */