/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_INIT_H__
#define __ARM_KVM_INIT_H__

#ifndef __ASSEMBLER__
#error Assembly-only header
#endif

#include <asm/kvm_arm.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <linux/irqchip/arm-gic-v3.h>

.macro init_el2_hcr	val
	mov_q	x0, \val

	/*
	 * Compliant CPUs advertise their VHE-onlyness with
	 * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
	 * can reset into an UNKNOWN state and might not read as 1 until it has
	 * been initialized explicitly.
	 *
	 * Initialize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
	 * indicating whether the CPU is running in E2H mode.
	 */
	mrs_s	x1, SYS_ID_AA64MMFR4_EL1
	sbfx	x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
	cmp	x1, #0
	b.lt	.LnE2H0_\@

	/*
	 * Unfortunately, HCR_EL2.E2H can be RES1 even if not advertised
	 * as such via ID_AA64MMFR4_EL1.E2H0:
	 *
	 * - Fruity CPUs predate the !FEAT_E2H0 relaxation, and seem to
	 *   have HCR_EL2.E2H implemented as RAO/WI.
	 *
	 * - On CPUs that lack FEAT_FGT, a hypervisor can't trap guest
	 *   reads of ID_AA64MMFR4_EL1 to advertise !FEAT_E2H0. NV
	 *   guests on these hosts can write to HCR_EL2.E2H without
	 *   trapping to the hypervisor, but these writes have no
	 *   functional effect.
	 *
	 * Handle both cases by checking for an essential VHE property
	 * (system register remapping) to decide whether we're
	 * effectively VHE-only or not.
	 */
	msr_hcr_el2 x0			// Setup HCR_EL2 as nVHE
	isb
	mov	x1, #1			// Write something to FAR_EL1
	msr	far_el1, x1
	isb
	mov	x1, #2			// Try to overwrite it via FAR_EL2
	msr	far_el2, x1
	isb
	mrs	x1, far_el1		// If we see the latest write in FAR_EL1,
	cmp	x1, #2			// we can safely assume we are VHE only.
	b.ne	.LnVHE_\@		// Otherwise, we know that nVHE works.

.LnE2H0_\@:
	orr	x0, x0, #HCR_E2H
	msr_hcr_el2 x0
	isb
.LnVHE_\@:
.endm
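
/*
 * Illustrative use only (not part of the interface above): a boot path
 * entering at EL2 would typically pass its baseline HCR_EL2 configuration,
 * e.g. something like
 *
 *	init_el2_hcr	HCR_HOST_NVHE_FLAGS
 *
 * and can afterwards test HCR_EL2.E2H (see __check_hvhe below) to find out
 * whether the CPU ended up in E2H mode.
 */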

.macro __init_el2_sctlr
	mov_q	x0, INIT_SCTLR_EL2_MMU_OFF
	msr	sctlr_el2, x0
	isb
.endm

.macro __init_el2_hcrx
	mrs	x0, id_aa64mmfr1_el1
	ubfx	x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
	cbz	x0, .Lskip_hcrx_\@
	mov_q	x0, (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)

	/* Enable GCS if supported */
	mrs_s	x1, SYS_ID_AA64PFR1_EL1
	ubfx	x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
	cbz	x1, .Lskip_gcs_hcrx_\@
	orr	x0, x0, #HCRX_EL2_GCSEn

.Lskip_gcs_hcrx_\@:
	/* Enable LS64, LS64_V if supported */
	mrs_s	x1, SYS_ID_AA64ISAR1_EL1
	ubfx	x1, x1, #ID_AA64ISAR1_EL1_LS64_SHIFT, #4
	cbz	x1, .Lset_hcrx_\@
	orr	x0, x0, #HCRX_EL2_EnALS
	cmp	x1, #ID_AA64ISAR1_EL1_LS64_LS64_V
	b.lt	.Lset_hcrx_\@
	orr	x0, x0, #HCRX_EL2_EnASR

.Lset_hcrx_\@:
	msr_s	SYS_HCRX_EL2, x0
.Lskip_hcrx_\@:
.endm

/* Check if running in host at EL2 mode, i.e., (h)VHE. Jump to fail if not. */
.macro __check_hvhe fail, tmp
	mrs	\tmp, hcr_el2
	and	\tmp, \tmp, #HCR_E2H
	cbz	\tmp, \fail
.endm

/*
 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
 * This is not necessary for VHE, since the host kernel runs in EL2,
 * and EL0 accesses are configured in the later stage of boot process.
 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
 * EL2.
 */
.macro __init_el2_timers
	mov	x0, #3			// Enable EL1 physical timers
	__check_hvhe .LnVHE_\@, x1

	lsl	x0, x0, #10		// EL1PCTEN/EL1PCEN live at bits 10/11
					// in the E2H layout of CNTHCTL_EL2
.LnVHE_\@:
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr	// Clear virtual offset
.endm

/* Branch to skip_label if SPE version is less than given version */
.macro __spe_vers_imp skip_label, version, tmp
	mrs	\tmp, id_aa64dfr0_el1
	ubfx	\tmp, \tmp, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
	cmp	\tmp, \version
	b.lt	\skip_label
.endm

.macro __init_el2_debug
	mrs	x1, id_aa64dfr0_el1
	ubfx	x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	x0, #ID_AA64DFR0_EL1_PMUVer_NI
	ccmp	x0, #ID_AA64DFR0_EL1_PMUVer_IMP_DEF, #4, ne
	b.eq	.Lskip_pmu_\@			// Skip if no PMU present or IMP_DEF
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
.Lskip_pmu_\@:
	csel	x2, xzr, x0, eq			// all PMU counters from EL1

	/* Statistical profiling */
	__spe_vers_imp .Lskip_spe_\@, ID_AA64DFR0_EL1_PMSVer_IMP, x0	// Skip if SPE not present

	mrs_s	x0, SYS_PMBIDR_EL1		// If SPE available at EL2,
	and	x0, x0, #(1 << PMBIDR_EL1_P_SHIFT)
	cbnz	x0, .Lskip_spe_el2_\@		// then permit sampling of physical
	mov	x0, #(1 << PMSCR_EL2_PCT_SHIFT | \
		      1 << PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x0		// addresses and physical counter
.Lskip_spe_el2_\@:
	mov	x0, #MDCR_EL2_E2PB_MASK
	orr	x2, x2, x0			// If we don't have VHE, then
						// use EL1&0 translation.

.Lskip_spe_\@:
	/* Trace buffer */
	ubfx	x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4
	cbz	x0, .Lskip_trace_\@		// Skip if TraceBuffer is not present

	mrs_s	x0, SYS_TRBIDR_EL1
	and	x0, x0, TRBIDR_EL1_P
	cbnz	x0, .Lskip_trace_\@		// If TRBE is available at EL2

	mov	x0, #MDCR_EL2_E2TB_MASK
	orr	x2, x2, x0			// allow the EL1&0 translation
						// to own it.

.Lskip_trace_\@:
	msr	mdcr_el2, x2			// Configure debug traps
.endm
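
/*
 * Note: __init_el2_debug accumulates the whole MDCR_EL2 value in x2 and only
 * writes it once at the end. The MDCR_EL2 trap controls (TDA, TDE, TPM, ...)
 * are positive polarity and are left at 0, so EL1 keeps untrapped access to
 * the debug, PMU, SPE and TRBE state set up above.
 */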

/* LORegions */
.macro __init_el2_lor
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_EL1_LO_SHIFT, #4
	cbz	x0, .Lskip_lor_\@
	msr_s	SYS_LORC_EL1, xzr
.Lskip_lor_\@:
.endm

/* Stage-2 translation */
.macro __init_el2_stage2
	msr	vttbr_el2, xzr
.endm

/* GICv3 system register access */
.macro __init_el2_gicv3
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4
	cbz	x0, .Lskip_gicv3_\@

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, .Lskip_gicv3_\@		// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults
.Lskip_gicv3_\@:
.endm

/* GICv5 system register access */
.macro __init_el2_gicv5
	mrs_s	x0, SYS_ID_AA64PFR2_EL1
	ubfx	x0, x0, #ID_AA64PFR2_EL1_GCIE_SHIFT, #4
	cbz	x0, .Lskip_gicv5_\@

	mov	x0, #(ICH_HFGITR_EL2_GICRCDNMIA | \
		      ICH_HFGITR_EL2_GICRCDIA | \
		      ICH_HFGITR_EL2_GICCDDI | \
		      ICH_HFGITR_EL2_GICCDEOI | \
		      ICH_HFGITR_EL2_GICCDHM | \
		      ICH_HFGITR_EL2_GICCDRCFG | \
		      ICH_HFGITR_EL2_GICCDPEND | \
		      ICH_HFGITR_EL2_GICCDAFF | \
		      ICH_HFGITR_EL2_GICCDPRI | \
		      ICH_HFGITR_EL2_GICCDDIS | \
		      ICH_HFGITR_EL2_GICCDEN)
	msr_s	SYS_ICH_HFGITR_EL2, x0		// Disable instruction traps
	mov_q	x0, (ICH_HFGRTR_EL2_ICC_PPI_ACTIVERn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_PRIORITYRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_PENDRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_ENABLERn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PPI_HMRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_IAFFIDR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_ICSR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_PCR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_HPPIR_EL1 | \
		     ICH_HFGRTR_EL2_ICC_CR0_EL1 | \
		     ICH_HFGRTR_EL2_ICC_IDRn_EL1 | \
		     ICH_HFGRTR_EL2_ICC_APR_EL1)
	msr_s	SYS_ICH_HFGRTR_EL2, x0		// Disable reg read traps
	mov_q	x0, (ICH_HFGWTR_EL2_ICC_PPI_ACTIVERn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PPI_PRIORITYRn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PPI_PENDRn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PPI_ENABLERn_EL1 | \
		     ICH_HFGWTR_EL2_ICC_ICSR_EL1 | \
		     ICH_HFGWTR_EL2_ICC_PCR_EL1 | \
		     ICH_HFGWTR_EL2_ICC_CR0_EL1 | \
		     ICH_HFGWTR_EL2_ICC_APR_EL1)
	msr_s	SYS_ICH_HFGWTR_EL2, x0		// Disable reg write traps
.Lskip_gicv5_\@:
.endm

.macro __init_el2_hstr
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
.endm

/* Virtual CPU ID registers */
.macro __init_el2_nvhe_idregs
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1
.endm

/* Coprocessor traps */
.macro __init_el2_cptr
	__check_hvhe .LnVHE_\@, x1
	mov	x0, #CPACR_EL1_FPEN
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_\@
.LnVHE_\@:
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2
.Lskip_set_cptr_\@:
.endm
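
/*
 * Note on __init_el2_cptr: with HCR_EL2.E2H == 1, the FEAT_VHE register
 * redirection turns the "msr cpacr_el1, x0" executed at EL2 into a write of
 * CPTR_EL2, whose E2H layout matches CPACR_EL1, so CPACR_EL1_FPEN really
 * does disable the FP/SIMD traps to EL2. The nVHE path instead programs
 * CPTR_EL2 directly, using its traditional (E2H == 0) format.
 */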

/*
 * Configure BRBE to permit recording cycle counts and branch mispredicts.
 *
 * At any EL, to record cycle counts BRBE requires that both BRBCR_EL2.CC=1
 * and BRBCR_EL1.CC=1.
 *
 * At any EL, to record branch mispredicts BRBE requires that both
 * BRBCR_EL2.MPRED=1 and BRBCR_EL1.MPRED=1.
 *
 * Set {CC,MPRED} in BRBCR_EL2 in case nVHE mode is used and we are
 * executing in EL1.
 */
.macro __init_el2_brbe
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
	cbz	x1, .Lskip_brbe_\@

	mov_q	x0, BRBCR_ELx_CC | BRBCR_ELx_MPRED
	msr_s	SYS_BRBCR_EL2, x0
.Lskip_brbe_\@:
.endm

/* Disable any fine grained traps */
.macro __init_el2_fgt
	mrs	x1, id_aa64mmfr0_el1
	ubfx	x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
	cbz	x1, .Lskip_fgt_\@

	mov	x0, xzr
	mov	x2, xzr

	/* If SPEv1p2 is implemented, */
	__spe_vers_imp .Lskip_spe_fgt_\@, #ID_AA64DFR0_EL1_PMSVer_V1P2, x1
	/* disable PMSNEVFR_EL1 read and write traps. */
	orr	x0, x0, #HDFGRTR_EL2_nPMSNEVFR_EL1_MASK
	orr	x2, x2, #HDFGWTR_EL2_nPMSNEVFR_EL1_MASK

.Lskip_spe_fgt_\@:
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
	cbz	x1, .Lskip_brbe_fgt_\@

	/*
	 * Disable read traps for the following registers
	 *
	 * [BRBSRC|BRBTGT|BRBINF]_EL1
	 * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1
	 */
	orr	x0, x0, #HDFGRTR_EL2_nBRBDATA_MASK

	/*
	 * Disable write traps for the following registers
	 *
	 * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1
	 */
	orr	x2, x2, #HDFGWTR_EL2_nBRBDATA_MASK

	/* Disable read and write traps for [BRBCR|BRBFCR]_EL1 */
	orr	x0, x0, #HDFGRTR_EL2_nBRBCTL_MASK
	orr	x2, x2, #HDFGWTR_EL2_nBRBCTL_MASK

	/* Disable read traps for BRBIDR_EL1 */
	orr	x0, x0, #HDFGRTR_EL2_nBRBIDR_MASK

.Lskip_brbe_fgt_\@:

.Lset_debug_fgt_\@:
	msr_s	SYS_HDFGRTR_EL2, x0
	msr_s	SYS_HDFGWTR_EL2, x2

	mov	x0, xzr
	mov	x2, xzr

	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
	cbz	x1, .Lskip_brbe_insn_fgt_\@

	/* Disable traps for the BRBIALL instruction */
	orr	x2, x2, #HFGITR_EL2_nBRBIALL_MASK

	/* Disable traps for the BRBINJ instruction */
	orr	x2, x2, #HFGITR_EL2_nBRBINJ_MASK

.Lskip_brbe_insn_fgt_\@:
	mrs	x1, id_aa64pfr1_el1
	ubfx	x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
	cbz	x1, .Lskip_sme_fgt_\@

	/* Disable nVHE traps of TPIDR2 and SMPRI */
	orr	x0, x0, #HFGRTR_EL2_nSMPRI_EL1_MASK
	orr	x0, x0, #HFGRTR_EL2_nTPIDR2_EL0_MASK

.Lskip_sme_fgt_\@:
	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
	cbz	x1, .Lskip_pie_fgt_\@

	/* Disable trapping of PIR_EL1 / PIRE0_EL1 */
	orr	x0, x0, #HFGRTR_EL2_nPIR_EL1
	orr	x0, x0, #HFGRTR_EL2_nPIRE0_EL1

.Lskip_pie_fgt_\@:
	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1POE_SHIFT, #4
	cbz	x1, .Lskip_poe_fgt_\@

	/* Disable trapping of POR_EL0 */
	orr	x0, x0, #HFGRTR_EL2_nPOR_EL0

.Lskip_poe_fgt_\@:
	/* GCS depends on PIE so we don't check it if PIE is absent */
	mrs_s	x1, SYS_ID_AA64PFR1_EL1
	ubfx	x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
	cbz	x1, .Lskip_gce_fgt_\@

	/* Disable traps of access to GCS registers at EL0 and EL1 */
	orr	x0, x0, #HFGRTR_EL2_nGCS_EL1_MASK
	orr	x0, x0, #HFGRTR_EL2_nGCS_EL0_MASK

.Lskip_gce_fgt_\@:

.Lset_fgt_\@:
	msr_s	SYS_HFGRTR_EL2, x0
	msr_s	SYS_HFGWTR_EL2, x0
	msr_s	SYS_HFGITR_EL2, x2

	mrs	x1, id_aa64pfr0_el1		// AMU traps UNDEF without AMU
	ubfx	x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
	cbz	x1, .Lskip_amu_fgt_\@

	msr_s	SYS_HAFGRTR_EL2, xzr

.Lskip_amu_fgt_\@:

.Lskip_fgt_\@:
.endm
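
/*
 * Note on the polarity used by __init_el2_fgt: the "n"-prefixed fine grained
 * trap fields (HFGRTR_EL2.nPOR_EL0, HDFGRTR_EL2.nBRBDATA, ...) are negative
 * polarity, so for example
 *
 *	orr	x0, x0, #HFGRTR_EL2_nPOR_EL0	// 1 == don't trap POR_EL0 reads
 *
 * disables the corresponding trap, while leaving such a field at 0 keeps the
 * access trapped to EL2.
 */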

.macro __init_el2_fgt2
	mrs	x1, id_aa64mmfr0_el1
	ubfx	x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
	cmp	x1, #ID_AA64MMFR0_EL1_FGT_FGT2
	b.lt	.Lskip_fgt2_\@

	mov	x0, xzr
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	x1, #ID_AA64DFR0_EL1_PMUVer_V3P9
	b.lt	.Lskip_pmuv3p9_\@

	orr	x0, x0, #HDFGRTR2_EL2_nPMICNTR_EL0
	orr	x0, x0, #HDFGRTR2_EL2_nPMICFILTR_EL0
	orr	x0, x0, #HDFGRTR2_EL2_nPMUACR_EL1
.Lskip_pmuv3p9_\@:
	/* If SPE is implemented, */
	__spe_vers_imp .Lskip_spefds_\@, ID_AA64DFR0_EL1_PMSVer_IMP, x1
	/* we can read PMSIDR and */
	mrs_s	x1, SYS_PMSIDR_EL1
	and	x1, x1, #PMSIDR_EL1_FDS
	/* if FEAT_SPE_FDS is implemented, */
	cbz	x1, .Lskip_spefds_\@
	/* disable traps of PMSDSFR to EL2. */
	orr	x0, x0, #HDFGRTR2_EL2_nPMSDSFR_EL1

.Lskip_spefds_\@:
	msr_s	SYS_HDFGRTR2_EL2, x0
	msr_s	SYS_HDFGWTR2_EL2, x0
	msr_s	SYS_HFGRTR2_EL2, xzr
	msr_s	SYS_HFGWTR2_EL2, xzr
	msr_s	SYS_HFGITR2_EL2, xzr
.Lskip_fgt2_\@:
.endm

/**
 * Initialize EL2 registers to sane values. This should be called early on all
 * cores that were booted in EL2. Note that everything gets initialised as
 * if VHE was not available. The kernel context will be upgraded to VHE
 * if possible later on in the boot process.
 *
 * Regs: x0, x1 and x2 are clobbered.
 */
.macro init_el2_state
	__init_el2_sctlr
	__init_el2_hcrx
	__init_el2_timers
	__init_el2_debug
	__init_el2_brbe
	__init_el2_lor
	__init_el2_stage2
	__init_el2_gicv3
	__init_el2_gicv5
	__init_el2_hstr
	__init_el2_nvhe_idregs
	__init_el2_cptr
	__init_el2_fgt
	__init_el2_fgt2
.endm

#ifndef __KVM_NVHE_HYPERVISOR__
// This will clobber tmp1 and tmp2, and expects tmp1 to contain
// the id register value as read from the HW
.macro __check_override idreg, fld, width, pass, fail, tmp1, tmp2
	ubfx	\tmp1, \tmp1, #\fld, #\width
	cbz	\tmp1, \fail

	adr_l	\tmp1, \idreg\()_override
	ldr	\tmp2, [\tmp1, FTR_OVR_VAL_OFFSET]
	ldr	\tmp1, [\tmp1, FTR_OVR_MASK_OFFSET]
	ubfx	\tmp2, \tmp2, #\fld, #\width
	ubfx	\tmp1, \tmp1, #\fld, #\width
	cmp	\tmp1, xzr
	and	\tmp2, \tmp2, \tmp1
	csinv	\tmp2, \tmp2, xzr, ne	// No override mask for this field?
					// Pretend the override value is all
					// ones so the feature stays enabled.
	cbnz	\tmp2, \pass
	b	\fail
.endm

// This will clobber tmp1 and tmp2
.macro check_override idreg, fld, pass, fail, tmp1, tmp2
	mrs	\tmp1, \idreg\()_el1
	__check_override \idreg \fld 4 \pass \fail \tmp1 \tmp2
.endm
#else
// This will clobber tmp
.macro __check_override idreg, fld, width, pass, fail, tmp, ignore
	ldr_l	\tmp, \idreg\()_el1_sys_val
	ubfx	\tmp, \tmp, #\fld, #\width
	cbnz	\tmp, \pass
	b	\fail
.endm

.macro check_override idreg, fld, pass, fail, tmp, ignore
	__check_override \idreg \fld 4 \pass \fail \tmp \ignore
.endm
#endif
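
/*
 * Illustrative only: a boot path entering at EL2 is expected to run
 * init_el2_state early (with the MMU off), and finalise_el2_state later,
 * once the id register override values consulted by check_override above
 * have been populated, e.g.
 *
 *	init_el2_state
 *	...
 *	finalise_el2_state
 */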
.macro finalise_el2_state
	check_override id_aa64pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT, .Linit_mpam_\@, .Lskip_mpam_\@, x1, x2

.Linit_mpam_\@:
	msr_s	SYS_MPAM2_EL2, xzr		// use the default partition
						// and disable lower traps
	mrs_s	x0, SYS_MPAMIDR_EL1
	tbz	x0, #MPAMIDR_EL1_HAS_HCR_SHIFT, .Lskip_mpam_\@	// skip if no MPAMHCR reg
	msr_s	SYS_MPAMHCR_EL2, xzr		// clear TRAP_MPAMIDR_EL1 -> EL2

.Lskip_mpam_\@:
	check_override id_aa64pfr1, ID_AA64PFR1_EL1_GCS_SHIFT, .Linit_gcs_\@, .Lskip_gcs_\@, x1, x2

.Linit_gcs_\@:
	msr_s	SYS_GCSCR_EL1, xzr
	msr_s	SYS_GCSCRE0_EL1, xzr

.Lskip_gcs_\@:
	check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2

.Linit_sve_\@:	/* SVE register access */
	__check_hvhe .Lcptr_nvhe_\@, x1

	// (h)VHE case
	mrs	x0, cpacr_el1			// Disable SVE traps
	orr	x0, x0, #CPACR_EL1_ZEN
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_\@

.Lcptr_nvhe_\@:	// nVHE case
	mrs	x0, cptr_el2			// Disable SVE traps
	bic	x0, x0, #CPTR_EL2_TZ
	msr	cptr_el2, x0
.Lskip_set_cptr_\@:
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

.Lskip_sve_\@:
	check_override id_aa64pfr1, ID_AA64PFR1_EL1_SME_SHIFT, .Linit_sme_\@, .Lskip_sme_\@, x1, x2

.Linit_sme_\@:	/* SME register access and priority mapping */
	__check_hvhe .Lcptr_nvhe_sme_\@, x1

	// (h)VHE case
	mrs	x0, cpacr_el1			// Disable SME traps
	orr	x0, x0, #CPACR_EL1_SMEN
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_sme_\@

.Lcptr_nvhe_sme_\@:	// nVHE case
	mrs	x0, cptr_el2			// Disable SME traps
	bic	x0, x0, #CPTR_EL2_TSM
	msr	cptr_el2, x0
.Lskip_set_cptr_sme_\@:
	isb

	mrs	x1, sctlr_el2
	orr	x1, x1, #SCTLR_ELx_ENTP2	// Disable TPIDR2 traps
	msr	sctlr_el2, x1
	isb

	mov	x0, #0				// SMCR controls

	// Full FP in SM?
	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
	__check_override id_aa64smfr0, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, .Linit_sme_fa64_\@, .Lskip_sme_fa64_\@, x1, x2

.Linit_sme_fa64_\@:
	orr	x0, x0, SMCR_ELx_FA64_MASK
.Lskip_sme_fa64_\@:

	// ZT0 available?
	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
	__check_override id_aa64smfr0, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, .Linit_sme_zt0_\@, .Lskip_sme_zt0_\@, x1, x2
.Linit_sme_zt0_\@:
	orr	x0, x0, SMCR_ELx_EZT0_MASK
.Lskip_sme_zt0_\@:

	orr	x0, x0, #SMCR_ELx_LEN_MASK	// Enable full SME vector
	msr_s	SYS_SMCR_EL2, x0		// length for EL1.

	mrs_s	x1, SYS_SMIDR_EL1		// Priority mapping supported?
	ubfx	x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
	cbz	x1, .Lskip_sme_\@

	msr_s	SYS_SMPRIMAP_EL2, xzr		// Make all priorities equal
.Lskip_sme_\@:
.endm

#endif /* __ARM_KVM_INIT_H__ */