sys_regs.c, side-by-side comparison:
  342123d6913c62be17e5ca1bb325758c5fd0db34  ("-" lines appear only in this revision)
  f9b11aa00708d94a0cd78bfde34b68c0f95d8b50  ("+" lines appear only in this revision)
Unmarked lines are common to both revisions.
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
--- 19 unchanged lines hidden ---
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"
-#include "vgic/vgic.h"

#include "trace.h"

/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
--- 386 unchanged lines hidden ---
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

-	if (!kvm_has_gicv3(vcpu->kvm)) {
-		kvm_inject_undefined(vcpu);
-		return false;
-	}
-
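	/*
	 * Annotation (not part of either revision): the block above exists
	 * only on the 342123d6 side. There, a guest access to the SGI
	 * registers on a VM without a GICv3 takes an UNDEF injection
	 * instead of reaching the SGI emulation; kvm_has_gicv3() appears
	 * to be what the extra #include "vgic/vgic.h" on that side is for.
	 */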
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
--- 436 unchanged lines hidden ---
	__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
+	__vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK;
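	/*
	 * Annotation (not part of either revision): the two masks should be
	 * numerically identical. ARMV8_PMU_COUNTER_MASK was 0x1f, and
	 * PMSELR_EL0.SEL occupies bits [4:0], so PMSELR_EL0_SEL_MASK is
	 * also 0x1f; only the spelling of the mask changes, not the reset
	 * value (assumption from the architected field layout, not from
	 * this diff).
	 */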

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr = 0;

--- 75 unchanged lines hidden ---
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
-			    & ARMV8_PMU_COUNTER_MASK;
+			    & PMSELR_EL0_SEL_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;
--- 51 unchanged lines hidden ---
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

-			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
-			      & ARMV8_PMU_COUNTER_MASK;
+			idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
+					    __vcpu_sys_reg(vcpu, PMSELR_EL0));
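			/*
			 * Annotation (not part of either revision):
			 * SYS_FIELD_GET(PMSELR_EL0, SEL, val) expands to
			 * FIELD_GET(PMSELR_EL0_SEL_MASK, val), i.e. mask
			 * out SEL and shift it down to bit 0. As SEL is
			 * the low field, the shift is zero and the result
			 * matches the open-coded "& mask" it replaces.
			 */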
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
--- 33 unchanged lines hidden ---
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
-		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
+		idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
--- 3538 unchanged lines hidden ---
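
The PMSELR hunks and the PMEVTYPERn branch above are small bit-field manipulations, so they can be sanity-checked with a stand-alone sketch. The program below is a user-space approximation, not kernel code: GENMASK_ULL and field_get() are local re-implementations of the <linux/bitfield.h> helpers, and PMSELR_EL0_SEL_MASK is hard-coded as bits [4:0] on the assumption that this matches the generated sysreg definition. It demonstrates why "val & PMSELR_EL0_SEL_MASK" and SYS_FIELD_GET(PMSELR_EL0, SEL, val) agree for this particular register, and how the CRm/Op2 arithmetic in access_pmu_evtyper() recovers the event-counter index.

/* pmselr_sketch.c - stand-alone illustration, not kernel code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's GENMASK_ULL(). */
#define GENMASK_ULL(h, l) \
	((~0ULL >> (63 - (h))) & (~0ULL << (l)))

/* Assumption: PMSELR_EL0.SEL is bits [4:0], as in the generated
 * sysreg definitions that PMSELR_EL0_SEL_MASK comes from. */
#define PMSELR_EL0_SEL_MASK	GENMASK_ULL(4, 0)

/* FIELD_GET()-style extraction: mask, then shift down to bit 0. */
static uint64_t field_get(uint64_t mask, uint64_t val)
{
	return (val & mask) >> __builtin_ctzll(mask);
}

/* The PMEVTYPERn_EL0 index decode from access_pmu_evtyper():
 * n = CRm[1:0]:Op2[2:0]. */
static unsigned int evtyper_idx(unsigned int crm, unsigned int op2)
{
	return ((crm & 3) << 3) | (op2 & 7);
}

int main(void)
{
	/* UNKNOWN upper bits, SEL = 9. */
	uint64_t pmselr = 0xffffffffffffffe9ULL;

	/* SEL starts at bit 0, so plain masking and field extraction
	 * give the same answer - which is why the "+" side can swap
	 * one for the other without changing behaviour. */
	assert((pmselr & PMSELR_EL0_SEL_MASK) ==
	       field_get(PMSELR_EL0_SEL_MASK, pmselr));
	printf("SEL = %llu\n",
	       (unsigned long long)field_get(PMSELR_EL0_SEL_MASK, pmselr));

	/* PMEVTYPER10_EL0 traps with CRm = 13 (0b1101), Op2 = 2:
	 * idx = (0b01 << 3) | 0b010 = 10. */
	printf("idx = %u\n", evtyper_idx(13, 2));
	return 0;
}

Built with any hosted C compiler (cc pmselr_sketch.c), this should print SEL = 9 and idx = 10. The equivalence only holds because SEL happens to live at bit 0; for a field at a non-zero offset the FIELD_GET-style form would still be correct while the bare mask would not, which is presumably the appeal of SYS_FIELD_GET on the f9b11aa0 side.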