switch.c (84d751a019a9792f5b4884e1d598b603c360ec22) → switch.c (20492a62b99bd4367b79a76ca288d018f11980db)

```diff
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2015 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */

 #include <hyp/switch.h>
 #include <hyp/sysreg-sr.h>

--- 109 unchanged lines hidden ---

 		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
 		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
 	}
 }

 /**
  * Disable host events, enable guest events
  */
+#ifdef CONFIG_HW_PERF_EVENTS
 static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

 	if (pmu->events_host)
 		write_sysreg(pmu->events_host, pmcntenclr_el0);

 	if (pmu->events_guest)

--- 10 unchanged lines hidden ---

 	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

 	if (pmu->events_guest)
 		write_sysreg(pmu->events_guest, pmcntenclr_el0);

 	if (pmu->events_host)
 		write_sysreg(pmu->events_host, pmcntenset_el0);
 }
+#else
+#define __pmu_switch_to_guest(v)	({ false; })
+#define __pmu_switch_to_host(v)		do {} while (0)
+#endif

 /**
  * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
  *
  * Returns true if the hypervisor has handled the exit, and control should go
  * back to the guest, or false if it hasn't.
  */
 static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)

--- 208 unchanged lines hidden ---
```
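The only change between the two revisions is the `CONFIG_HW_PERF_EVENTS` guard: when the PMU driver is configured out, `__pmu_switch_to_guest()` and `__pmu_switch_to_host()` collapse into stub macros, so the run loop still compiles and the guest-switch helper reports that nothing needs restoring. Below is a minimal, self-contained sketch of that stub-macro pattern in plain user-space C, not the kernel helpers themselves: the `switch_to_guest`/`switch_to_host` names and the `HAVE_EVENTS` flag are hypothetical, and the `({ false; })` statement expression assumes a GNU-C compiler (gcc or clang). Build it with and without `-DHAVE_EVENTS` to see both configurations.

```c
#include <stdbool.h>
#include <stdio.h>

#ifdef HAVE_EVENTS
/* Real helpers exist only when the feature is compiled in. */
static bool switch_to_guest(int *events)
{
	printf("clear %d host events, enable guest events\n", *events);
	return true;		/* state was changed; caller must restore it */
}

static void switch_to_host(int *events)
{
	printf("disable guest events, restore %d host events\n", *events);
}
#else
/*
 * Stubs: the call sites still compile, and the "to guest" helper
 * always reports that there is nothing to restore.
 */
#define switch_to_guest(events)	({ false; })
#define switch_to_host(events)	do {} while (0)
#endif

int main(void)
{
	int events = 3;
	bool restore = switch_to_guest(&events);

	/* ... the guest would run here ... */

	if (restore)
		switch_to_host(&events);
	return 0;
}
```

The boolean return is what lets the caller skip the restore path entirely when nothing was touched; with the feature compiled out, the stub always yields `false`, so the restore branch is never taken.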