// SPDX-License-Identifier: GPL-2.0-only

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include <kvm/arm_vgic.h>

#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_nested.h>

#include "vgic.h"

#define ICH_LRN(n)	(ICH_LR0_EL2 + (n))
#define ICH_AP0RN(n)	(ICH_AP0R0_EL2 + (n))
#define ICH_AP1RN(n)	(ICH_AP1R0_EL2 + (n))

struct mi_state {
	u16	eisr;
	u16	elrsr;
	bool	pend;
};

/*
 * The shadow registers loaded to the hardware when running a L2 guest
 * with the virtual IMO/FMO bits set.
 */
struct shadow_if {
	struct vgic_v3_cpu_if	cpuif;
	unsigned long		lr_map;
};

static DEFINE_PER_CPU(struct shadow_if, shadow_if);

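/*
 * Map an L1 LR index (a bit position in lr_map) onto the index of the
 * corresponding entry in the compacted shadow LR array, by counting the
 * shadow LRs allocated below it.
 */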
static int lr_map_idx_to_shadow_idx(struct shadow_if *shadow_if, int idx)
{
	return hweight16(shadow_if->lr_map & (BIT(idx) - 1));
}

/*
 * Nesting GICv3 support
 *
 * On a non-nesting VM (only running at EL0/EL1), the host hypervisor
 * completely controls the interrupts injected via the list registers.
 * Consequently, most of the state that is modified by the guest (by ACK-ing
 * and EOI-ing interrupts) is synced by KVM on each entry/exit, so that we
 * keep a semi-consistent view of the interrupts.
 *
 * This still applies for a NV guest, but only while "InHost" (either
 * running at EL2, or at EL0 with HCR_EL2.{E2H,TGE}=={1,1}).
 *
 * When running a L2 guest ("not InHost"), things are radically different,
 * as the L1 guest is in charge of provisioning the interrupts via its own
 * view of the ICH_LR*_EL2 registers, which conveniently live in the VNCR
 * page. This means that the flow described above does work (there is no
 * state to rebuild in the L0 hypervisor), and that most things happen on L2
 * load/put:
 *
 * - on L2 load: move the in-memory L1 vGIC configuration into a shadow,
 *   per-CPU data structure that is used to populate the actual LRs. This is
 *   an extra copy that we could avoid, but life is short. In the process,
 *   we remap any interrupt that has the HW bit set to the mapped interrupt
 *   on the host, should the host consider it a HW one. This allows the HW
 *   deactivation to take its course, such as for the timer.
 *
 * - on L2 put: perform the inverse transformation, so that the result of L2
 *   running becomes visible to L1 in the VNCR-accessible registers.
 *
 * - there is nothing to do on L2 entry, as everything will have happened
 *   on load. However, this is the point where we detect an interrupt
 *   targeting L1 and prepare the grand switcheroo.
 *
 * - on L2 exit: emulate the HW bit, and deactivate the corresponding L1
 *   interrupt. The L0 active state will be cleared by the HW if the L1
 *   interrupt was itself backed by a HW interrupt.
 *
 * Maintenance Interrupt (MI) management:
 *
 * Since the L2 guest runs the vgic in its full glory, MIs get delivered and
 * used as a handover point between L2 and L1.
 *
 * - on delivery of an MI to L0 while L2 is running: make the L1 MI pending,
 *   and let it rip. This will initiate a vcpu_put() on L2, and allow L1 to
 *   run and process the MI.
 *
 * - the L1 MI is a fully virtual interrupt, not linked to the host's MI.
 *   Its state must be computed at each entry/exit of the guest, much like
 *   we do for the PMU interrupt.
 *
 * - because most of the ICH_*_EL2 registers live in the VNCR page, the
 *   quality of emulation is poor: L1 can set up the vgic so that an MI would
 *   immediately fire, and not observe anything until the next exit. Trying
 *   to read ICH_MISR_EL2 would do the trick, for example.
 *
 * System register emulation:
 *
 * We get two classes of registers:
 *
 * - those backed by memory (LRs, APRs, HCR, VMCR): L1 can freely access
 *   them, and L0 doesn't see a thing.
 *
 * - those that always trap (ELRSR, EISR, MISR): these are status registers
 *   that are built on the fly based on the in-memory state.
 *
 * Only L1 can access the ICH_*_EL2 registers. A non-NV L2 obviously cannot,
 * and a NV L2 would either access the VNCR page provided by L1 (memory
 * based registers), or see the access redirected to L1 (registers that
 * trap) thanks to NV being set by L1.
 */

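/*
 * Check whether we are in a context where the L1-provided, shadow vgic
 * state must be used, i.e. running an L2 guest with the virtual IMO/FMO
 * bits set.
 */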
bool vgic_state_is_nested(struct kvm_vcpu *vcpu)
{
	u64 xmo;

	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
		xmo = __vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_IMO | HCR_FMO);
		WARN_ONCE(xmo && xmo != (HCR_IMO | HCR_FMO),
			  "Separate virtual IRQ/FIQ settings not supported\n");

		return !!xmo;
	}

	return false;
}

static struct shadow_if *get_shadow_if(void)
{
	return this_cpu_ptr(&shadow_if);
}

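/*
 * An LR signals an EOI maintenance interrupt when it has the EOI bit
 * set, the HW bit clear, and an invalid (neither pending nor active)
 * state.
 */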
static bool lr_triggers_eoi(u64 lr)
{
	return !(lr & (ICH_LR_STATE | ICH_LR_HW)) && (lr & ICH_LR_EOI);
}

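/*
 * Scan the in-memory view of the LRs and compute the raw status bits
 * that the MISR/EISR/ELRSR emulation is derived from: which LRs request
 * an EOI, which are empty, and whether anything is still pending.
 */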
static void vgic_compute_mi_state(struct kvm_vcpu *vcpu, struct mi_state *mi_state)
{
	u16 eisr = 0, elrsr = 0;
	bool pend = false;

	for (int i = 0; i < kvm_vgic_global_state.nr_lr; i++) {
		u64 lr = __vcpu_sys_reg(vcpu, ICH_LRN(i));

		if (lr_triggers_eoi(lr))
			eisr |= BIT(i);
		if (!(lr & ICH_LR_STATE))
			elrsr |= BIT(i);
		pend |= (lr & ICH_LR_PENDING_BIT);
	}

	mi_state->eisr = eisr;
	mi_state->elrsr = elrsr;
	mi_state->pend = pend;
}

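/*
 * ICH_EISR_EL2 and ICH_ELRSR_EL2 always trap (see the "System register
 * emulation" comment above); the two helpers below synthesize their
 * values from the in-memory state.
 */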
u16 vgic_v3_get_eisr(struct kvm_vcpu *vcpu)
{
	struct mi_state mi_state;

	vgic_compute_mi_state(vcpu, &mi_state);
	return mi_state.eisr;
}

u16 vgic_v3_get_elrsr(struct kvm_vcpu *vcpu)
{
	struct mi_state mi_state;

	vgic_compute_mi_state(vcpu, &mi_state);
	return mi_state.elrsr;
}

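/*
 * Synthesize the view of ICH_MISR_EL2 from the in-memory LR, HCR and
 * VMCR state, evaluating each of the architected maintenance interrupt
 * conditions (EOI, U, LRENP, NP, VGrp{0,1}{E,D}).
 */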
u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu)
{
	struct mi_state mi_state;
	u64 reg = 0, hcr, vmcr;

	hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
	vmcr = __vcpu_sys_reg(vcpu, ICH_VMCR_EL2);

	vgic_compute_mi_state(vcpu, &mi_state);

	if (mi_state.eisr)
		reg |= ICH_MISR_EL2_EOI;

	if (hcr & ICH_HCR_EL2_UIE) {
		int used_lrs = kvm_vgic_global_state.nr_lr;

		used_lrs -= hweight16(mi_state.elrsr);
		reg |= (used_lrs <= 1) ? ICH_MISR_EL2_U : 0;
	}

	if ((hcr & ICH_HCR_EL2_LRENPIE) && FIELD_GET(ICH_HCR_EL2_EOIcount_MASK, hcr))
		reg |= ICH_MISR_EL2_LRENP;

	if ((hcr & ICH_HCR_EL2_NPIE) && !mi_state.pend)
		reg |= ICH_MISR_EL2_NP;

	if ((hcr & ICH_HCR_EL2_VGrp0EIE) && (vmcr & ICH_VMCR_ENG0_MASK))
		reg |= ICH_MISR_EL2_VGrp0E;

	if ((hcr & ICH_HCR_EL2_VGrp0DIE) && !(vmcr & ICH_VMCR_ENG0_MASK))
		reg |= ICH_MISR_EL2_VGrp0D;

	if ((hcr & ICH_HCR_EL2_VGrp1EIE) && (vmcr & ICH_VMCR_ENG1_MASK))
		reg |= ICH_MISR_EL2_VGrp1E;

	if ((hcr & ICH_HCR_EL2_VGrp1DIE) && !(vmcr & ICH_VMCR_ENG1_MASK))
		reg |= ICH_MISR_EL2_VGrp1D;

	return reg;
}

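/*
 * Translate the pINTID of an LR that has the HW bit set from the L1
 * (virtual) mapping to the corresponding host interrupt, dropping the
 * HW bit if the host doesn't consider it a mapped HW interrupt.
 */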
static u64 translate_lr_pintid(struct kvm_vcpu *vcpu, u64 lr)
{
	struct vgic_irq *irq;

	if (!(lr & ICH_LR_HW))
		return lr;

	/* We have the HW bit set, check for validity of pINTID */
	irq = vgic_get_vcpu_irq(vcpu, FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
	/* If there was no real mapping, nuke the HW bit */
	if (!irq || !irq->hw || irq->intid > VGIC_MAX_SPI)
		lr &= ~ICH_LR_HW;

	/* Translate the virtual mapping to the real one, even if invalid */
	if (irq) {
		lr &= ~ICH_LR_PHYS_ID_MASK;
		lr |= FIELD_PREP(ICH_LR_PHYS_ID_MASK, (u64)irq->hwintid);
		vgic_put_irq(vcpu->kvm, irq);
	}

	return lr;
}

/*
 * For LRs that have the HW bit set, such as timer interrupts, we modify
 * them to carry the host hardware interrupt number instead of the
 * virtual one programmed by the guest hypervisor.
 */
static void vgic_v3_create_shadow_lr(struct kvm_vcpu *vcpu,
				     struct vgic_v3_cpu_if *s_cpu_if)
{
	struct shadow_if *shadow_if;

	shadow_if = container_of(s_cpu_if, struct shadow_if, cpuif);
	shadow_if->lr_map = 0;

	for (int i = 0; i < kvm_vgic_global_state.nr_lr; i++) {
		u64 lr = __vcpu_sys_reg(vcpu, ICH_LRN(i));

		if (!(lr & ICH_LR_STATE))
			continue;

		lr = translate_lr_pintid(vcpu, lr);

		s_cpu_if->vgic_lr[hweight16(shadow_if->lr_map)] = lr;
		shadow_if->lr_map |= BIT(i);
	}

	s_cpu_if->used_lrs = hweight16(shadow_if->lr_map);
}

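/*
 * Called on L2 exit: emulate the HW bit for every LR that L1 programmed
 * with one, by propagating any deactivation observed in the shadow LR
 * back to the L1 view of the interrupt.
 */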
void vgic_v3_sync_nested(struct kvm_vcpu *vcpu)
{
	struct shadow_if *shadow_if = get_shadow_if();
	int i;

	for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) {
		u64 lr = __vcpu_sys_reg(vcpu, ICH_LRN(i));
		struct vgic_irq *irq;

		if (!(lr & ICH_LR_HW) || !(lr & ICH_LR_STATE))
			continue;

		/*
		 * If we had a HW lr programmed by the guest hypervisor, we
		 * need to emulate the HW effect between the guest hypervisor
		 * and the nested guest.
		 */
		irq = vgic_get_vcpu_irq(vcpu, FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
		if (WARN_ON(!irq)) /* Shouldn't happen as we check on load */
			continue;

		lr = __gic_v3_get_lr(lr_map_idx_to_shadow_idx(shadow_if, i));
		if (!(lr & ICH_LR_STATE))
			irq->active = false;

		vgic_put_irq(vcpu->kvm, irq);
	}
}

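/*
 * Populate the shadow cpu interface with the L1 state held in the VNCR
 * page (HCR, VMCR, APRs and LRs), folding in the host trapping
 * requirements where necessary.
 */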
static void vgic_v3_create_shadow_state(struct kvm_vcpu *vcpu,
					struct vgic_v3_cpu_if *s_cpu_if)
{
	struct vgic_v3_cpu_if *host_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val = 0;
	int i;

	/*
	 * If we're on a system with a broken vgic that requires
	 * trapping, propagate the trapping requirements.
	 *
	 * Ah, the smell of rotten fruits...
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap))
		val = host_if->vgic_hcr & (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 |
					   ICH_HCR_EL2_TC | ICH_HCR_EL2_TDIR);
	s_cpu_if->vgic_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) | val;
	s_cpu_if->vgic_vmcr = __vcpu_sys_reg(vcpu, ICH_VMCR_EL2);
	s_cpu_if->vgic_sre = host_if->vgic_sre;

	for (i = 0; i < 4; i++) {
		s_cpu_if->vgic_ap0r[i] = __vcpu_sys_reg(vcpu, ICH_AP0RN(i));
		s_cpu_if->vgic_ap1r[i] = __vcpu_sys_reg(vcpu, ICH_AP1RN(i));
	}

	vgic_v3_create_shadow_lr(vcpu, s_cpu_if);
}

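/*
 * Called on L2 load: build the shadow state and hand it over to the
 * hardware, making the interrupts provisioned by L1 visible to L2.
 */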
void vgic_v3_load_nested(struct kvm_vcpu *vcpu)
{
	struct shadow_if *shadow_if = get_shadow_if();
	struct vgic_v3_cpu_if *cpu_if = &shadow_if->cpuif;

	BUG_ON(!vgic_state_is_nested(vcpu));

	vgic_v3_create_shadow_state(vcpu, cpu_if);

	__vgic_v3_restore_vmcr_aprs(cpu_if);
	__vgic_v3_activate_traps(cpu_if);

	__vgic_v3_restore_state(cpu_if);

	/*
	 * Propagate the number of used LRs for the benefit of the HYP
	 * GICv3 emulation code. Yes, this is a pretty sorry hack.
	 */
	vcpu->arch.vgic_cpu.vgic_v3.used_lrs = cpu_if->used_lrs;
}

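/*
 * Called on L2 put: perform the inverse transformation, making the
 * result of running L2 (EOIcount, LR state, VMCR/APRs) visible to L1
 * in the VNCR-backed registers.
 */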
void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
{
	struct shadow_if *shadow_if = get_shadow_if();
	struct vgic_v3_cpu_if *s_cpu_if = &shadow_if->cpuif;
	u64 val;
	int i;

	__vgic_v3_save_vmcr_aprs(s_cpu_if);
	__vgic_v3_deactivate_traps(s_cpu_if);
	__vgic_v3_save_state(s_cpu_if);

	/*
	 * Translate the shadow state HW fields back to the virtual ones
	 * before copying the shadow struct back to the nested one.
	 */
	val = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
	val &= ~ICH_HCR_EL2_EOIcount_MASK;
	val |= (s_cpu_if->vgic_hcr & ICH_HCR_EL2_EOIcount_MASK);
	__vcpu_assign_sys_reg(vcpu, ICH_HCR_EL2, val);
	__vcpu_assign_sys_reg(vcpu, ICH_VMCR_EL2, s_cpu_if->vgic_vmcr);

	for (i = 0; i < 4; i++) {
		__vcpu_assign_sys_reg(vcpu, ICH_AP0RN(i), s_cpu_if->vgic_ap0r[i]);
		__vcpu_assign_sys_reg(vcpu, ICH_AP1RN(i), s_cpu_if->vgic_ap1r[i]);
	}

	for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) {
		val = __vcpu_sys_reg(vcpu, ICH_LRN(i));

		val &= ~ICH_LR_STATE;
		val |= s_cpu_if->vgic_lr[lr_map_idx_to_shadow_idx(shadow_if, i)] & ICH_LR_STATE;

		__vcpu_assign_sys_reg(vcpu, ICH_LRN(i), val);
	}

	vcpu->arch.vgic_cpu.vgic_v3.used_lrs = 0;
}

/*
 * If we exit a L2 VM with a pending maintenance interrupt from the GIC,
 * then we need to forward this to L1 so that it can re-sync the appropriate
 * LRs and sample level-triggered interrupts again.
 */
void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu)
{
	bool state = read_sysreg_s(SYS_ICH_MISR_EL2);

	/* This will force a switch back to L1 if the level is high */
	kvm_vgic_inject_irq(vcpu->kvm, vcpu,
			    vcpu->kvm->arch.vgic.mi_intid, state, vcpu);

	sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
}

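/*
 * The L1 MI is a purely virtual interrupt (see the MI management
 * comment above), so recompute its level on each entry/exit: it is
 * high iff L1 has enabled the interface (ICH_HCR_EL2.En) and at least
 * one maintenance interrupt condition is met.
 */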
void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu)
{
	bool level;

	level = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EL2_En;
	if (level)
		level = vgic_v3_get_misr(vcpu);
	kvm_vgic_inject_irq(vcpu->kvm, vcpu,
			    vcpu->kvm->arch.vgic.mi_intid, level, vcpu);
}