// SPDX-License-Identifier: GPL-2.0-only
/* in lr_map_idx_to_shadow_idx() */
	return hweight16(shadow_if->lr_map & (BIT(idx) - 1));
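/*
 * Standalone sketch (not kernel code) of the popcount trick above: the
 * shadow index of L1 LR "idx" is the number of lr_map bits set strictly
 * below it, because the shadow array is a compacted copy of the in-use L1
 * LRs. The DEMO_* names and stand-in helpers are assumptions for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BIT(n)	(1UL << (n))

static int demo_hweight16(uint16_t w)
{
	return __builtin_popcount(w);
}

struct demo_shadow_if {
	uint16_t lr_map;	/* bitmap of the L1 LRs folded into the shadow */
};

static int demo_lr_map_idx_to_shadow_idx(struct demo_shadow_if *s, int idx)
{
	return demo_hweight16(s->lr_map & (DEMO_BIT(idx) - 1));
}

int main(void)
{
	/* L1 LRs 0, 1 and 3 in use: they compact to shadow LRs 0, 1 and 2. */
	struct demo_shadow_if s = { .lr_map = 0x0b };

	printf("%d %d %d\n",
	       demo_lr_map_idx_to_shadow_idx(&s, 0),	/* 0 */
	       demo_lr_map_idx_to_shadow_idx(&s, 1),	/* 1 */
	       demo_lr_map_idx_to_shadow_idx(&s, 3));	/* 2 */
	return 0;
}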
/*
 * On a non-nesting VM (only running at EL0/EL1), the host hypervisor
 * fully controls the interrupts injected via the list registers.
 * Consequently, most of the state that is modified by the guest (by ACK-ing
 * and EOI-ing interrupts) is synced by KVM on each entry/exit, so that we
 * keep a semi-consistent view of the interrupts.
 *
 * When L1 runs an L2 guest, KVM performs the following transformations:
 *
 * - on L2 load: move the in-memory L1 vGIC configuration into a shadow,
 *   per-CPU data structure that is used to populate the actual LRs, as
 *   shown in the sketch right after this comment. In the process, any L1
 *   interrupt that is backed by a host interrupt (HW bit set) has its
 *   pINTID translated to the host INTID (see translate_lr_pintid()).
 *
 * - on L2 put: perform the inverse transformation, so that the result of L2
 *   running becomes visible to L1 in the VNCR-accessible registers.
 *
 * - there is nothing to do on L2 entry, as everything will have happened
 *   at L2 load time.
 *
 * - on L2 exit: emulate the HW bit, and deactivate the corresponding L1
 *   interrupt. The L0 active state will be cleared by the HW if the L1
 *   interrupt is itself backed by a HW interrupt.
 *
 * Maintenance Interrupt (MI) management:
 *
 * - on delivery of a MI to L0 while L2 is running: make the L1 MI pending,
 *   so that it is delivered once we switch back to L1.
 *
 * - the L1 MI is a fully virtual interrupt, not linked to the host's MI. Its
 *   pending state is recomputed from the in-memory vGIC state whenever that
 *   state may have changed (see vgic_v3_nested_update_mi() below).
 *
 * Because most of the ICH_*_EL2 registers live in the VNCR page, system
 * register emulation splits into two classes:
 *
 * - those backed by memory (LRs, APRs, HCR, VMCR): L1 can freely access
 *   them without trapping;
 *
 * - those that always trap (ELRSR, EISR, MISR): these are status registers
 *   that are built on the fly based on the in-memory state.
 *
 * Only L1 can access the ICH_*_EL2 registers. A non-NV L2 obviously cannot
 * (these are EL2 registers), and an NV L2 would get its own emulated view of
 * them rather than L1's.
 */
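/*
 * Illustrative, self-contained sketch of the "on L2 load" step above; none
 * of these DEMO_* names exist in the kernel, and the remapping of HW
 * interrupts is elided. The point is the shape of the transformation: walk
 * the L1 LRs, skip the empty ones, compact the rest into the shadow array,
 * and record in lr_map which L1 LR each shadow entry came from.
 */
#include <stdint.h>

#define DEMO_NR_LR	16
#define DEMO_LR_STATE	(3ULL << 62)	/* ICH_LR_EL2.State, bits [63:62] */

struct demo_shadow {
	uint16_t lr_map;			/* origin bitmap for the shadow LRs */
	uint64_t vgic_lr[DEMO_NR_LR];
	unsigned int used_lrs;
};

static int popcount16(uint16_t w)
{
	return __builtin_popcount(w);
}

static void demo_create_shadow_lr(const uint64_t *l1_lr, struct demo_shadow *s)
{
	s->lr_map = 0;

	for (int i = 0; i < DEMO_NR_LR; i++) {
		uint64_t lr = l1_lr[i];

		if (!(lr & DEMO_LR_STATE))
			continue;		/* empty LR: nothing to shadow */

		/* next free shadow slot == number of LRs shadowed so far */
		s->vgic_lr[popcount16(s->lr_map)] = lr;
		s->lr_map |= 1U << i;
	}

	s->used_lrs = popcount16(s->lr_map);
}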
/* in vgic_compute_mi_state() */
	mi_state->eisr = eisr;
	mi_state->elrsr = elrsr;
	mi_state->pend = pend;
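/*
 * Hedged, self-contained sketch of how the three fields assigned above can
 * be derived from the LRs; the DEMO_* constants use the architectural bit
 * positions of ICH_LR_EL2, but the code is an illustration, not the
 * kernel's implementation. Per the GICv3 architecture, an LR is "empty"
 * (ELRSR) when State is 0b00 and it does not still owe an EOI maintenance
 * interrupt; it owes one (EISR) when State is 0b00 with HW clear and EOI
 * set.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_LR_STATE		(3ULL << 62)	/* State, bits [63:62] */
#define DEMO_LR_PENDING_BIT	(1ULL << 62)
#define DEMO_LR_HW		(1ULL << 61)
#define DEMO_LR_EOI		(1ULL << 41)	/* only meaningful when HW == 0 */

struct demo_mi_state {
	uint16_t eisr, elrsr;
	bool pend;
};

static void demo_compute_mi_state(const uint64_t *lr, int nr_lr,
				  struct demo_mi_state *mi)
{
	mi->eisr = mi->elrsr = 0;
	mi->pend = false;

	for (int i = 0; i < nr_lr; i++) {
		bool empty = !(lr[i] & DEMO_LR_STATE);
		bool eoi_mi = (lr[i] & DEMO_LR_EOI) && !(lr[i] & DEMO_LR_HW);

		if (empty && eoi_mi)
			mi->eisr |= 1U << i;	/* owes an EOI MI */
		else if (empty)
			mi->elrsr |= 1U << i;	/* genuinely free */

		if (lr[i] & DEMO_LR_PENDING_BIT)
			mi->pend = true;	/* still a pending interrupt */
	}
}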
/* in vgic_v3_get_misr() */
	used_lrs -= hweight16(mi_state.elrsr);
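/*
 * Continuation of the sketch above (same stand-in definitions): the
 * excerpted line discounts the empty LRs so that the underflow condition
 * is evaluated against the LRs still holding a valid interrupt. The MISR
 * bit positions below are architectural; the logic itself is an assumed
 * illustration.
 */
#define DEMO_MISR_EOI	(1U << 0)	/* at least one EISR bit is set */
#define DEMO_MISR_U	(1U << 1)	/* underflow: <= 1 valid LR, if UIE */
#define DEMO_MISR_NP	(1U << 3)	/* no pending interrupt, if NPIE */

static uint32_t demo_get_misr(const struct demo_mi_state *mi, int nr_lr,
			      bool uie, bool npie)
{
	int valid_lrs = nr_lr - __builtin_popcount(mi->elrsr);
	uint32_t misr = 0;

	if (mi->eisr)
		misr |= DEMO_MISR_EOI;
	if (uie && valid_lrs <= 1)
		misr |= DEMO_MISR_U;
	if (npie && !mi->pend)
		misr |= DEMO_MISR_NP;

	return misr;
}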
/* in translate_lr_pintid() */
	if (!irq || !irq->hw || irq->intid > VGIC_MAX_SPI)
		/* ... */;
	/* ... */
	lr |= FIELD_PREP(ICH_LR_PHYS_ID_MASK, (u64)irq->hwintid);
	vgic_put_irq(vcpu->kvm, irq);
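/*
 * Standalone illustration of the FIELD_PREP() line above (the DEMO_* mask
 * is an assumption; the real field layout comes from the kernel's
 * ICH_LR_PHYS_ID_MASK): when the HW bit is set, the pINTID field must name
 * the host interrupt backing the L1 virtual interrupt, so the shadow copy
 * replaces whatever L1 wrote there with irq->hwintid.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_LR_HW		(1ULL << 61)
#define DEMO_LR_PINTID_SHIFT	32
#define DEMO_LR_PINTID_MASK	(0x3ffULL << DEMO_LR_PINTID_SHIFT)

/* Minimal FIELD_PREP() stand-in for this one field. */
static uint64_t demo_set_pintid(uint64_t lr, uint32_t hwintid)
{
	lr &= ~DEMO_LR_PINTID_MASK;
	lr |= ((uint64_t)hwintid << DEMO_LR_PINTID_SHIFT) & DEMO_LR_PINTID_MASK;
	return lr;
}

int main(void)
{
	/* L1 programmed pINTID 27; the host backing interrupt is really 155 */
	uint64_t lr = DEMO_LR_HW | (27ULL << DEMO_LR_PINTID_SHIFT) | 27;

	lr = demo_set_pintid(lr, 155);
	printf("pINTID = %llu\n", (unsigned long long)
	       ((lr & DEMO_LR_PINTID_MASK) >> DEMO_LR_PINTID_SHIFT));
	return 0;
}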
/* in vgic_v3_create_shadow_lr() */
	shadow_if->lr_map = 0;
	/* ... */
		s_cpu_if->vgic_lr[hweight16(shadow_if->lr_map)] = lr;
		shadow_if->lr_map |= BIT(i);
	/* ... */
	s_cpu_if->used_lrs = hweight16(shadow_if->lr_map);
/* in vgic_v3_sync_nested() */
	for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) {
		/* ... */
			irq->active = false;
		/* ... */
		vgic_put_irq(vcpu->kvm, irq);
	}
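/*
 * Hedged sketch of the sync loop above (stand-in names, logic inferred from
 * the fragment): on L2 exit, walk every L1 LR that was folded into the
 * shadow; if the hardware left the shadow LR empty, the guest completed the
 * interrupt, so clear the active state of the L1 interrupt behind it. The
 * running shadow_idx reproduces what lr_map_idx_to_shadow_idx() computes.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_LR_STATE	(3ULL << 62)
#define DEMO_LR_HW	(1ULL << 61)

struct demo_irq {
	bool active;
};

static void demo_sync_on_l2_exit(uint16_t lr_map, int nr_lr,
				 const uint64_t *l1_lr,	     /* what L1 programmed */
				 const uint64_t *shadow_lr,  /* what the HW left behind */
				 struct demo_irq **l1_irq)   /* irq behind each L1 LR */
{
	int shadow_idx = 0;

	for (int i = 0; i < nr_lr; i++) {
		uint64_t after;

		if (!(lr_map & (1U << i)))
			continue;		/* this LR was never shadowed */

		after = shadow_lr[shadow_idx++];

		/* only HW-backed interrupts need the deactivation emulated */
		if (!(l1_lr[i] & DEMO_LR_HW))
			continue;

		/* the guest EOI-ed it away: deactivate the L1 interrupt */
		if (!(after & DEMO_LR_STATE) && l1_irq[i])
			l1_irq[i]->active = false;
	}
}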
/* in vgic_v3_create_shadow_state() */
	struct vgic_v3_cpu_if *host_if = &vcpu->arch.vgic_cpu.vgic_v3;
	/* ... */
	val = host_if->vgic_hcr & (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 |
				   /* ... */);
	s_cpu_if->vgic_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) | val;
	s_cpu_if->vgic_vmcr = __vcpu_sys_reg(vcpu, ICH_VMCR_EL2);
	s_cpu_if->vgic_sre = host_if->vgic_sre;
	/* ... */
		s_cpu_if->vgic_ap0r[i] = __vcpu_sys_reg(vcpu, ICH_AP0RN(i));
		s_cpu_if->vgic_ap1r[i] = __vcpu_sys_reg(vcpu, ICH_AP1RN(i));
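/*
 * Small sketch of the ICH_HCR_EL2 merge above (mask contents assumed, bit
 * positions architectural): the shadow HCR is the guest's own value with
 * the host-owned trap bits OR-ed back in, so that L0 keeps trapping what it
 * needs even while L2 runs with L1's configuration.
 */
#include <stdint.h>

#define DEMO_HCR_TALL0	(1ULL << 11)	/* trap all Group-0 accesses */
#define DEMO_HCR_TALL1	(1ULL << 12)	/* trap all Group-1 accesses */

static uint64_t demo_shadow_hcr(uint64_t host_hcr, uint64_t guest_hcr)
{
	/* keep only the trap bits the host insists on... */
	uint64_t host_bits = host_hcr & (DEMO_HCR_TALL0 | DEMO_HCR_TALL1);

	/* ...and overlay them on what L1 asked for */
	return guest_hcr | host_bits;
}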
/* in vgic_v3_load_nested() */
	struct vgic_v3_cpu_if *cpu_if = &shadow_if->cpuif;
	/* ... */
	vcpu->arch.vgic_cpu.vgic_v3.used_lrs = cpu_if->used_lrs;
/* in vgic_v3_put_nested() */
	struct vgic_v3_cpu_if *s_cpu_if = &shadow_if->cpuif;
	/* ... */
	val |= (s_cpu_if->vgic_hcr & ICH_HCR_EL2_EOIcount_MASK);
	/* ... */
	__vcpu_assign_sys_reg(vcpu, ICH_VMCR_EL2, s_cpu_if->vgic_vmcr);
	/* ... */
		__vcpu_assign_sys_reg(vcpu, ICH_AP0RN(i), s_cpu_if->vgic_ap0r[i]);
		__vcpu_assign_sys_reg(vcpu, ICH_AP1RN(i), s_cpu_if->vgic_ap1r[i]);
	/* ... */
	for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) {
		/* ... */
		val |= s_cpu_if->vgic_lr[lr_map_idx_to_shadow_idx(shadow_if, i)] & ICH_LR_STATE;
		/* ... */
	}
	/* ... */
	vcpu->arch.vgic_cpu.vgic_v3.used_lrs = 0;
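/*
 * Sketch of the LR write-back in the loop above (stand-in names): only the
 * State field travels back from the shadow LR into the L1-visible copy, so
 * L1 observes the effect of L2's ACKs and EOIs without losing anything else
 * it programmed into the LR.
 */
#include <stdint.h>

#define DEMO_LR_STATE	(3ULL << 62)

static uint64_t demo_writeback_lr(uint64_t l1_lr, uint64_t shadow_lr)
{
	l1_lr &= ~DEMO_LR_STATE;		/* drop the stale state */
	l1_lr |= shadow_lr & DEMO_LR_STATE;	/* import what the HW left */
	return l1_lr;
}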
/*
 * When a maintenance interrupt fires while L2 is running, we need to
 * forward it to L1 so that it can re-sync the appropriate LRs.
 */
/* in vgic_v3_handle_nested_maint_irq() */
	kvm_vgic_inject_irq(vcpu->kvm, vcpu,
			    vcpu->kvm->arch.vgic.mi_intid, state, vcpu);
/* in vgic_v3_nested_update_mi() */
	kvm_vgic_inject_irq(vcpu->kvm, vcpu,
			    vcpu->kvm->arch.vgic.mi_intid, level, vcpu);
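/*
 * Sketch of the level computation feeding the injection above (assumed
 * logic, consistent with the comment block at the top of the file): the L1
 * MI is purely virtual, so its level is recomputed from the in-memory
 * state. It can only be high when L1 enabled its vGIC (ICH_HCR_EL2.En) and
 * at least one maintenance condition (MISR) is met.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_HCR_EN	(1ULL << 0)	/* ICH_HCR_EL2.En */

static bool demo_mi_level(uint64_t guest_hcr, uint32_t misr)
{
	return (guest_hcr & DEMO_HCR_EN) && misr;
}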