Lines Matching +full:re +full:- +full:routed
1 // SPDX-License-Identifier: GPL-2.0-only
6 #define pr_fmt(fmt) "xive-kvm: " fmt
23 #include <asm/xive-regs.h>
33 #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
34 #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
65 xc->pending |= 1 << cppr; in xive_vm_ack_pending()
68 if (cppr >= xc->hw_cppr) in xive_vm_ack_pending()
69 pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n", in xive_vm_ack_pending()
70 smp_processor_id(), cppr, xc->hw_cppr); in xive_vm_ack_pending()
74 * xc->cppr, this will be done as we scan for interrupts in xive_vm_ack_pending()
77 xc->hw_cppr = cppr; in xive_vm_ack_pending()
84 if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) in xive_vm_esb_load()
89 val >>= 64-8; in xive_vm_esb_load()
98 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) in xive_vm_source_eoi()
100 else if (xd->flags & XIVE_IRQ_FLAG_LSI) { in xive_vm_source_eoi()
103 * as they are automatically re-triggered in HW when still in xive_vm_source_eoi()
116 * This allows us to then do a re-trigger if Q was set in xive_vm_source_eoi()
121 /* Re-trigger if needed */ in xive_vm_source_eoi()
140 while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) { in xive_vm_scan_interrupts()
149 prio = ffs(pending) - 1; in xive_vm_scan_interrupts()
152 if (prio >= xc->cppr || prio > 7) { in xive_vm_scan_interrupts()
153 if (xc->mfrr < xc->cppr) { in xive_vm_scan_interrupts()
154 prio = xc->mfrr; in xive_vm_scan_interrupts()
161 q = &xc->queues[prio]; in xive_vm_scan_interrupts()
162 idx = q->idx; in xive_vm_scan_interrupts()
163 toggle = q->toggle; in xive_vm_scan_interrupts()
171 qpage = READ_ONCE(q->qpage); in xive_vm_scan_interrupts()
176 * non-queueing priority (ie, qpage = 0). in xive_vm_scan_interrupts()
178 hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle); in xive_vm_scan_interrupts()
183 * we EOI it now, thus re-enabling reception of a new in xive_vm_scan_interrupts()
187 * page for the queue. In this case, we have non-queued in xive_vm_scan_interrupts()
196 xive_vm_source_eoi(xc->vp_ipi, in xive_vm_scan_interrupts()
197 &xc->vp_ipi_data); in xive_vm_scan_interrupts()
198 q->idx = idx; in xive_vm_scan_interrupts()
199 q->toggle = toggle; in xive_vm_scan_interrupts()
219 if (atomic_read(&q->pending_count)) { in xive_vm_scan_interrupts()
220 int p = atomic_xchg(&q->pending_count, 0); in xive_vm_scan_interrupts()
223 WARN_ON(p > atomic_read(&q->count)); in xive_vm_scan_interrupts()
224 atomic_sub(p, &q->count); in xive_vm_scan_interrupts()
234 if (prio >= xc->mfrr && xc->mfrr < xc->cppr) { in xive_vm_scan_interrupts()
235 prio = xc->mfrr; in xive_vm_scan_interrupts()
242 q->idx = idx; in xive_vm_scan_interrupts()
243 q->toggle = toggle; in xive_vm_scan_interrupts()
252 xc->pending = pending; in xive_vm_scan_interrupts()
266 * Note: This can only make xc->cppr smaller as the previous in xive_vm_scan_interrupts()
268 * the current xc->cppr. Thus we don't need to re-check xc->mfrr in xive_vm_scan_interrupts()
272 xc->cppr = prio; in xive_vm_scan_interrupts()
275 * as the HW interrupt we use for IPIs is routed to priority 0. in xive_vm_scan_interrupts()
277 * We re-sync it here. in xive_vm_scan_interrupts()
279 if (xc->cppr != xc->hw_cppr) { in xive_vm_scan_interrupts()
280 xc->hw_cppr = xc->cppr; in xive_vm_scan_interrupts()
281 __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR); in xive_vm_scan_interrupts()
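The scan above arbitrates between the per-priority queues (summarised in the pending bitmap) and the MFRR-driven IPI, all gated by the CPPR, with lower priority numbers being more favoured. A minimal user-space sketch of just that arbitration, assuming the usual XICS/XIVE convention that 0xff means "no IPI requested"; pick_priority() is a made-up helper, not kernel code:

#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

static int pick_priority(uint8_t pending, uint8_t mfrr, uint8_t cppr)
{
	int prio;

	if (pending)
		prio = ffs(pending) - 1;	/* lowest set bit = best pending prio */
	else
		prio = 0xff;			/* no queue has anything */

	if (mfrr < prio)			/* the IPI competes with the queues */
		prio = mfrr;

	return prio < cppr ? prio : -1;		/* -1: nothing to deliver */
}

int main(void)
{
	/* queues 3 and 5 pending, IPI at 2, CPPR masks prio >= 4 */
	printf("%d\n", pick_priority((1 << 3) | (1 << 5), 2, 4));	/* 2 */
	printf("%d\n", pick_priority(1 << 5, 0xff, 4));			/* -1 */
	return 0;
}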
289 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_xirr()
295 xc->stat_vm_h_xirr++; in xive_vm_h_xirr()
301 xc->pending, xc->hw_cppr, xc->cppr); in xive_vm_h_xirr()
304 old_cppr = xive_prio_to_guest(xc->cppr); in xive_vm_h_xirr()
307 hirq = xive_vm_scan_interrupts(xc, xc->pending, scan_fetch); in xive_vm_h_xirr()
310 hirq, xc->hw_cppr, xc->cppr); in xive_vm_h_xirr()
338 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipoll()
339 u8 pending = xc->pending; in xive_vm_h_ipoll()
344 xc->stat_vm_h_ipoll++; in xive_vm_h_ipoll()
347 if (xc->server_num != server) { in xive_vm_h_ipoll()
348 vcpu = kvmppc_xive_find_server(vcpu->kvm, server); in xive_vm_h_ipoll()
351 xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipoll()
367 kvmppc_set_gpr(vcpu, 4, hirq | (xc->cppr << 24)); in xive_vm_h_ipoll()
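The value loaded into GPR4 above uses the XICS XIRR layout: the current CPPR in the top byte and the interrupt source number (XISR) in the low 24 bits. A small pack/unpack pair for illustration:

#include <stdio.h>
#include <stdint.h>

static uint32_t xirr_pack(uint8_t cppr, uint32_t xisr)
{
	return ((uint32_t)cppr << 24) | (xisr & 0x00ffffff);
}

static void xirr_unpack(uint32_t xirr, uint8_t *cppr, uint32_t *xisr)
{
	*cppr = xirr >> 24;
	*xisr = xirr & 0x00ffffff;
}

int main(void)
{
	uint8_t cppr;
	uint32_t xisr;

	xirr_unpack(xirr_pack(0x05, 0x1234), &cppr, &xisr);
	printf("cppr=%u xisr=0x%x\n", cppr, xisr);
	return 0;
}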
376 pending = xc->pending; in xive_vm_push_pending_to_hw()
377 if (xc->mfrr != 0xff) { in xive_vm_push_pending_to_hw()
378 if (xc->mfrr < 8) in xive_vm_push_pending_to_hw()
379 pending |= 1 << xc->mfrr; in xive_vm_push_pending_to_hw()
385 prio = ffs(pending) - 1; in xive_vm_push_pending_to_hw()
396 for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) { in xive_vm_scan_for_rerouted_irqs()
397 struct xive_q *q = &xc->queues[prio]; in xive_vm_scan_for_rerouted_irqs()
405 idx = q->idx; in xive_vm_scan_for_rerouted_irqs()
406 toggle = q->toggle; in xive_vm_scan_for_rerouted_irqs()
407 qpage = READ_ONCE(q->qpage); in xive_vm_scan_for_rerouted_irqs()
426 state = &sb->irq_state[src]; in xive_vm_scan_for_rerouted_irqs()
429 if (xc->server_num == state->act_server) in xive_vm_scan_for_rerouted_irqs()
433 * Alright, it *has* been re-routed, kill it from in xive_vm_scan_for_rerouted_irqs()
442 if (!(xd->flags & XIVE_IRQ_FLAG_LSI)) in xive_vm_scan_for_rerouted_irqs()
449 idx = (idx + 1) & q->msk; in xive_vm_scan_for_rerouted_irqs()
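The rerouted-IRQ scan walks the event queue in place: each 32-bit entry carries a generation flag in its top bit, the index wraps under the power-of-two mask, and the generation flag flips on wrap. The standalone model below shows the same ring-walk idea; read_eq() is a simplified stand-in whose valid-bit polarity and entry layout are chosen for illustration, not copied from the kernel's __xive_read_eq():

#include <stdio.h>
#include <stdint.h>

static uint32_t read_eq(const uint32_t *qpage, uint32_t msk,
			uint32_t *idx, uint32_t *toggle)
{
	uint32_t cur = qpage[*idx];

	if ((cur >> 31) != *toggle)	/* generation mismatch: queue is empty */
		return 0;

	*idx = (*idx + 1) & msk;
	if (*idx == 0)
		*toggle ^= 1;		/* wrapped around: flip the generation */

	return cur & 0x7fffffff;	/* interrupt number */
}

int main(void)
{
	uint32_t q[4] = { 0x80000010, 0x80000020, 0, 0 };
	uint32_t idx = 0, toggle = 1;

	printf("0x%x\n", read_eq(q, 3, &idx, &toggle));	/* 0x10 */
	printf("0x%x\n", read_eq(q, 3, &idx, &toggle));	/* 0x20 */
	printf("0x%x\n", read_eq(q, 3, &idx, &toggle));	/* 0 (empty) */
	return 0;
}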
458 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_cppr()
459 struct kvmppc_xive *xive = vcpu->kvm->arch.xive; in xive_vm_h_cppr()
464 xc->stat_vm_h_cppr++; in xive_vm_h_cppr()
470 old_cppr = xc->cppr; in xive_vm_h_cppr()
471 xc->cppr = cppr; in xive_vm_h_cppr()
474 * Order the above update of xc->cppr with the subsequent in xive_vm_h_cppr()
475 * read of xc->mfrr inside push_pending_to_hw() in xive_vm_h_cppr()
490 * interrupt that has been routed to another CPU, take in xive_vm_h_cppr()
502 * the queue must still be routed to us and isn't a source in xive_vm_h_cppr()
509 xc->hw_cppr = cppr; in xive_vm_h_cppr()
517 struct kvmppc_xive *xive = vcpu->kvm->arch.xive; in xive_vm_h_eoi()
520 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_eoi()
529 xc->stat_vm_h_eoi++; in xive_vm_h_eoi()
531 xc->cppr = xive_prio_from_guest(new_cppr); in xive_vm_h_eoi()
541 * This barrier orders the setting of xc->cppr vs. in xive_vm_h_eoi()
542 * subsequent test of xc->mfrr done inside in xive_vm_h_eoi()
558 state = &sb->irq_state[src]; in xive_vm_h_eoi()
561 state->in_eoi = true; in xive_vm_h_eoi()
566 * of xc->cppr vs. subsequent test of xc->mfrr done inside in xive_vm_h_eoi()
572 if (state->guest_priority == MASKED) { in xive_vm_h_eoi()
573 arch_spin_lock(&sb->lock); in xive_vm_h_eoi()
574 if (state->guest_priority != MASKED) { in xive_vm_h_eoi()
575 arch_spin_unlock(&sb->lock); in xive_vm_h_eoi()
581 state->old_p = false; in xive_vm_h_eoi()
583 arch_spin_unlock(&sb->lock); in xive_vm_h_eoi()
591 if (state->lsi && state->asserted) in xive_vm_h_eoi()
602 * state->in_eoi is visible. in xive_vm_h_eoi()
605 state->in_eoi = false; in xive_vm_h_eoi()
608 /* Re-evaluate pending IRQs and update HW */ in xive_vm_h_eoi()
609 xive_vm_scan_interrupts(xc, xc->pending, scan_eoi); in xive_vm_h_eoi()
611 pr_devel(" after scan pending=%02x\n", xc->pending); in xive_vm_h_eoi()
614 xc->hw_cppr = xc->cppr; in xive_vm_h_eoi()
615 __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR); in xive_vm_h_eoi()
623 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipi()
627 xc->stat_vm_h_ipi++; in xive_vm_h_ipi()
630 vcpu = kvmppc_xive_find_server(vcpu->kvm, server); in xive_vm_h_ipi()
633 xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipi()
636 xc->mfrr = mfrr; in xive_vm_h_ipi()
639 * The load of xc->cppr below and the subsequent MMIO store in xive_vm_h_ipi()
643 * - Synchronize with another CPU doing an H_EOI or a H_CPPR in xive_vm_h_ipi()
644 * updating xc->cppr then reading xc->mfrr. in xive_vm_h_ipi()
646 * - The target of the IPI sees the xc->mfrr update in xive_vm_h_ipi()
651 if (mfrr < xc->cppr) in xive_vm_h_ipi()
652 __raw_writeq(0, __x_trig_page(&xc->vp_ipi_data)); in xive_vm_h_ipi()
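The ordering comment in H_IPI above is the classic store/load (Dekker-style) pattern: the sender publishes mfrr and then reads cppr, while H_CPPR and H_EOI publish cppr and then read mfrr, each side with a full barrier in between so at least one of them notices the other. A user-space model with C11 atomics; the names and the simplified return values are illustrative:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned char cppr = 0xff;	/* accept everything */
static _Atomic unsigned char mfrr = 0xff;	/* no IPI requested  */

/* Sender side (H_IPI): publish mfrr, then decide whether to trigger */
static bool h_ipi_side(unsigned char new_mfrr)
{
	atomic_store_explicit(&mfrr, new_mfrr, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);		/* mb() */
	return new_mfrr < atomic_load_explicit(&cppr, memory_order_relaxed);
}

/* Receiver side (H_CPPR/H_EOI): publish cppr, then re-check for a pending IPI */
static bool h_cppr_side(unsigned char new_cppr)
{
	atomic_store_explicit(&cppr, new_cppr, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);		/* mb() */
	return atomic_load_explicit(&mfrr, memory_order_relaxed) < new_cppr;
}

int main(void)
{
	/* IPI at priority 4 while CPPR still accepts it: must trigger */
	return h_ipi_side(4) && !h_cppr_side(0) ? 0 : 1;
}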
665 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_vcpu_has_save_restore()
668 return xc->vp_cam & TM_QW1W2_HO; in kvmppc_xive_vcpu_has_save_restore()
673 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_check_save_restore()
674 struct kvmppc_xive *xive = xc->xive; in kvmppc_xive_check_save_restore()
676 if (xive->flags & KVMPPC_XIVE_FLAG_SAVE_RESTORE) in kvmppc_xive_check_save_restore()
688 void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt; in kvmppc_xive_push_vcpu()
694 * (e.g. because it's not using an in-kernel interrupt controller). in kvmppc_xive_push_vcpu()
696 if (!tima || !vcpu->arch.xive_cam_word) in kvmppc_xive_push_vcpu()
701 __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS); in kvmppc_xive_push_vcpu()
702 __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2); in kvmppc_xive_push_vcpu()
703 vcpu->arch.xive_pushed = 1; in kvmppc_xive_push_vcpu()
713 vcpu->arch.irq_pending = 0; in kvmppc_xive_push_vcpu()
719 if (vcpu->arch.xive_esc_on) { in kvmppc_xive_push_vcpu()
720 pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr + in kvmppc_xive_push_vcpu()
728 * early enough (re-cede right away), there is a in kvmppc_xive_push_vcpu()
731 * a big no-no. in kvmppc_xive_push_vcpu()
742 * before re-enabling the escalation interrupt, and if in kvmppc_xive_push_vcpu()
747 vcpu->arch.xive_esc_on = 0; in kvmppc_xive_push_vcpu()
758 void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt; in kvmppc_xive_pull_vcpu()
760 if (!vcpu->arch.xive_pushed) in kvmppc_xive_pull_vcpu()
774 vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS); in kvmppc_xive_pull_vcpu()
777 vcpu->arch.xive_saved_state.lsmfb = 0; in kvmppc_xive_pull_vcpu()
778 vcpu->arch.xive_saved_state.ack = 0xff; in kvmppc_xive_pull_vcpu()
779 vcpu->arch.xive_pushed = 0; in kvmppc_xive_pull_vcpu()
786 void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr; in kvmppc_xive_rearm_escalation()
794 if (vcpu->arch.xive_esc_on) { in kvmppc_xive_rearm_escalation()
806 * There is no need to use the load-after-store ordering offset in kvmppc_xive_rearm_escalation()
811 vcpu->arch.xive_esc_on = true; in kvmppc_xive_rearm_escalation()
828 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) in xive_irq_trigger()
832 if (WARN_ON(!xd->trig_mmio)) in xive_irq_trigger()
835 out_be64(xd->trig_mmio, 0); in xive_irq_trigger()
844 vcpu->arch.irq_pending = 1; in xive_esc_irq()
846 if (vcpu->arch.ceded || vcpu->arch.nested) in xive_esc_irq()
849 /* Since we have the no-EOI flag, the interrupt is effectively in xive_esc_irq()
858 vcpu->arch.xive_esc_on = false; in xive_esc_irq()
869 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_attach_escalation()
870 struct xive_q *q = &xc->queues[prio]; in kvmppc_xive_attach_escalation()
875 if (xc->esc_virq[prio]) in kvmppc_xive_attach_escalation()
879 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq); in kvmppc_xive_attach_escalation()
880 if (!xc->esc_virq[prio]) { in kvmppc_xive_attach_escalation()
882 prio, xc->server_num); in kvmppc_xive_attach_escalation()
883 return -EIO; in kvmppc_xive_attach_escalation()
887 name = kasprintf(GFP_KERNEL, "kvm-%lld-%d", in kvmppc_xive_attach_escalation()
888 vcpu->kvm->arch.lpid, xc->server_num); in kvmppc_xive_attach_escalation()
890 name = kasprintf(GFP_KERNEL, "kvm-%lld-%d-%d", in kvmppc_xive_attach_escalation()
891 vcpu->kvm->arch.lpid, xc->server_num, prio); in kvmppc_xive_attach_escalation()
894 prio, xc->server_num); in kvmppc_xive_attach_escalation()
895 rc = -ENOMEM; in kvmppc_xive_attach_escalation()
899 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio); in kvmppc_xive_attach_escalation()
901 rc = request_irq(xc->esc_virq[prio], xive_esc_irq, in kvmppc_xive_attach_escalation()
905 prio, xc->server_num); in kvmppc_xive_attach_escalation()
908 xc->esc_virq_names[prio] = name; in kvmppc_xive_attach_escalation()
919 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]); in kvmppc_xive_attach_escalation()
923 vcpu->arch.xive_esc_raddr = xd->eoi_page; in kvmppc_xive_attach_escalation()
924 vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio; in kvmppc_xive_attach_escalation()
925 xd->flags |= XIVE_IRQ_FLAG_NO_EOI; in kvmppc_xive_attach_escalation()
930 irq_dispose_mapping(xc->esc_virq[prio]); in kvmppc_xive_attach_escalation()
931 xc->esc_virq[prio] = 0; in kvmppc_xive_attach_escalation()
938 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_provision_queue()
939 struct kvmppc_xive *xive = xc->xive; in xive_provision_queue()
940 struct xive_q *q = &xc->queues[prio]; in xive_provision_queue()
944 if (WARN_ON(q->qpage)) in xive_provision_queue()
948 qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order); in xive_provision_queue()
951 prio, xc->server_num); in xive_provision_queue()
952 return -ENOMEM; in xive_provision_queue()
954 memset(qpage, 0, 1 << xive->q_order); in xive_provision_queue()
957 * Reconfigure the queue. This will set q->qpage only once the in xive_provision_queue()
960 * qpage being non-NULL, and instead will only EOI when we receive in xive_provision_queue()
963 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage, in xive_provision_queue()
964 xive->q_order, true); in xive_provision_queue()
967 prio, xc->server_num); in xive_provision_queue()
971 /* Called with xive->lock held */
974 struct kvmppc_xive *xive = kvm->arch.xive; in xive_check_provisioning()
979 lockdep_assert_held(&xive->lock); in xive_check_provisioning()
982 if (xive->qmap & (1 << prio)) in xive_check_provisioning()
989 if (!vcpu->arch.xive_vcpu) in xive_check_provisioning()
1001 xive->qmap |= (1 << prio); in xive_check_provisioning()
1017 xc = vcpu->arch.xive_vcpu; in xive_inc_q_pending()
1021 q = &xc->queues[prio]; in xive_inc_q_pending()
1022 atomic_inc(&q->pending_count); in xive_inc_q_pending()
1027 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_try_pick_queue()
1032 return -ENXIO; in xive_try_pick_queue()
1033 if (!xc->valid) in xive_try_pick_queue()
1034 return -ENXIO; in xive_try_pick_queue()
1036 q = &xc->queues[prio]; in xive_try_pick_queue()
1037 if (WARN_ON(!q->qpage)) in xive_try_pick_queue()
1038 return -ENXIO; in xive_try_pick_queue()
1041 max = (q->msk + 1) - XIVE_Q_GAP; in xive_try_pick_queue()
1042 return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY; in xive_try_pick_queue()
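xive_try_pick_queue() caps the number of interrupts targeted at a queue to the queue size minus XIVE_Q_GAP, so the event queue can never fill up completely. The same "add unless the limit would be reached" accounting, sketched with C11 atomics (the gap value and helper names are assumptions made for the sketch):

#include <stdatomic.h>
#include <stdbool.h>

#define Q_GAP	2	/* assumed gap, in the spirit of XIVE_Q_GAP */

static bool add_unless(_Atomic int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c < u)	/* CAS loop: only add while below the limit */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return true;
	return false;
}

static int try_pick_queue(_Atomic int *count, unsigned int msk)
{
	int max = (int)(msk + 1) - Q_GAP;	/* leave slack in the EQ */

	return add_unless(count, 1, max) ? 0 : -1;	/* -1 ~ -EBUSY */
}

int main(void)
{
	_Atomic int count = 0;

	/* a 4-entry queue (msk = 3) accepts only 4 - Q_GAP targeted sources */
	return (try_pick_queue(&count, 3) == 0 &&
		try_pick_queue(&count, 3) == 0 &&
		try_pick_queue(&count, 3) != 0) ? 0 : 1;
}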
1055 return -EINVAL; in kvmppc_xive_select_target()
1069 if (!vcpu->arch.xive_vcpu) in kvmppc_xive_select_target()
1073 *server = vcpu->arch.xive_vcpu->server_num; in kvmppc_xive_select_target()
1081 return -EBUSY; in kvmppc_xive_select_target()
1098 arch_spin_lock(&sb->lock); in xive_lock_and_mask()
1099 old_prio = state->guest_priority; in xive_lock_and_mask()
1100 state->guest_priority = MASKED; in xive_lock_and_mask()
1102 if (!state->in_eoi) in xive_lock_and_mask()
1104 state->guest_priority = old_prio; in xive_lock_and_mask()
1105 arch_spin_unlock(&sb->lock); in xive_lock_and_mask()
1117 state->old_p = !!(val & 2); in xive_lock_and_mask()
1118 state->old_q = !!(val & 1); in xive_lock_and_mask()
1136 arch_spin_lock(&sb->lock); in xive_lock_for_unmask()
1137 if (!state->in_eoi) in xive_lock_for_unmask()
1139 arch_spin_unlock(&sb->lock); in xive_lock_for_unmask()
1152 if (state->guest_priority != MASKED) in xive_finish_unmask()
1159 if (state->old_q) in xive_finish_unmask()
1167 if (!state->old_p) in xive_finish_unmask()
1173 state->guest_priority = prio; in xive_finish_unmask()
1187 struct kvmppc_xive *xive = kvm->arch.xive; in xive_target_interrupt()
1210 if (state->act_priority != MASKED) in xive_target_interrupt()
1212 state->act_server, in xive_target_interrupt()
1213 state->act_priority); in xive_target_interrupt()
1217 state->act_priority = prio; in xive_target_interrupt()
1218 state->act_server = server; in xive_target_interrupt()
1225 prio, state->number); in xive_target_interrupt()
1233 * - Unless it was never enabled (or we run out of capacity)
1238 * from masking, we only handle accounting during (re)targeting,
1243 * - When masking, we set PQ to 10 and save the previous value
1246 * - When unmasking, if saved Q was set, we set PQ to 11
1252 * Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
1260 * - If H_EOI occurs while masked, we clear the saved P.
1262 * - When changing target, we account on the new target and
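The bullets above describe a small state machine over each source's P (pending) and Q (queued) ESB bits. A plain software model of that masking/unmasking bookkeeping follows; esb_set_pq() stands in for the MMIO ESB loads and the helpers are deliberate simplifications, not the kernel's exact EOI path:

#include <stdio.h>
#include <stdbool.h>

struct esb { unsigned int pq; };	/* 2-bit state: P = bit 1, Q = bit 0 */

/* Emulate an ESB "load with side effect": return the old PQ, install the new */
static unsigned int esb_set_pq(struct esb *e, unsigned int new_pq)
{
	unsigned int old = e->pq;

	e->pq = new_pq & 3;
	return old;
}

/* Effective EOI: re-enable the source, report whether a re-trigger is needed */
static bool source_eoi(struct esb *e)
{
	return esb_set_pq(e, 0) & 1;	/* Q was set: caller should re-trigger */
}

struct saved { bool p, q; };

static struct saved mask_source(struct esb *e)
{
	unsigned int old = esb_set_pq(e, 2);	/* PQ = 10: masked */

	return (struct saved){ .p = old & 2, .q = old & 1 };
}

static bool unmask_source(struct esb *e, struct saved s)
{
	if (s.q)
		esb_set_pq(e, 3);	/* PQ = 11: keep the queued event latched */
	if (!s.p)
		return source_eoi(e);	/* EOI re-enables delivery */
	return false;			/* P still set: wait for the guest's EOI */
}

int main(void)
{
	struct esb e = { .pq = 1 };	/* an event was queued while live */
	struct saved s = mask_source(&e);
	bool retrig = unmask_source(&e, s);

	printf("retrigger=%d PQ=%u%u\n", retrig, (e.pq >> 1) & 1, e.pq & 1);
	return 0;
}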
1271 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_set_xive()
1279 return -ENODEV; in kvmppc_xive_set_xive()
1286 mutex_lock(&xive->lock); in kvmppc_xive_set_xive()
1287 rc = xive_check_provisioning(xive->kvm, in kvmppc_xive_set_xive()
1289 mutex_unlock(&xive->lock); in kvmppc_xive_set_xive()
1298 return -EINVAL; in kvmppc_xive_set_xive()
1299 state = &sb->irq_state[idx]; in kvmppc_xive_set_xive()
1307 * xive_lock_and_mask() will also set state->guest_priority in kvmppc_xive_set_xive()
1326 new_act_prio = state->act_priority; in kvmppc_xive_set_xive()
1331 new_act_prio, state->act_server, state->act_priority); in kvmppc_xive_set_xive()
1336 * The condition for re-targeting the interrupt is that in kvmppc_xive_set_xive()
1348 (state->act_server != server || in kvmppc_xive_set_xive()
1349 state->act_priority != new_act_prio)) in kvmppc_xive_set_xive()
1363 state->saved_priority = priority; in kvmppc_xive_set_xive()
1365 arch_spin_unlock(&sb->lock); in kvmppc_xive_set_xive()
1372 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_get_xive()
1378 return -ENODEV; in kvmppc_xive_get_xive()
1382 return -EINVAL; in kvmppc_xive_get_xive()
1383 state = &sb->irq_state[idx]; in kvmppc_xive_get_xive()
1384 arch_spin_lock(&sb->lock); in kvmppc_xive_get_xive()
1385 *server = state->act_server; in kvmppc_xive_get_xive()
1386 *priority = state->guest_priority; in kvmppc_xive_get_xive()
1387 arch_spin_unlock(&sb->lock); in kvmppc_xive_get_xive()
1394 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_int_on()
1400 return -ENODEV; in kvmppc_xive_int_on()
1404 return -EINVAL; in kvmppc_xive_int_on()
1405 state = &sb->irq_state[idx]; in kvmppc_xive_int_on()
1412 if (state->act_priority == MASKED) { in kvmppc_xive_int_on()
1414 return -EINVAL; in kvmppc_xive_int_on()
1418 if (state->saved_priority == MASKED) in kvmppc_xive_int_on()
1425 xive_finish_unmask(xive, sb, state, state->saved_priority); in kvmppc_xive_int_on()
1426 arch_spin_unlock(&sb->lock); in kvmppc_xive_int_on()
1433 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_int_off()
1439 return -ENODEV; in kvmppc_xive_int_off()
1443 return -EINVAL; in kvmppc_xive_int_off()
1444 state = &sb->irq_state[idx]; in kvmppc_xive_int_off()
1451 state->saved_priority = xive_lock_and_mask(xive, sb, state); in kvmppc_xive_int_off()
1452 arch_spin_unlock(&sb->lock); in kvmppc_xive_int_off()
1466 state = &sb->irq_state[idx]; in xive_restore_pending_irq()
1467 if (!state->valid) in xive_restore_pending_irq()
1471 * Trigger the IPI. This assumes we never restore a pass-through in xive_restore_pending_irq()
1474 xive_irq_trigger(&state->ipi_data); in xive_restore_pending_irq()
1481 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_get_icp()
1486 /* Return the per-cpu state for state saving/migration */ in kvmppc_xive_get_icp()
1487 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | in kvmppc_xive_get_icp()
1488 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT | in kvmppc_xive_get_icp()
1494 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_set_icp()
1495 struct kvmppc_xive *xive = vcpu->kvm->arch.xive; in kvmppc_xive_set_icp()
1500 return -ENOENT; in kvmppc_xive_set_icp()
1509 xc->server_num, cppr, mfrr, xisr); in kvmppc_xive_set_icp()
1513 * shouldn't happen because the vcpu->mutex makes running a in kvmppc_xive_set_icp()
1516 if (WARN_ON(vcpu->arch.xive_pushed)) in kvmppc_xive_set_icp()
1517 return -EIO; in kvmppc_xive_set_icp()
1520 vcpu->arch.xive_saved_state.cppr = cppr; in kvmppc_xive_set_icp()
1521 xc->hw_cppr = xc->cppr = cppr; in kvmppc_xive_set_icp()
1525 * having a pending MFRR change, which will re-evaluate the in kvmppc_xive_set_icp()
1529 xc->mfrr = mfrr; in kvmppc_xive_set_icp()
1531 xive_irq_trigger(&xc->vp_ipi_data); in kvmppc_xive_set_icp()
1543 xc->delayed_irq = xisr; in kvmppc_xive_set_icp()
1544 xive->delayed_irqs++; in kvmppc_xive_set_icp()
1554 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_set_mapped()
1565 return -ENODEV; in kvmppc_xive_set_mapped()
1572 return -EINVAL; in kvmppc_xive_set_mapped()
1573 state = &sb->irq_state[idx]; in kvmppc_xive_set_mapped()
1576 * Mark the passed-through interrupt as going to a VCPU, in kvmppc_xive_set_mapped()
1584 * non-NULL to switch to passed-through or NULL for the in kvmppc_xive_set_mapped()
1601 state->old_p, state->old_q); in kvmppc_xive_set_mapped()
1604 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_set_mapped()
1610 if (xive->ops && xive->ops->reset_mapped) in kvmppc_xive_set_mapped()
1611 xive->ops->reset_mapped(kvm, guest_irq); in kvmppc_xive_set_mapped()
1614 state->pt_number = hw_irq; in kvmppc_xive_set_mapped()
1615 state->pt_data = irq_data_get_irq_handler_data(host_data); in kvmppc_xive_set_mapped()
1624 kvmppc_xive_vp(xive, state->act_server), in kvmppc_xive_set_mapped()
1625 state->act_priority, state->number); in kvmppc_xive_set_mapped()
1634 if (prio != MASKED && !state->old_p) in kvmppc_xive_set_mapped()
1635 xive_vm_source_eoi(hw_irq, state->pt_data); in kvmppc_xive_set_mapped()
1638 state->old_p = state->old_q = false; in kvmppc_xive_set_mapped()
1642 state->guest_priority = prio; in kvmppc_xive_set_mapped()
1643 arch_spin_unlock(&sb->lock); in kvmppc_xive_set_mapped()
1652 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_clr_mapped()
1660 return -ENODEV; in kvmppc_xive_clr_mapped()
1666 return -EINVAL; in kvmppc_xive_clr_mapped()
1667 state = &sb->irq_state[idx]; in kvmppc_xive_clr_mapped()
1676 state->old_p, state->old_q); in kvmppc_xive_clr_mapped()
1683 if (state->old_p) in kvmppc_xive_clr_mapped()
1684 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11); in kvmppc_xive_clr_mapped()
1686 /* Release the passed-through interrupt to the host */ in kvmppc_xive_clr_mapped()
1694 state->pt_number = 0; in kvmppc_xive_clr_mapped()
1695 state->pt_data = NULL; in kvmppc_xive_clr_mapped()
1701 if (xive->ops && xive->ops->reset_mapped) { in kvmppc_xive_clr_mapped()
1702 xive->ops->reset_mapped(kvm, guest_irq); in kvmppc_xive_clr_mapped()
1706 xive_native_configure_irq(state->ipi_number, in kvmppc_xive_clr_mapped()
1707 kvmppc_xive_vp(xive, state->act_server), in kvmppc_xive_clr_mapped()
1708 state->act_priority, state->number); in kvmppc_xive_clr_mapped()
1713 * to PQ=10 state. Otherwise we just re-enable it (PQ=00). in kvmppc_xive_clr_mapped()
1715 if (prio == MASKED || state->old_p) in kvmppc_xive_clr_mapped()
1716 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10); in kvmppc_xive_clr_mapped()
1718 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00); in kvmppc_xive_clr_mapped()
1722 state->guest_priority = prio; in kvmppc_xive_clr_mapped()
1723 arch_spin_unlock(&sb->lock); in kvmppc_xive_clr_mapped()
1731 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_disable_vcpu_interrupts()
1732 struct kvm *kvm = vcpu->kvm; in kvmppc_xive_disable_vcpu_interrupts()
1733 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_disable_vcpu_interrupts()
1736 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_disable_vcpu_interrupts()
1737 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in kvmppc_xive_disable_vcpu_interrupts()
1742 struct kvmppc_xive_irq_state *state = &sb->irq_state[j]; in kvmppc_xive_disable_vcpu_interrupts()
1744 if (!state->valid) in kvmppc_xive_disable_vcpu_interrupts()
1746 if (state->act_priority == MASKED) in kvmppc_xive_disable_vcpu_interrupts()
1748 if (state->act_server != xc->server_num) in kvmppc_xive_disable_vcpu_interrupts()
1752 arch_spin_lock(&sb->lock); in kvmppc_xive_disable_vcpu_interrupts()
1753 state->act_priority = MASKED; in kvmppc_xive_disable_vcpu_interrupts()
1754 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_disable_vcpu_interrupts()
1755 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1756 if (state->pt_number) { in kvmppc_xive_disable_vcpu_interrupts()
1757 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_disable_vcpu_interrupts()
1758 xive_native_configure_irq(state->pt_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1760 arch_spin_unlock(&sb->lock); in kvmppc_xive_disable_vcpu_interrupts()
1765 if (vcpu->arch.xive_esc_on) { in kvmppc_xive_disable_vcpu_interrupts()
1766 __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr + in kvmppc_xive_disable_vcpu_interrupts()
1768 vcpu->arch.xive_esc_on = false; in kvmppc_xive_disable_vcpu_interrupts()
1773 * This is safe because the vcpu->mutex is held, preventing in kvmppc_xive_disable_vcpu_interrupts()
1776 vcpu->arch.xive_esc_vaddr = 0; in kvmppc_xive_disable_vcpu_interrupts()
1777 vcpu->arch.xive_esc_raddr = 0; in kvmppc_xive_disable_vcpu_interrupts()
1782 * that EOI doesn't re-enable it, but just sets the stale_p flag to
1798 xd->stale_p = false; in xive_cleanup_single_escalation()
1800 if (!vcpu->arch.xive_esc_on) in xive_cleanup_single_escalation()
1801 xd->stale_p = true; in xive_cleanup_single_escalation()
1806 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_cleanup_vcpu()
1807 struct kvmppc_xive *xive = vcpu->kvm->arch.xive; in kvmppc_xive_cleanup_vcpu()
1816 pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num); in kvmppc_xive_cleanup_vcpu()
1818 /* Ensure no interrupt is still routed to that VP */ in kvmppc_xive_cleanup_vcpu()
1819 xc->valid = false; in kvmppc_xive_cleanup_vcpu()
1823 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_cleanup_vcpu()
1827 if (xc->esc_virq[i]) { in kvmppc_xive_cleanup_vcpu()
1828 if (kvmppc_xive_has_single_escalation(xc->xive)) in kvmppc_xive_cleanup_vcpu()
1829 xive_cleanup_single_escalation(vcpu, xc->esc_virq[i]); in kvmppc_xive_cleanup_vcpu()
1830 free_irq(xc->esc_virq[i], vcpu); in kvmppc_xive_cleanup_vcpu()
1831 irq_dispose_mapping(xc->esc_virq[i]); in kvmppc_xive_cleanup_vcpu()
1832 kfree(xc->esc_virq_names[i]); in kvmppc_xive_cleanup_vcpu()
1837 xive_native_disable_vp(xc->vp_id); in kvmppc_xive_cleanup_vcpu()
1840 vcpu->arch.xive_cam_word = 0; in kvmppc_xive_cleanup_vcpu()
1844 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_cleanup_vcpu()
1846 xive_native_disable_queue(xc->vp_id, q, i); in kvmppc_xive_cleanup_vcpu()
1847 if (q->qpage) { in kvmppc_xive_cleanup_vcpu()
1848 free_pages((unsigned long)q->qpage, in kvmppc_xive_cleanup_vcpu()
1849 xive->q_page_order); in kvmppc_xive_cleanup_vcpu()
1850 q->qpage = NULL; in kvmppc_xive_cleanup_vcpu()
1855 if (xc->vp_ipi) { in kvmppc_xive_cleanup_vcpu()
1856 xive_cleanup_irq_data(&xc->vp_ipi_data); in kvmppc_xive_cleanup_vcpu()
1857 xive_native_free_irq(xc->vp_ipi); in kvmppc_xive_cleanup_vcpu()
1863 vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; in kvmppc_xive_cleanup_vcpu()
1864 vcpu->arch.xive_vcpu = NULL; in kvmppc_xive_cleanup_vcpu()
1869 /* We have a block of xive->nr_servers VPs. We just need to check in kvmppc_xive_vcpu_id_valid()
1872 return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers; in kvmppc_xive_vcpu_id_valid()
1881 return -EINVAL; in kvmppc_xive_compute_vp_id()
1884 if (xive->vp_base == XIVE_INVALID_VP) { in kvmppc_xive_compute_vp_id()
1885 xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers); in kvmppc_xive_compute_vp_id()
1886 pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers); in kvmppc_xive_compute_vp_id()
1888 if (xive->vp_base == XIVE_INVALID_VP) in kvmppc_xive_compute_vp_id()
1889 return -ENOSPC; in kvmppc_xive_compute_vp_id()
1893 if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) { in kvmppc_xive_compute_vp_id()
1895 return -EEXIST; in kvmppc_xive_compute_vp_id()
1906 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_connect_vcpu()
1908 int i, r = -EBUSY; in kvmppc_xive_connect_vcpu()
1913 if (dev->ops != &kvm_xive_ops) { in kvmppc_xive_connect_vcpu()
1915 return -EPERM; in kvmppc_xive_connect_vcpu()
1917 if (xive->kvm != vcpu->kvm) in kvmppc_xive_connect_vcpu()
1918 return -EPERM; in kvmppc_xive_connect_vcpu()
1919 if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) in kvmppc_xive_connect_vcpu()
1920 return -EBUSY; in kvmppc_xive_connect_vcpu()
1923 mutex_lock(&xive->lock); in kvmppc_xive_connect_vcpu()
1931 r = -ENOMEM; in kvmppc_xive_connect_vcpu()
1935 vcpu->arch.xive_vcpu = xc; in kvmppc_xive_connect_vcpu()
1936 xc->xive = xive; in kvmppc_xive_connect_vcpu()
1937 xc->vcpu = vcpu; in kvmppc_xive_connect_vcpu()
1938 xc->server_num = cpu; in kvmppc_xive_connect_vcpu()
1939 xc->vp_id = vp_id; in kvmppc_xive_connect_vcpu()
1940 xc->mfrr = 0xff; in kvmppc_xive_connect_vcpu()
1941 xc->valid = true; in kvmppc_xive_connect_vcpu()
1943 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); in kvmppc_xive_connect_vcpu()
1948 pr_err("inconsistent save-restore setup for VCPU %d\n", cpu); in kvmppc_xive_connect_vcpu()
1949 r = -EIO; in kvmppc_xive_connect_vcpu()
1954 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); in kvmppc_xive_connect_vcpu()
1955 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); in kvmppc_xive_connect_vcpu()
1958 xc->vp_ipi = xive_native_alloc_irq(); in kvmppc_xive_connect_vcpu()
1959 if (!xc->vp_ipi) { in kvmppc_xive_connect_vcpu()
1961 r = -EIO; in kvmppc_xive_connect_vcpu()
1964 pr_devel(" IPI=0x%x\n", xc->vp_ipi); in kvmppc_xive_connect_vcpu()
1966 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data); in kvmppc_xive_connect_vcpu()
1974 r = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive)); in kvmppc_xive_connect_vcpu()
1983 * our mfrr change notifications. If the VCPU is hot-plugged, we in kvmppc_xive_connect_vcpu()
1988 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_connect_vcpu()
1995 if (xive->qmap & (1 << i)) { in kvmppc_xive_connect_vcpu()
2003 r = xive_native_configure_queue(xc->vp_id, in kvmppc_xive_connect_vcpu()
2019 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI); in kvmppc_xive_connect_vcpu()
2021 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00); in kvmppc_xive_connect_vcpu()
2024 mutex_unlock(&xive->lock); in kvmppc_xive_connect_vcpu()
2030 vcpu->arch.irq_type = KVMPPC_IRQ_XICS; in kvmppc_xive_connect_vcpu()
2047 state = &sb->irq_state[idx]; in xive_pre_save_set_queued()
2050 if (!state->valid) { in xive_pre_save_set_queued()
2060 if (!state->saved_p) in xive_pre_save_set_queued()
2064 state->in_queue = true; in xive_pre_save_set_queued()
2071 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq]; in xive_pre_save_mask_irq()
2073 if (!state->valid) in xive_pre_save_mask_irq()
2077 state->saved_scan_prio = xive_lock_and_mask(xive, sb, state); in xive_pre_save_mask_irq()
2080 state->saved_p = state->old_p; in xive_pre_save_mask_irq()
2081 state->saved_q = state->old_q; in xive_pre_save_mask_irq()
2084 arch_spin_unlock(&sb->lock); in xive_pre_save_mask_irq()
2091 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq]; in xive_pre_save_unmask_irq()
2093 if (!state->valid) in xive_pre_save_unmask_irq()
2104 if (state->saved_scan_prio != MASKED) in xive_pre_save_unmask_irq()
2105 xive_finish_unmask(xive, sb, state, state->saved_scan_prio); in xive_pre_save_unmask_irq()
2108 arch_spin_unlock(&sb->lock); in xive_pre_save_unmask_irq()
2113 u32 idx = q->idx; in xive_pre_save_queue()
2114 u32 toggle = q->toggle; in xive_pre_save_queue()
2118 irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle); in xive_pre_save_queue()
2134 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
2135 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in xive_pre_save_scan()
2143 kvm_for_each_vcpu(i, vcpu, xive->kvm) { in xive_pre_save_scan()
2144 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_pre_save_scan()
2148 if (xc->queues[j].qpage) in xive_pre_save_scan()
2149 xive_pre_save_queue(xive, &xc->queues[j]); in xive_pre_save_scan()
2154 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
2155 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in xive_pre_save_scan()
2168 for (i = 0; i <= xive->max_sbid; i++) { in xive_post_save_scan()
2169 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in xive_post_save_scan()
2173 sb->irq_state[j].in_queue = false; in xive_post_save_scan()
2177 xive->saved_src_count = 0; in xive_post_save_scan()
2193 return -ENOENT; in xive_get_source()
2195 state = &sb->irq_state[idx]; in xive_get_source()
2197 if (!state->valid) in xive_get_source()
2198 return -ENOENT; in xive_get_source()
2218 if (xive->saved_src_count == 0) in xive_get_source()
2220 xive->saved_src_count++; in xive_get_source()
2223 val = state->act_server; in xive_get_source()
2224 prio = state->saved_scan_prio; in xive_get_source()
2228 prio = state->saved_priority; in xive_get_source()
2231 if (state->lsi) { in xive_get_source()
2233 if (state->saved_p) in xive_get_source()
2236 if (state->saved_p) in xive_get_source()
2239 if (state->saved_q) in xive_get_source()
2243 * We mark it pending (which will attempt a re-delivery) in xive_get_source()
2248 if (state->in_queue || (prio == MASKED && state->saved_q)) in xive_get_source()
2256 if (xive->saved_src_count == xive->src_count) in xive_get_source()
2261 return -EFAULT; in xive_get_source()
2274 mutex_lock(&xive->lock); in kvmppc_xive_create_src_block()
2276 /* block already exists - somebody else got here first */ in kvmppc_xive_create_src_block()
2277 if (xive->src_blocks[bid]) in kvmppc_xive_create_src_block()
2285 sb->id = bid; in kvmppc_xive_create_src_block()
2288 sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i; in kvmppc_xive_create_src_block()
2289 sb->irq_state[i].eisn = 0; in kvmppc_xive_create_src_block()
2290 sb->irq_state[i].guest_priority = MASKED; in kvmppc_xive_create_src_block()
2291 sb->irq_state[i].saved_priority = MASKED; in kvmppc_xive_create_src_block()
2292 sb->irq_state[i].act_priority = MASKED; in kvmppc_xive_create_src_block()
2295 xive->src_blocks[bid] = sb; in kvmppc_xive_create_src_block()
2297 if (bid > xive->max_sbid) in kvmppc_xive_create_src_block()
2298 xive->max_sbid = bid; in kvmppc_xive_create_src_block()
2301 mutex_unlock(&xive->lock); in kvmppc_xive_create_src_block()
2302 return xive->src_blocks[bid]; in kvmppc_xive_create_src_block()
2307 struct kvm *kvm = xive->kvm; in xive_check_delayed_irq()
2312 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_check_delayed_irq()
2317 if (xc->delayed_irq == irq) { in xive_check_delayed_irq()
2318 xc->delayed_irq = 0; in xive_check_delayed_irq()
2319 xive->delayed_irqs--; in xive_check_delayed_irq()
2338 return -ENOENT; in xive_set_source()
2349 return -ENOMEM; in xive_set_source()
2352 state = &sb->irq_state[idx]; in xive_set_source()
2357 return -EFAULT; in xive_set_source()
2370 if (!state->ipi_number) { in xive_set_source()
2371 state->ipi_number = xive_native_alloc_irq(); in xive_set_source()
2372 if (state->ipi_number == 0) { in xive_set_source()
2374 return -ENOMEM; in xive_set_source()
2376 xive_native_populate_irq_data(state->ipi_number, &state->ipi_data); in xive_set_source()
2377 pr_devel(" src_ipi=0x%x\n", state->ipi_number); in xive_set_source()
2387 state->guest_priority = 0; in xive_set_source()
2399 state->act_priority = MASKED; in xive_set_source()
2406 arch_spin_unlock(&sb->lock); in xive_set_source()
2411 mutex_lock(&xive->lock); in xive_set_source()
2412 rc = xive_check_provisioning(xive->kvm, act_prio); in xive_set_source()
2413 mutex_unlock(&xive->lock); in xive_set_source()
2417 rc = xive_target_interrupt(xive->kvm, state, in xive_set_source()
2422 * the guest re-targets it. in xive_set_source()
2430 if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) { in xive_set_source()
2436 state->old_p = false; in xive_set_source()
2437 state->old_q = false; in xive_set_source()
2438 state->lsi = false; in xive_set_source()
2439 state->asserted = false; in xive_set_source()
2443 state->lsi = true; in xive_set_source()
2445 state->asserted = true; in xive_set_source()
2446 pr_devel(" LSI ! Asserted=%d\n", state->asserted); in xive_set_source()
2460 state->old_p = true; in xive_set_source()
2462 state->old_q = true; in xive_set_source()
2464 pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q); in xive_set_source()
2469 * re-trigger if necessary. in xive_set_source()
2473 state->guest_priority = MASKED; in xive_set_source()
2474 state->saved_priority = guest_prio; in xive_set_source()
2478 state->saved_priority = guest_prio; in xive_set_source()
2482 if (!state->valid) in xive_set_source()
2483 xive->src_count++; in xive_set_source()
2484 state->valid = true; in xive_set_source()
2492 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_set_irq()
2498 return -ENODEV; in kvmppc_xive_set_irq()
2502 return -EINVAL; in kvmppc_xive_set_irq()
2505 state = &sb->irq_state[idx]; in kvmppc_xive_set_irq()
2506 if (!state->valid) in kvmppc_xive_set_irq()
2507 return -EINVAL; in kvmppc_xive_set_irq()
2509 /* We don't allow a trigger on a passed-through interrupt */ in kvmppc_xive_set_irq()
2510 if (state->pt_number) in kvmppc_xive_set_irq()
2511 return -EINVAL; in kvmppc_xive_set_irq()
2513 if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL) in kvmppc_xive_set_irq()
2514 state->asserted = true; in kvmppc_xive_set_irq()
2516 state->asserted = false; in kvmppc_xive_set_irq()
2521 xive_irq_trigger(&state->ipi_data); in kvmppc_xive_set_irq()
2533 return -EFAULT; in kvmppc_xive_set_nr_servers()
2538 return -EINVAL; in kvmppc_xive_set_nr_servers()
2540 mutex_lock(&xive->lock); in kvmppc_xive_set_nr_servers()
2541 if (xive->vp_base != XIVE_INVALID_VP) in kvmppc_xive_set_nr_servers()
2549 rc = -EBUSY; in kvmppc_xive_set_nr_servers()
2554 xive->nr_servers = KVM_MAX_VCPUS; in kvmppc_xive_set_nr_servers()
2556 xive->nr_servers = nr_servers; in kvmppc_xive_set_nr_servers()
2558 mutex_unlock(&xive->lock); in kvmppc_xive_set_nr_servers()
2565 struct kvmppc_xive *xive = dev->private; in xive_set_attr()
2568 switch (attr->group) { in xive_set_attr()
2570 return xive_set_source(xive, attr->attr, attr->addr); in xive_set_attr()
2572 switch (attr->attr) { in xive_set_attr()
2574 return kvmppc_xive_set_nr_servers(xive, attr->addr); in xive_set_attr()
2577 return -ENXIO; in xive_set_attr()
2582 struct kvmppc_xive *xive = dev->private; in xive_get_attr()
2585 switch (attr->group) { in xive_get_attr()
2587 return xive_get_source(xive, attr->attr, attr->addr); in xive_get_attr()
2589 return -ENXIO; in xive_get_attr()
2595 switch (attr->group) { in xive_has_attr()
2597 if (attr->attr >= KVMPPC_XICS_FIRST_IRQ && in xive_has_attr()
2598 attr->attr < KVMPPC_XICS_NR_IRQS) in xive_has_attr()
2602 switch (attr->attr) { in xive_has_attr()
2607 return -ENXIO; in xive_has_attr()
2621 struct kvmppc_xive_irq_state *state = &sb->irq_state[i]; in kvmppc_xive_free_sources()
2623 if (!state->valid) in kvmppc_xive_free_sources()
2626 kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data); in kvmppc_xive_free_sources()
2627 xive_cleanup_irq_data(&state->ipi_data); in kvmppc_xive_free_sources()
2628 xive_native_free_irq(state->ipi_number); in kvmppc_xive_free_sources()
2630 /* Pass-through, cleanup too but keep IRQ hw data */ in kvmppc_xive_free_sources()
2631 if (state->pt_number) in kvmppc_xive_free_sources()
2632 kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data); in kvmppc_xive_free_sources()
2634 state->valid = false; in kvmppc_xive_free_sources()
2639 * Called when device fd is closed. kvm->lock is held.
2643 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_release()
2644 struct kvm *kvm = xive->kvm; in kvmppc_xive_release()
2659 debugfs_remove(xive->dentry); in kvmppc_xive_release()
2666 * Take vcpu->mutex to ensure that no one_reg get/set ioctl in kvmppc_xive_release()
2668 * Holding the vcpu->mutex also means that the vcpu cannot in kvmppc_xive_release()
2673 mutex_lock(&vcpu->mutex); in kvmppc_xive_release()
2675 mutex_unlock(&vcpu->mutex); in kvmppc_xive_release()
2679 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type in kvmppc_xive_release()
2680 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe in kvmppc_xive_release()
2684 kvm->arch.xive = NULL; in kvmppc_xive_release()
2687 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_release()
2688 if (xive->src_blocks[i]) in kvmppc_xive_release()
2689 kvmppc_xive_free_sources(xive->src_blocks[i]); in kvmppc_xive_release()
2690 kfree(xive->src_blocks[i]); in kvmppc_xive_release()
2691 xive->src_blocks[i] = NULL; in kvmppc_xive_release()
2694 if (xive->vp_base != XIVE_INVALID_VP) in kvmppc_xive_release()
2695 xive_native_free_vp_block(xive->vp_base); in kvmppc_xive_release()
2719 &kvm->arch.xive_devices.native : in kvmppc_xive_get_device()
2720 &kvm->arch.xive_devices.xics_on_xive; in kvmppc_xive_get_device()
2734 * Create a XICS device with XIVE backend. kvm->lock is held.
2739 struct kvm *kvm = dev->kvm; in kvmppc_xive_create()
2744 if (kvm->arch.xive) in kvmppc_xive_create()
2745 return -EEXIST; in kvmppc_xive_create()
2749 return -ENOMEM; in kvmppc_xive_create()
2751 dev->private = xive; in kvmppc_xive_create()
2752 xive->dev = dev; in kvmppc_xive_create()
2753 xive->kvm = kvm; in kvmppc_xive_create()
2754 mutex_init(&xive->lock); in kvmppc_xive_create()
2757 xive->q_order = xive_native_default_eq_shift(); in kvmppc_xive_create()
2758 if (xive->q_order < PAGE_SHIFT) in kvmppc_xive_create()
2759 xive->q_page_order = 0; in kvmppc_xive_create()
2761 xive->q_page_order = xive->q_order - PAGE_SHIFT; in kvmppc_xive_create()
2764 xive->vp_base = XIVE_INVALID_VP; in kvmppc_xive_create()
2768 xive->nr_servers = KVM_MAX_VCPUS; in kvmppc_xive_create()
2771 xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION; in kvmppc_xive_create()
2774 xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE; in kvmppc_xive_create()
2776 kvm->arch.xive = xive; in kvmppc_xive_create()
2810 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_debug_show_queues()
2814 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_debug_show_queues()
2817 if (!q->qpage && !xc->esc_virq[i]) in kvmppc_xive_debug_show_queues()
2820 if (q->qpage) { in kvmppc_xive_debug_show_queues()
2822 idx = q->idx; in kvmppc_xive_debug_show_queues()
2823 i0 = be32_to_cpup(q->qpage + idx); in kvmppc_xive_debug_show_queues()
2824 idx = (idx + 1) & q->msk; in kvmppc_xive_debug_show_queues()
2825 i1 = be32_to_cpup(q->qpage + idx); in kvmppc_xive_debug_show_queues()
2826 seq_printf(m, "T=%d %08x %08x...\n", q->toggle, in kvmppc_xive_debug_show_queues()
2829 if (xc->esc_virq[i]) { in kvmppc_xive_debug_show_queues()
2830 struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]); in kvmppc_xive_debug_show_queues()
2836 xc->esc_virq[i], in kvmppc_xive_debug_show_queues()
2837 (pq & XIVE_ESB_VAL_P) ? 'P' : '-', in kvmppc_xive_debug_show_queues()
2838 (pq & XIVE_ESB_VAL_Q) ? 'Q' : '-', in kvmppc_xive_debug_show_queues()
2839 xd->eoi_page); in kvmppc_xive_debug_show_queues()
2853 struct kvmppc_xive_irq_state *state = &sb->irq_state[i]; in kvmppc_xive_debug_show_sources()
2858 if (!state->valid) in kvmppc_xive_debug_show_sources()
2865 seq_printf(m, "%08x %08x/%02x", state->number, hw_num, in kvmppc_xive_debug_show_sources()
2866 xd->src_chip); in kvmppc_xive_debug_show_sources()
2867 if (state->lsi) in kvmppc_xive_debug_show_sources()
2868 seq_printf(m, " %cLSI", state->asserted ? '^' : ' '); in kvmppc_xive_debug_show_sources()
2873 state->ipi_number == hw_num ? "IPI" : " PT", in kvmppc_xive_debug_show_sources()
2874 pq & XIVE_ESB_VAL_P ? 'P' : '-', in kvmppc_xive_debug_show_sources()
2875 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', in kvmppc_xive_debug_show_sources()
2876 state->eisn, state->act_server, in kvmppc_xive_debug_show_sources()
2877 state->act_priority); in kvmppc_xive_debug_show_sources()
2885 struct kvmppc_xive *xive = m->private; in xive_debug_show()
2886 struct kvm *kvm = xive->kvm; in xive_debug_show()
2906 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_debug_show()
2913 xc->server_num, xc->vp_id, xc->vp_chip_id, in xive_debug_show()
2914 xc->cppr, xc->hw_cppr, in xive_debug_show()
2915 xc->mfrr, xc->pending, in xive_debug_show()
2916 xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); in xive_debug_show()
2920 t_rm_h_xirr += xc->stat_rm_h_xirr; in xive_debug_show()
2921 t_rm_h_ipoll += xc->stat_rm_h_ipoll; in xive_debug_show()
2922 t_rm_h_cppr += xc->stat_rm_h_cppr; in xive_debug_show()
2923 t_rm_h_eoi += xc->stat_rm_h_eoi; in xive_debug_show()
2924 t_rm_h_ipi += xc->stat_rm_h_ipi; in xive_debug_show()
2925 t_vm_h_xirr += xc->stat_vm_h_xirr; in xive_debug_show()
2926 t_vm_h_ipoll += xc->stat_vm_h_ipoll; in xive_debug_show()
2927 t_vm_h_cppr += xc->stat_vm_h_cppr; in xive_debug_show()
2928 t_vm_h_eoi += xc->stat_vm_h_eoi; in xive_debug_show()
2929 t_vm_h_ipi += xc->stat_vm_h_ipi; in xive_debug_show()
2941 for (i = 0; i <= xive->max_sbid; i++) { in xive_debug_show()
2942 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in xive_debug_show()
2945 arch_spin_lock(&sb->lock); in xive_debug_show()
2947 arch_spin_unlock(&sb->lock); in xive_debug_show()
2958 xive->dentry = debugfs_create_file("xive", S_IRUGO, xive->kvm->debugfs_dentry, in xive_debugfs_init()
2966 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_init()
2973 .name = "kvm-xive",