Lines Matching +full:non +full:- +full:ipi
1 // SPDX-License-Identifier: GPL-2.0-only
6 #define pr_fmt(fmt) "xive-kvm: " fmt
23 #include <asm/xive-regs.h>
33 #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
34 #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
65 xc->pending |= 1 << cppr; in xive_vm_ack_pending()
68 if (cppr >= xc->hw_cppr) in xive_vm_ack_pending()
69 pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n", in xive_vm_ack_pending()
70 smp_processor_id(), cppr, xc->hw_cppr); in xive_vm_ack_pending()
74 * xc->cppr, this will be done as we scan for interrupts in xive_vm_ack_pending()
77 xc->hw_cppr = cppr; in xive_vm_ack_pending()
84 if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) in xive_vm_esb_load()
89 val >>= 64-8; in xive_vm_esb_load()
98 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) in xive_vm_source_eoi()
100 else if (xd->flags & XIVE_IRQ_FLAG_LSI) { in xive_vm_source_eoi()
103 * as they are automatically re-triggered in HW when still in xive_vm_source_eoi()
116 * This allows us to then do a re-trigger if Q was set in xive_vm_source_eoi()
121 /* Re-trigger if needed */ in xive_vm_source_eoi()
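
The fragments above rely on the ESB (Event State Buffer) MMIO pages: a load from a given offset atomically sets the source's 2-bit PQ state and returns the previous value (on little-endian hosts the byte of interest ends up in the top byte of the 64-bit load, hence the `val >>= 64-8`). Below is a minimal userspace sketch of the EOI decision for an ordinary (non-LSI, non-store-EOI) source, with the MMIO side stubbed out; only the P/Q bit values mirror the kernel's XIVE_ESB_VAL_P/XIVE_ESB_VAL_Q definitions, everything else is illustrative.

    /*
     * Minimal userspace sketch of the PQ handling in the fragments above.
     * The MMIO accesses are stubbed out; only the bit manipulation is real.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define XIVE_ESB_VAL_P 0x2 /* interrupt pending/acknowledged */
    #define XIVE_ESB_VAL_Q 0x1 /* an event was coalesced while P was set */

    /* Pretend ESB load at the SET_PQ_00 offset: returns the old PQ state. */
    static uint8_t esb_load_set_pq_00(uint8_t *pq)
    {
        uint8_t old = *pq;
        *pq = 0; /* reset both bits, re-enabling delivery */
        return old;
    }

    /*
     * EOI for a non-LSI, non-store-EOI source: reset PQ to 00 and, if Q was
     * set, an event was coalesced while we were handling the interrupt, so
     * it has to be re-triggered by hand.
     */
    static bool source_eoi_needs_retrigger(uint8_t *pq)
    {
        uint8_t old = esb_load_set_pq_00(pq);
        return (old & XIVE_ESB_VAL_Q) != 0;
    }

    int main(void)
    {
        uint8_t pq = XIVE_ESB_VAL_P | XIVE_ESB_VAL_Q; /* P=1, Q=1 */
        printf("re-trigger needed: %d, PQ now %02x\n",
               source_eoi_needs_retrigger(&pq), pq);
        return 0;
    }
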
140 while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) { in xive_vm_scan_interrupts()
149 prio = ffs(pending) - 1; in xive_vm_scan_interrupts()
152 if (prio >= xc->cppr || prio > 7) { in xive_vm_scan_interrupts()
153 if (xc->mfrr < xc->cppr) { in xive_vm_scan_interrupts()
154 prio = xc->mfrr; in xive_vm_scan_interrupts()
161 q = &xc->queues[prio]; in xive_vm_scan_interrupts()
162 idx = q->idx; in xive_vm_scan_interrupts()
163 toggle = q->toggle; in xive_vm_scan_interrupts()
171 qpage = READ_ONCE(q->qpage); in xive_vm_scan_interrupts()
176 * non-queueing priority (ie, qpage = 0). in xive_vm_scan_interrupts()
178 hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle); in xive_vm_scan_interrupts()
183 * we EOI it now, thus re-enabling reception of a new in xive_vm_scan_interrupts()
187 * page for the queue. In this case, we have a non-queued in xive_vm_scan_interrupts()
188 * IPI that needs to be EOIed. in xive_vm_scan_interrupts()
192 * been set and another occurrence of the IPI will trigger. in xive_vm_scan_interrupts()
196 xive_vm_source_eoi(xc->vp_ipi, in xive_vm_scan_interrupts()
197 &xc->vp_ipi_data); in xive_vm_scan_interrupts()
198 q->idx = idx; in xive_vm_scan_interrupts()
199 q->toggle = toggle; in xive_vm_scan_interrupts()
219 if (atomic_read(&q->pending_count)) { in xive_vm_scan_interrupts()
220 int p = atomic_xchg(&q->pending_count, 0); in xive_vm_scan_interrupts()
223 WARN_ON(p > atomic_read(&q->count)); in xive_vm_scan_interrupts()
224 atomic_sub(p, &q->count); in xive_vm_scan_interrupts()
231 * favored than (or equal to) a pending IPI, we return in xive_vm_scan_interrupts()
232 * the IPI instead. in xive_vm_scan_interrupts()
234 if (prio >= xc->mfrr && xc->mfrr < xc->cppr) { in xive_vm_scan_interrupts()
235 prio = xc->mfrr; in xive_vm_scan_interrupts()
242 q->idx = idx; in xive_vm_scan_interrupts()
243 q->toggle = toggle; in xive_vm_scan_interrupts()
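
For context, xive_vm_scan_interrupts() works on a one-byte pending bitmap in which bit n means "priority n has a queued event", with lower numbers more favored; ffs(pending) - 1 is therefore the best queued priority, and the IPI (whose priority is xc->mfrr, 0xff meaning none) wins whenever it is at least as favored as that and more favored than the current CPPR. The following is a simplified sketch of just that selection, ignoring the queue pops, EOI handling and CPPR update done by the real scan; XICS_IPI mirrors the kernel's value of 2, the rest is illustrative.

    /* Simplified sketch of the priority selection in xive_vm_scan_interrupts(). */
    #include <stdint.h>
    #include <stdio.h>
    #include <strings.h> /* ffs() */

    #define XICS_IPI 2 /* IRQ number reported when the IPI wins */

    struct vcpu_prio_state {
        uint8_t pending; /* bit n set => an event is queued at priority n */
        uint8_t cppr;    /* current priority: only prios < cppr are delivered */
        uint8_t mfrr;    /* IPI priority, 0xff = no IPI pending */
    };

    /* Returns the hirq to deliver, or 0 if nothing is deliverable. */
    static uint32_t pick_irq(const struct vcpu_prio_state *xc, uint32_t queued_hirq)
    {
        int prio = xc->pending ? ffs(xc->pending) - 1 : 0xff;

        /* The IPI wins if it is at least as favored as the best queue... */
        if (xc->mfrr != 0xff && xc->mfrr <= prio)
            prio = xc->mfrr;
        /* ...but nothing is delivered unless it beats the current CPPR. */
        if (prio >= xc->cppr)
            return 0;
        return (xc->mfrr != 0xff && prio == xc->mfrr) ? XICS_IPI : queued_hirq;
    }

    int main(void)
    {
        struct vcpu_prio_state xc = { .pending = 1 << 5, .cppr = 0xff, .mfrr = 3 };
        /* The IPI at priority 3 beats the queued event at priority 5. */
        printf("deliver irq %u\n", pick_irq(&xc, 0x1234));
        return 0;
    }
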
252 xc->pending = pending; in xive_vm_scan_interrupts()
266 * Note: This can only make xc->cppr smaller as the previous in xive_vm_scan_interrupts()
268 * the current xc->cppr. Thus we don't need to re-check xc->mfrr in xive_vm_scan_interrupts()
272 xc->cppr = prio; in xive_vm_scan_interrupts()
274 * If it was an IPI the HW CPPR might have been lowered too much in xive_vm_scan_interrupts()
277 * We re-sync it here. in xive_vm_scan_interrupts()
279 if (xc->cppr != xc->hw_cppr) { in xive_vm_scan_interrupts()
280 xc->hw_cppr = xc->cppr; in xive_vm_scan_interrupts()
281 __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR); in xive_vm_scan_interrupts()
289 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_xirr()
295 xc->stat_vm_h_xirr++; in xive_vm_h_xirr()
301 xc->pending, xc->hw_cppr, xc->cppr); in xive_vm_h_xirr()
304 old_cppr = xive_prio_to_guest(xc->cppr); in xive_vm_h_xirr()
307 hirq = xive_vm_scan_interrupts(xc, xc->pending, scan_fetch); in xive_vm_h_xirr()
310 hirq, xc->hw_cppr, xc->cppr); in xive_vm_h_xirr()
338 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipoll()
339 u8 pending = xc->pending; in xive_vm_h_ipoll()
344 xc->stat_vm_h_ipoll++; in xive_vm_h_ipoll()
347 if (xc->server_num != server) { in xive_vm_h_ipoll()
348 vcpu = kvmppc_xive_find_server(vcpu->kvm, server); in xive_vm_h_ipoll()
351 xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipoll()
367 kvmppc_set_gpr(vcpu, 4, hirq | (xc->cppr << 24)); in xive_vm_h_ipoll()
376 pending = xc->pending; in xive_vm_push_pending_to_hw()
377 if (xc->mfrr != 0xff) { in xive_vm_push_pending_to_hw()
378 if (xc->mfrr < 8) in xive_vm_push_pending_to_hw()
379 pending |= 1 << xc->mfrr; in xive_vm_push_pending_to_hw()
385 prio = ffs(pending) - 1; in xive_vm_push_pending_to_hw()
396 for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) { in xive_vm_scan_for_rerouted_irqs()
397 struct xive_q *q = &xc->queues[prio]; in xive_vm_scan_for_rerouted_irqs()
405 idx = q->idx; in xive_vm_scan_for_rerouted_irqs()
406 toggle = q->toggle; in xive_vm_scan_for_rerouted_irqs()
407 qpage = READ_ONCE(q->qpage); in xive_vm_scan_for_rerouted_irqs()
426 state = &sb->irq_state[src]; in xive_vm_scan_for_rerouted_irqs()
429 if (xc->server_num == state->act_server) in xive_vm_scan_for_rerouted_irqs()
433 * Alright, it *has* been re-routed, kill it from in xive_vm_scan_for_rerouted_irqs()
442 if (!(xd->flags & XIVE_IRQ_FLAG_LSI)) in xive_vm_scan_for_rerouted_irqs()
449 idx = (idx + 1) & q->msk; in xive_vm_scan_for_rerouted_irqs()
458 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_cppr()
459 struct kvmppc_xive *xive = vcpu->kvm->arch.xive; in xive_vm_h_cppr()
464 xc->stat_vm_h_cppr++; in xive_vm_h_cppr()
470 old_cppr = xc->cppr; in xive_vm_h_cppr()
471 xc->cppr = cppr; in xive_vm_h_cppr()
474 * Order the above update of xc->cppr with the subsequent in xive_vm_h_cppr()
475 * read of xc->mfrr inside push_pending_to_hw() in xive_vm_h_cppr()
484 * which we have optimized out sending an IPI signal. in xive_vm_h_cppr()
509 xc->hw_cppr = cppr; in xive_vm_h_cppr()
517 struct kvmppc_xive *xive = vcpu->kvm->arch.xive; in xive_vm_h_eoi()
520 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_eoi()
529 xc->stat_vm_h_eoi++; in xive_vm_h_eoi()
531 xc->cppr = xive_prio_from_guest(new_cppr); in xive_vm_h_eoi()
541 * This barrier orders the setting of xc->cppr vs. in xive_vm_h_eoi()
542 * subsequent test of xc->mfrr done inside in xive_vm_h_eoi()
558 state = &sb->irq_state[src]; in xive_vm_h_eoi()
561 state->in_eoi = true; in xive_vm_h_eoi()
566 * of xc->cppr vs. subsequent test of xc->mfrr done inside in xive_vm_h_eoi()
572 if (state->guest_priority == MASKED) { in xive_vm_h_eoi()
573 arch_spin_lock(&sb->lock); in xive_vm_h_eoi()
574 if (state->guest_priority != MASKED) { in xive_vm_h_eoi()
575 arch_spin_unlock(&sb->lock); in xive_vm_h_eoi()
581 state->old_p = false; in xive_vm_h_eoi()
583 arch_spin_unlock(&sb->lock); in xive_vm_h_eoi()
591 if (state->lsi && state->asserted) in xive_vm_h_eoi()
602 * state->in_eoi is visible. in xive_vm_h_eoi()
605 state->in_eoi = false; in xive_vm_h_eoi()
608 /* Re-evaluate pending IRQs and update HW */ in xive_vm_h_eoi()
609 xive_vm_scan_interrupts(xc, xc->pending, scan_eoi); in xive_vm_h_eoi()
611 pr_devel(" after scan pending=%02x\n", xc->pending); in xive_vm_h_eoi()
614 xc->hw_cppr = xc->cppr; in xive_vm_h_eoi()
615 __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR); in xive_vm_h_eoi()
623 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipi()
627 xc->stat_vm_h_ipi++; in xive_vm_h_ipi()
630 vcpu = kvmppc_xive_find_server(vcpu->kvm, server); in xive_vm_h_ipi()
633 xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipi()
636 xc->mfrr = mfrr; in xive_vm_h_ipi()
639 * The load of xc->cppr below and the subsequent MMIO store in xive_vm_h_ipi()
640 * to the IPI must happen after the above mfrr update is in xive_vm_h_ipi()
643 * - Synchronize with another CPU doing an H_EOI or a H_CPPR in xive_vm_h_ipi()
644 * updating xc->cppr then reading xc->mfrr. in xive_vm_h_ipi()
646 * - The target of the IPI sees the xc->mfrr update in xive_vm_h_ipi()
650 /* Shoot the IPI if more favored than the target cppr */ in xive_vm_h_ipi()
651 if (mfrr < xc->cppr) in xive_vm_h_ipi()
652 __raw_writeq(0, __x_trig_page(&xc->vp_ipi_data)); in xive_vm_h_ipi()
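
The ordering comment above pairs with the barriers in H_CPPR/H_EOI: each side publishes its own field (mfrr here, cppr there), issues a full barrier, then reads the other side's field, so at least one of them notices the other's update and the IPI cannot be lost. Below is an illustrative userspace rendering of that pattern with C11 atomics standing in for the kernel's mb(); the trigger write is a stub, not the real MMIO store to the trigger page.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint8_t cppr = 0xff; /* target's current priority */
    static _Atomic uint8_t mfrr = 0xff; /* pending IPI priority, 0xff = none */

    static void trigger_ipi(void) { puts("trigger IPI"); } /* stands in for the MMIO store */

    /* H_IPI side: publish mfrr, then decide whether to poke the hardware. */
    static void h_ipi(uint8_t new_mfrr)
    {
        atomic_store(&mfrr, new_mfrr);
        atomic_thread_fence(memory_order_seq_cst); /* kernel: mb() */
        if (new_mfrr < atomic_load(&cppr))
            trigger_ipi();
    }

    /* H_CPPR side on the target vcpu: publish cppr, then re-check mfrr. */
    static void h_cppr(uint8_t new_cppr)
    {
        atomic_store(&cppr, new_cppr);
        atomic_thread_fence(memory_order_seq_cst); /* kernel: smp_mb() */
        if (atomic_load(&mfrr) < new_cppr)
            trigger_ipi(); /* kernel: xive_vm_push_pending_to_hw() */
    }

    int main(void)
    {
        h_cppr(0x06);
        h_ipi(0x04);
        return 0;
    }
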
659 * account for the IPI and an additional safety guard.
665 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_vcpu_has_save_restore()
668 return xc->vp_cam & TM_QW1W2_HO; in kvmppc_xive_vcpu_has_save_restore()
673 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_check_save_restore()
674 struct kvmppc_xive *xive = xc->xive; in kvmppc_xive_check_save_restore()
676 if (xive->flags & KVMPPC_XIVE_FLAG_SAVE_RESTORE) in kvmppc_xive_check_save_restore()
688 void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt; in kvmppc_xive_push_vcpu()
694 * (e.g. because it's not using an in-kernel interrupt controller). in kvmppc_xive_push_vcpu()
696 if (!tima || !vcpu->arch.xive_cam_word) in kvmppc_xive_push_vcpu()
701 __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS); in kvmppc_xive_push_vcpu()
702 __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2); in kvmppc_xive_push_vcpu()
703 vcpu->arch.xive_pushed = 1; in kvmppc_xive_push_vcpu()
713 vcpu->arch.irq_pending = 0; in kvmppc_xive_push_vcpu()
719 if (vcpu->arch.xive_esc_on) { in kvmppc_xive_push_vcpu()
720 pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr + in kvmppc_xive_push_vcpu()
728 * early enough (re-cede right away), there is a in kvmppc_xive_push_vcpu()
731 * a big no-no. in kvmppc_xive_push_vcpu()
742 * before re-enabling the escalation interrupt, and if in kvmppc_xive_push_vcpu()
747 vcpu->arch.xive_esc_on = 0; in kvmppc_xive_push_vcpu()
758 void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt; in kvmppc_xive_pull_vcpu()
760 if (!vcpu->arch.xive_pushed) in kvmppc_xive_pull_vcpu()
774 vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS); in kvmppc_xive_pull_vcpu()
777 vcpu->arch.xive_saved_state.lsmfb = 0; in kvmppc_xive_pull_vcpu()
778 vcpu->arch.xive_saved_state.ack = 0xff; in kvmppc_xive_pull_vcpu()
779 vcpu->arch.xive_pushed = 0; in kvmppc_xive_pull_vcpu()
786 void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr; in kvmppc_xive_rearm_escalation()
794 if (vcpu->arch.xive_esc_on) { in kvmppc_xive_rearm_escalation()
806 * There is no need to use the load-after-store ordering offset in kvmppc_xive_rearm_escalation()
811 vcpu->arch.xive_esc_on = true; in kvmppc_xive_rearm_escalation()
828 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) in xive_irq_trigger()
832 if (WARN_ON(!xd->trig_mmio)) in xive_irq_trigger()
835 out_be64(xd->trig_mmio, 0); in xive_irq_trigger()
844 vcpu->arch.irq_pending = 1; in xive_esc_irq()
846 if (vcpu->arch.ceded || vcpu->arch.nested) in xive_esc_irq()
849 /* Since we have the no-EOI flag, the interrupt is effectively in xive_esc_irq()
858 vcpu->arch.xive_esc_on = false; in xive_esc_irq()
869 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_attach_escalation()
870 struct xive_q *q = &xc->queues[prio]; in kvmppc_xive_attach_escalation()
875 if (xc->esc_virq[prio]) in kvmppc_xive_attach_escalation()
879 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq); in kvmppc_xive_attach_escalation()
880 if (!xc->esc_virq[prio]) { in kvmppc_xive_attach_escalation()
882 prio, xc->server_num); in kvmppc_xive_attach_escalation()
883 return -EIO; in kvmppc_xive_attach_escalation()
887 name = kasprintf(GFP_KERNEL, "kvm-%lld-%d", in kvmppc_xive_attach_escalation()
888 vcpu->kvm->arch.lpid, xc->server_num); in kvmppc_xive_attach_escalation()
890 name = kasprintf(GFP_KERNEL, "kvm-%lld-%d-%d", in kvmppc_xive_attach_escalation()
891 vcpu->kvm->arch.lpid, xc->server_num, prio); in kvmppc_xive_attach_escalation()
894 prio, xc->server_num); in kvmppc_xive_attach_escalation()
895 rc = -ENOMEM; in kvmppc_xive_attach_escalation()
899 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio); in kvmppc_xive_attach_escalation()
901 rc = request_irq(xc->esc_virq[prio], xive_esc_irq, in kvmppc_xive_attach_escalation()
905 prio, xc->server_num); in kvmppc_xive_attach_escalation()
908 xc->esc_virq_names[prio] = name; in kvmppc_xive_attach_escalation()
919 struct xive_irq_data *xd = irq_get_chip_data(xc->esc_virq[prio]); in kvmppc_xive_attach_escalation()
922 vcpu->arch.xive_esc_raddr = xd->eoi_page; in kvmppc_xive_attach_escalation()
923 vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio; in kvmppc_xive_attach_escalation()
924 xd->flags |= XIVE_IRQ_FLAG_NO_EOI; in kvmppc_xive_attach_escalation()
929 irq_dispose_mapping(xc->esc_virq[prio]); in kvmppc_xive_attach_escalation()
930 xc->esc_virq[prio] = 0; in kvmppc_xive_attach_escalation()
937 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_provision_queue()
938 struct kvmppc_xive *xive = xc->xive; in xive_provision_queue()
939 struct xive_q *q = &xc->queues[prio]; in xive_provision_queue()
943 if (WARN_ON(q->qpage)) in xive_provision_queue()
947 qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order); in xive_provision_queue()
950 prio, xc->server_num); in xive_provision_queue()
951 return -ENOMEM; in xive_provision_queue()
953 memset(qpage, 0, 1 << xive->q_order); in xive_provision_queue()
956 * Reconfigure the queue. This will set q->qpage only once the in xive_provision_queue()
958 * as we will stop doing EOIs for every IPI as soon as we observe in xive_provision_queue()
959 * qpage being non-NULL, and instead will only EOI when we receive in xive_provision_queue()
962 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage, in xive_provision_queue()
963 xive->q_order, true); in xive_provision_queue()
966 prio, xc->server_num); in xive_provision_queue()
970 /* Called with xive->lock held */
973 struct kvmppc_xive *xive = kvm->arch.xive; in xive_check_provisioning()
978 lockdep_assert_held(&xive->lock); in xive_check_provisioning()
981 if (xive->qmap & (1 << prio)) in xive_check_provisioning()
988 if (!vcpu->arch.xive_vcpu) in xive_check_provisioning()
1000 xive->qmap |= (1 << prio); in xive_check_provisioning()
1016 xc = vcpu->arch.xive_vcpu; in xive_inc_q_pending()
1020 q = &xc->queues[prio]; in xive_inc_q_pending()
1021 atomic_inc(&q->pending_count); in xive_inc_q_pending()
1026 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_try_pick_queue()
1031 return -ENXIO; in xive_try_pick_queue()
1032 if (!xc->valid) in xive_try_pick_queue()
1033 return -ENXIO; in xive_try_pick_queue()
1035 q = &xc->queues[prio]; in xive_try_pick_queue()
1036 if (WARN_ON(!q->qpage)) in xive_try_pick_queue()
1037 return -ENXIO; in xive_try_pick_queue()
1040 max = (q->msk + 1) - XIVE_Q_GAP; in xive_try_pick_queue()
1041 return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY; in xive_try_pick_queue()
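
xive_try_pick_queue() treats q->msk + 1 as the number of slots in the event queue and refuses to account more than (msk + 1) - XIVE_Q_GAP interrupts on it, so the queue can never fill up completely (the gap covers the vcpu's IPI plus a safety margin, as the comment at source line 659 above notes). A rough equivalent with C11 atomics in place of atomic_add_unless(), assuming XIVE_Q_GAP is 2 as in the kernel header:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define XIVE_Q_GAP 2 /* slots kept free for the IPI + safety margin */

    struct queue_acct {
        atomic_uint count; /* interrupts currently accounted on this queue */
        uint32_t msk;      /* queue size - 1 (queue holds msk + 1 entries) */
    };

    /* Equivalent of atomic_add_unless(v, 1, max): add 1 unless *v == max. */
    static bool add_unless(atomic_uint *v, unsigned int max)
    {
        unsigned int cur = atomic_load(v);
        while (cur < max) {
            if (atomic_compare_exchange_weak(v, &cur, cur + 1))
                return true;
        }
        return false;
    }

    static int try_pick_queue(struct queue_acct *q)
    {
        unsigned int max = (q->msk + 1) - XIVE_Q_GAP;
        return add_unless(&q->count, max) ? 0 : -1; /* kernel: 0 or -EBUSY */
    }

    int main(void)
    {
        struct queue_acct q = { .count = 0, .msk = 7 }; /* 8 entries, 6 usable */
        int reserved = 0;
        while (try_pick_queue(&q) == 0)
            reserved++;
        printf("reserved %d of %u slots\n", reserved, q.msk + 1);
        return 0;
    }
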
1054 return -EINVAL; in kvmppc_xive_select_target()
1068 if (!vcpu->arch.xive_vcpu) in kvmppc_xive_select_target()
1072 *server = vcpu->arch.xive_vcpu->server_num; in kvmppc_xive_select_target()
1080 return -EBUSY; in kvmppc_xive_select_target()
1097 arch_spin_lock(&sb->lock); in xive_lock_and_mask()
1098 old_prio = state->guest_priority; in xive_lock_and_mask()
1099 state->guest_priority = MASKED; in xive_lock_and_mask()
1101 if (!state->in_eoi) in xive_lock_and_mask()
1103 state->guest_priority = old_prio; in xive_lock_and_mask()
1104 arch_spin_unlock(&sb->lock); in xive_lock_and_mask()
1116 state->old_p = !!(val & 2); in xive_lock_and_mask()
1117 state->old_q = !!(val & 1); in xive_lock_and_mask()
1135 arch_spin_lock(&sb->lock); in xive_lock_for_unmask()
1136 if (!state->in_eoi) in xive_lock_for_unmask()
1138 arch_spin_unlock(&sb->lock); in xive_lock_for_unmask()
1151 if (state->guest_priority != MASKED) in xive_finish_unmask()
1158 if (state->old_q) in xive_finish_unmask()
1166 if (!state->old_p) in xive_finish_unmask()
1172 state->guest_priority = prio; in xive_finish_unmask()
1186 struct kvmppc_xive *xive = kvm->arch.xive; in xive_target_interrupt()
1209 if (state->act_priority != MASKED) in xive_target_interrupt()
1211 state->act_server, in xive_target_interrupt()
1212 state->act_priority); in xive_target_interrupt()
1216 state->act_priority = prio; in xive_target_interrupt()
1217 state->act_server = server; in xive_target_interrupt()
1224 prio, state->number); in xive_target_interrupt()
1232 * - Unless it was never enabled (or we run out of capacity)
1242 * - When masking, we set PQ to 10 and save the previous value
1245 * - When unmasking, if saved Q was set, we set PQ to 11
1251 * Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
1259 * - If H_EOI occurs while masked, we clear the saved P.
1261 * - When changing target, we account on the new target and
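
The (partially shown) comment block above describes the PQ accounting protocol this file uses for masking: masking forces PQ to 10 and records the previous P/Q, an H_EOI received while masked clears the saved P, and unmasking replays a coalesced event (saved Q) via PQ=11 before doing an effective EOI when no EOI is still owed. The sketch below models those rules with the ESB reduced to two booleans; it follows the comment, not the exact xive_lock_and_mask()/xive_finish_unmask() code paths, and leaves out all locking and hardware access.

    #include <stdbool.h>
    #include <stdio.h>

    struct esb { bool p, q; };          /* hardware PQ bits of the source */
    struct irq_sw_state {
        bool old_p, old_q;              /* PQ captured at mask time */
        bool masked;
    };

    /* Mask: force PQ = 10 (stop delivery) and remember what was there. */
    static void mask_source(struct esb *hw, struct irq_sw_state *st)
    {
        st->old_p = hw->p;
        st->old_q = hw->q;
        hw->p = true;
        hw->q = false;
        st->masked = true;
    }

    /* H_EOI while masked just forgets the saved P (the guest acked it). */
    static void eoi_while_masked(struct irq_sw_state *st)
    {
        st->old_p = false;
    }

    /*
     * Unmask: if an event was coalesced while masked (saved Q), go through
     * PQ = 11 so it is not lost; then, if no EOI is still owed (saved P
     * clear), do an effective EOI so delivery resumes (PQ back to 00).
     */
    static void unmask_source(struct esb *hw, struct irq_sw_state *st)
    {
        if (st->old_q) {
            hw->p = true;
            hw->q = true;
        }
        if (!st->old_p) {
            hw->p = false;            /* effective EOI: Q -> P -> trigger */
            hw->q = false;
        }
        st->masked = false;
    }

    int main(void)
    {
        struct esb hw = { .p = false, .q = true };   /* event pending */
        struct irq_sw_state st = { 0 };

        mask_source(&hw, &st);
        eoi_while_masked(&st);
        unmask_source(&hw, &st);
        printf("PQ after unmask: %d%d\n", hw.p, hw.q); /* 00: ready again */
        return 0;
    }
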
1270 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_set_xive()
1278 return -ENODEV; in kvmppc_xive_set_xive()
1285 mutex_lock(&xive->lock); in kvmppc_xive_set_xive()
1286 rc = xive_check_provisioning(xive->kvm, in kvmppc_xive_set_xive()
1288 mutex_unlock(&xive->lock); in kvmppc_xive_set_xive()
1297 return -EINVAL; in kvmppc_xive_set_xive()
1298 state = &sb->irq_state[idx]; in kvmppc_xive_set_xive()
1306 * xive_lock_and_mask() will also set state->guest_priority in kvmppc_xive_set_xive()
1325 new_act_prio = state->act_priority; in kvmppc_xive_set_xive()
1330 new_act_prio, state->act_server, state->act_priority); in kvmppc_xive_set_xive()
1335 * The condition for re-targeting the interrupt is that in kvmppc_xive_set_xive()
1347 (state->act_server != server || in kvmppc_xive_set_xive()
1348 state->act_priority != new_act_prio)) in kvmppc_xive_set_xive()
1362 state->saved_priority = priority; in kvmppc_xive_set_xive()
1364 arch_spin_unlock(&sb->lock); in kvmppc_xive_set_xive()
1371 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_get_xive()
1377 return -ENODEV; in kvmppc_xive_get_xive()
1381 return -EINVAL; in kvmppc_xive_get_xive()
1382 state = &sb->irq_state[idx]; in kvmppc_xive_get_xive()
1383 arch_spin_lock(&sb->lock); in kvmppc_xive_get_xive()
1384 *server = state->act_server; in kvmppc_xive_get_xive()
1385 *priority = state->guest_priority; in kvmppc_xive_get_xive()
1386 arch_spin_unlock(&sb->lock); in kvmppc_xive_get_xive()
1393 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_int_on()
1399 return -ENODEV; in kvmppc_xive_int_on()
1403 return -EINVAL; in kvmppc_xive_int_on()
1404 state = &sb->irq_state[idx]; in kvmppc_xive_int_on()
1411 if (state->act_priority == MASKED) { in kvmppc_xive_int_on()
1413 return -EINVAL; in kvmppc_xive_int_on()
1417 if (state->saved_priority == MASKED) in kvmppc_xive_int_on()
1424 xive_finish_unmask(xive, sb, state, state->saved_priority); in kvmppc_xive_int_on()
1425 arch_spin_unlock(&sb->lock); in kvmppc_xive_int_on()
1432 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_int_off()
1438 return -ENODEV; in kvmppc_xive_int_off()
1442 return -EINVAL; in kvmppc_xive_int_off()
1443 state = &sb->irq_state[idx]; in kvmppc_xive_int_off()
1450 state->saved_priority = xive_lock_and_mask(xive, sb, state); in kvmppc_xive_int_off()
1451 arch_spin_unlock(&sb->lock); in kvmppc_xive_int_off()
1465 state = &sb->irq_state[idx]; in xive_restore_pending_irq()
1466 if (!state->valid) in xive_restore_pending_irq()
1470 * Trigger the IPI. This assumes we never restore a pass-through in xive_restore_pending_irq()
1473 xive_irq_trigger(&state->ipi_data); in xive_restore_pending_irq()
1480 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_get_icp()
1485 /* Return the per-cpu state for state saving/migration */ in kvmppc_xive_get_icp()
1486 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | in kvmppc_xive_get_icp()
1487 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT | in kvmppc_xive_get_icp()
1493 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_set_icp()
1494 struct kvmppc_xive *xive = vcpu->kvm->arch.xive; in kvmppc_xive_set_icp()
1499 return -ENOENT; in kvmppc_xive_set_icp()
1508 xc->server_num, cppr, mfrr, xisr); in kvmppc_xive_set_icp()
1512 * shouldn't happen because the vcpu->mutex makes running a in kvmppc_xive_set_icp()
1515 if (WARN_ON(vcpu->arch.xive_pushed)) in kvmppc_xive_set_icp()
1516 return -EIO; in kvmppc_xive_set_icp()
1519 vcpu->arch.xive_saved_state.cppr = cppr; in kvmppc_xive_set_icp()
1520 xc->hw_cppr = xc->cppr = cppr; in kvmppc_xive_set_icp()
1524 * having a pending MFRR change, which will re-evaluate the in kvmppc_xive_set_icp()
1528 xc->mfrr = mfrr; in kvmppc_xive_set_icp()
1530 xive_irq_trigger(&xc->vp_ipi_data); in kvmppc_xive_set_icp()
1534 * the legacy "1 element" queue... for an IPI we simply ignore it, in kvmppc_xive_set_icp()
1542 xc->delayed_irq = xisr; in kvmppc_xive_set_icp()
1543 xive->delayed_irqs++; in kvmppc_xive_set_icp()
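
kvmppc_xive_get_icp() and kvmppc_xive_set_icp() move the emulated ICP state through a single one_reg u64 for save/restore and migration. The sketch below shows the packing arithmetic under the assumption that the layout matches the KVM uapi KVM_REG_PPC_ICP_* definitions (CPPR in bits 56-63, XISR in bits 32-55, MFRR in bits 24-31); treat the numeric shift values as assumptions, the symbolic names in the fragments above are the authoritative ones.

    #include <stdint.h>
    #include <stdio.h>

    #define ICP_CPPR_SHIFT 56
    #define ICP_XISR_SHIFT 32
    #define ICP_XISR_MASK  0xffffffULL
    #define ICP_MFRR_SHIFT 24

    static uint64_t pack_icp(uint8_t cppr, uint32_t xisr, uint8_t mfrr)
    {
        return (uint64_t)cppr << ICP_CPPR_SHIFT |
               ((uint64_t)xisr & ICP_XISR_MASK) << ICP_XISR_SHIFT |
               (uint64_t)mfrr << ICP_MFRR_SHIFT;
    }

    static void unpack_icp(uint64_t icpval, uint8_t *cppr, uint32_t *xisr, uint8_t *mfrr)
    {
        *cppr = (uint8_t)(icpval >> ICP_CPPR_SHIFT);
        *xisr = (uint32_t)((icpval >> ICP_XISR_SHIFT) & ICP_XISR_MASK);
        *mfrr = (uint8_t)(icpval >> ICP_MFRR_SHIFT);
    }

    int main(void)
    {
        uint8_t cppr, mfrr;
        uint32_t xisr;

        unpack_icp(pack_icp(0x05, 0x1234, 0xff), &cppr, &xisr, &mfrr);
        printf("cppr=%02x xisr=%06x mfrr=%02x\n", cppr, (unsigned)xisr, mfrr);
        return 0;
    }
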
1553 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_set_mapped()
1564 return -ENODEV; in kvmppc_xive_set_mapped()
1571 return -EINVAL; in kvmppc_xive_set_mapped()
1572 state = &sb->irq_state[idx]; in kvmppc_xive_set_mapped()
1575 * Mark the passed-through interrupt as going to a VCPU, in kvmppc_xive_set_mapped()
1583 * non-NULL to switch to passed-through or NULL for the in kvmppc_xive_set_mapped()
1594 * Mask and read state of IPI. We need to know if its P bit in kvmppc_xive_set_mapped()
1599 pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio, in kvmppc_xive_set_mapped()
1600 state->old_p, state->old_q); in kvmppc_xive_set_mapped()
1602 /* Turn the IPI hard off */ in kvmppc_xive_set_mapped()
1603 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_set_mapped()
1609 if (xive->ops && xive->ops->reset_mapped) in kvmppc_xive_set_mapped()
1610 xive->ops->reset_mapped(kvm, guest_irq); in kvmppc_xive_set_mapped()
1613 state->pt_number = hw_irq; in kvmppc_xive_set_mapped()
1614 state->pt_data = irq_data_get_irq_chip_data(host_data); in kvmppc_xive_set_mapped()
1618 * the IPI if it was already targeted. Otherwise this will in kvmppc_xive_set_mapped()
1623 kvmppc_xive_vp(xive, state->act_server), in kvmppc_xive_set_mapped()
1624 state->act_priority, state->number); in kvmppc_xive_set_mapped()
1629 * set in the IPI. If it was set, we know a slot may still be in in kvmppc_xive_set_mapped()
1633 if (prio != MASKED && !state->old_p) in kvmppc_xive_set_mapped()
1634 xive_vm_source_eoi(hw_irq, state->pt_data); in kvmppc_xive_set_mapped()
1637 state->old_p = state->old_q = false; in kvmppc_xive_set_mapped()
1641 state->guest_priority = prio; in kvmppc_xive_set_mapped()
1642 arch_spin_unlock(&sb->lock); in kvmppc_xive_set_mapped()
1651 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_clr_mapped()
1659 return -ENODEV; in kvmppc_xive_clr_mapped()
1665 return -EINVAL; in kvmppc_xive_clr_mapped()
1666 state = &sb->irq_state[idx]; in kvmppc_xive_clr_mapped()
1675 state->old_p, state->old_q); in kvmppc_xive_clr_mapped()
1682 if (state->old_p) in kvmppc_xive_clr_mapped()
1683 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11); in kvmppc_xive_clr_mapped()
1685 /* Release the passed-through interrupt to the host */ in kvmppc_xive_clr_mapped()
1693 state->pt_number = 0; in kvmppc_xive_clr_mapped()
1694 state->pt_data = NULL; in kvmppc_xive_clr_mapped()
1700 if (xive->ops && xive->ops->reset_mapped) { in kvmppc_xive_clr_mapped()
1701 xive->ops->reset_mapped(kvm, guest_irq); in kvmppc_xive_clr_mapped()
1704 /* Reconfigure the IPI */ in kvmppc_xive_clr_mapped()
1705 xive_native_configure_irq(state->ipi_number, in kvmppc_xive_clr_mapped()
1706 kvmppc_xive_vp(xive, state->act_server), in kvmppc_xive_clr_mapped()
1707 state->act_priority, state->number); in kvmppc_xive_clr_mapped()
1711 * occupied) or the interrupt is masked, we set the IPI in kvmppc_xive_clr_mapped()
1712 * to PQ=10 state. Otherwise we just re-enable it (PQ=00). in kvmppc_xive_clr_mapped()
1714 if (prio == MASKED || state->old_p) in kvmppc_xive_clr_mapped()
1715 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10); in kvmppc_xive_clr_mapped()
1717 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00); in kvmppc_xive_clr_mapped()
1721 state->guest_priority = prio; in kvmppc_xive_clr_mapped()
1722 arch_spin_unlock(&sb->lock); in kvmppc_xive_clr_mapped()
1730 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_disable_vcpu_interrupts()
1731 struct kvm *kvm = vcpu->kvm; in kvmppc_xive_disable_vcpu_interrupts()
1732 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_disable_vcpu_interrupts()
1735 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_disable_vcpu_interrupts()
1736 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in kvmppc_xive_disable_vcpu_interrupts()
1741 struct kvmppc_xive_irq_state *state = &sb->irq_state[j]; in kvmppc_xive_disable_vcpu_interrupts()
1743 if (!state->valid) in kvmppc_xive_disable_vcpu_interrupts()
1745 if (state->act_priority == MASKED) in kvmppc_xive_disable_vcpu_interrupts()
1747 if (state->act_server != xc->server_num) in kvmppc_xive_disable_vcpu_interrupts()
1751 arch_spin_lock(&sb->lock); in kvmppc_xive_disable_vcpu_interrupts()
1752 state->act_priority = MASKED; in kvmppc_xive_disable_vcpu_interrupts()
1753 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_disable_vcpu_interrupts()
1754 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1755 if (state->pt_number) { in kvmppc_xive_disable_vcpu_interrupts()
1756 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_disable_vcpu_interrupts()
1757 xive_native_configure_irq(state->pt_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1759 arch_spin_unlock(&sb->lock); in kvmppc_xive_disable_vcpu_interrupts()
1764 if (vcpu->arch.xive_esc_on) { in kvmppc_xive_disable_vcpu_interrupts()
1765 __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr + in kvmppc_xive_disable_vcpu_interrupts()
1767 vcpu->arch.xive_esc_on = false; in kvmppc_xive_disable_vcpu_interrupts()
1772 * This is safe because the vcpu->mutex is held, preventing in kvmppc_xive_disable_vcpu_interrupts()
1775 vcpu->arch.xive_esc_vaddr = 0; in kvmppc_xive_disable_vcpu_interrupts()
1776 vcpu->arch.xive_esc_raddr = 0; in kvmppc_xive_disable_vcpu_interrupts()
1781 * that EOI doesn't re-enable it, but just sets the stale_p flag to
1796 xd->stale_p = false; in xive_cleanup_single_escalation()
1798 if (!vcpu->arch.xive_esc_on) in xive_cleanup_single_escalation()
1799 xd->stale_p = true; in xive_cleanup_single_escalation()
1804 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_cleanup_vcpu()
1805 struct kvmppc_xive *xive = vcpu->kvm->arch.xive; in kvmppc_xive_cleanup_vcpu()
1814 pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num); in kvmppc_xive_cleanup_vcpu()
1817 xc->valid = false; in kvmppc_xive_cleanup_vcpu()
1820 /* Mask the VP IPI */ in kvmppc_xive_cleanup_vcpu()
1821 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_cleanup_vcpu()
1825 if (xc->esc_virq[i]) { in kvmppc_xive_cleanup_vcpu()
1826 if (kvmppc_xive_has_single_escalation(xc->xive)) in kvmppc_xive_cleanup_vcpu()
1827 xive_cleanup_single_escalation(vcpu, xc->esc_virq[i]); in kvmppc_xive_cleanup_vcpu()
1828 free_irq(xc->esc_virq[i], vcpu); in kvmppc_xive_cleanup_vcpu()
1829 irq_dispose_mapping(xc->esc_virq[i]); in kvmppc_xive_cleanup_vcpu()
1830 kfree(xc->esc_virq_names[i]); in kvmppc_xive_cleanup_vcpu()
1835 xive_native_disable_vp(xc->vp_id); in kvmppc_xive_cleanup_vcpu()
1838 vcpu->arch.xive_cam_word = 0; in kvmppc_xive_cleanup_vcpu()
1842 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_cleanup_vcpu()
1844 xive_native_disable_queue(xc->vp_id, q, i); in kvmppc_xive_cleanup_vcpu()
1845 if (q->qpage) { in kvmppc_xive_cleanup_vcpu()
1846 free_pages((unsigned long)q->qpage, in kvmppc_xive_cleanup_vcpu()
1847 xive->q_page_order); in kvmppc_xive_cleanup_vcpu()
1848 q->qpage = NULL; in kvmppc_xive_cleanup_vcpu()
1852 /* Free the IPI */ in kvmppc_xive_cleanup_vcpu()
1853 if (xc->vp_ipi) { in kvmppc_xive_cleanup_vcpu()
1854 xive_cleanup_irq_data(&xc->vp_ipi_data); in kvmppc_xive_cleanup_vcpu()
1855 xive_native_free_irq(xc->vp_ipi); in kvmppc_xive_cleanup_vcpu()
1861 vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; in kvmppc_xive_cleanup_vcpu()
1862 vcpu->arch.xive_vcpu = NULL; in kvmppc_xive_cleanup_vcpu()
1867 /* We have a block of xive->nr_servers VPs. We just need to check in kvmppc_xive_vcpu_id_valid()
1870 return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers; in kvmppc_xive_vcpu_id_valid()
1879 return -EINVAL; in kvmppc_xive_compute_vp_id()
1882 if (xive->vp_base == XIVE_INVALID_VP) { in kvmppc_xive_compute_vp_id()
1883 xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers); in kvmppc_xive_compute_vp_id()
1884 pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers); in kvmppc_xive_compute_vp_id()
1886 if (xive->vp_base == XIVE_INVALID_VP) in kvmppc_xive_compute_vp_id()
1887 return -ENOSPC; in kvmppc_xive_compute_vp_id()
1891 if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) { in kvmppc_xive_compute_vp_id()
1893 return -EEXIST; in kvmppc_xive_compute_vp_id()
1904 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_connect_vcpu()
1906 int i, r = -EBUSY; in kvmppc_xive_connect_vcpu()
1911 if (dev->ops != &kvm_xive_ops) { in kvmppc_xive_connect_vcpu()
1913 return -EPERM; in kvmppc_xive_connect_vcpu()
1915 if (xive->kvm != vcpu->kvm) in kvmppc_xive_connect_vcpu()
1916 return -EPERM; in kvmppc_xive_connect_vcpu()
1917 if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) in kvmppc_xive_connect_vcpu()
1918 return -EBUSY; in kvmppc_xive_connect_vcpu()
1921 mutex_lock(&xive->lock); in kvmppc_xive_connect_vcpu()
1929 r = -ENOMEM; in kvmppc_xive_connect_vcpu()
1933 vcpu->arch.xive_vcpu = xc; in kvmppc_xive_connect_vcpu()
1934 xc->xive = xive; in kvmppc_xive_connect_vcpu()
1935 xc->vcpu = vcpu; in kvmppc_xive_connect_vcpu()
1936 xc->server_num = cpu; in kvmppc_xive_connect_vcpu()
1937 xc->vp_id = vp_id; in kvmppc_xive_connect_vcpu()
1938 xc->mfrr = 0xff; in kvmppc_xive_connect_vcpu()
1939 xc->valid = true; in kvmppc_xive_connect_vcpu()
1941 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); in kvmppc_xive_connect_vcpu()
1946 pr_err("inconsistent save-restore setup for VCPU %d\n", cpu); in kvmppc_xive_connect_vcpu()
1947 r = -EIO; in kvmppc_xive_connect_vcpu()
1952 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); in kvmppc_xive_connect_vcpu()
1953 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); in kvmppc_xive_connect_vcpu()
1955 /* Allocate IPI */ in kvmppc_xive_connect_vcpu()
1956 xc->vp_ipi = xive_native_alloc_irq(); in kvmppc_xive_connect_vcpu()
1957 if (!xc->vp_ipi) { in kvmppc_xive_connect_vcpu()
1958 pr_err("Failed to allocate xive irq for VCPU IPI\n"); in kvmppc_xive_connect_vcpu()
1959 r = -EIO; in kvmppc_xive_connect_vcpu()
1962 pr_devel(" IPI=0x%x\n", xc->vp_ipi); in kvmppc_xive_connect_vcpu()
1964 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data); in kvmppc_xive_connect_vcpu()
1972 r = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive)); in kvmppc_xive_connect_vcpu()
1981 * our mfrr change notifications. If the VCPU is hot-plugged, we in kvmppc_xive_connect_vcpu()
1986 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_connect_vcpu()
1993 if (xive->qmap & (1 << i)) { in kvmppc_xive_connect_vcpu()
2001 r = xive_native_configure_queue(xc->vp_id, in kvmppc_xive_connect_vcpu()
2016 /* Route the IPI */ in kvmppc_xive_connect_vcpu()
2017 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI); in kvmppc_xive_connect_vcpu()
2019 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00); in kvmppc_xive_connect_vcpu()
2022 mutex_unlock(&xive->lock); in kvmppc_xive_connect_vcpu()
2028 vcpu->arch.irq_type = KVMPPC_IRQ_XICS; in kvmppc_xive_connect_vcpu()
2045 state = &sb->irq_state[idx]; in xive_pre_save_set_queued()
2048 if (!state->valid) { in xive_pre_save_set_queued()
2058 if (!state->saved_p) in xive_pre_save_set_queued()
2062 state->in_queue = true; in xive_pre_save_set_queued()
2069 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq]; in xive_pre_save_mask_irq()
2071 if (!state->valid) in xive_pre_save_mask_irq()
2075 state->saved_scan_prio = xive_lock_and_mask(xive, sb, state); in xive_pre_save_mask_irq()
2078 state->saved_p = state->old_p; in xive_pre_save_mask_irq()
2079 state->saved_q = state->old_q; in xive_pre_save_mask_irq()
2082 arch_spin_unlock(&sb->lock); in xive_pre_save_mask_irq()
2089 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq]; in xive_pre_save_unmask_irq()
2091 if (!state->valid) in xive_pre_save_unmask_irq()
2102 if (state->saved_scan_prio != MASKED) in xive_pre_save_unmask_irq()
2103 xive_finish_unmask(xive, sb, state, state->saved_scan_prio); in xive_pre_save_unmask_irq()
2106 arch_spin_unlock(&sb->lock); in xive_pre_save_unmask_irq()
2111 u32 idx = q->idx; in xive_pre_save_queue()
2112 u32 toggle = q->toggle; in xive_pre_save_queue()
2116 irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle); in xive_pre_save_queue()
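
__xive_read_eq() walks an event queue whose entries are 32-bit words carrying a generation flag in the top bit: an entry is valid when that bit differs from the software toggle, consuming it advances the index, and the toggle flips whenever the index wraps. The sketch below reproduces that walk in userspace with host-endian values (the real queue entries are big-endian, hence the be32_to_cpup() calls elsewhere in this listing).

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t read_eq(const uint32_t *qpage, uint32_t msk,
                            uint32_t *idx, uint32_t *toggle)
    {
        uint32_t cur;

        if (!qpage)
            return 0;
        cur = qpage[*idx];
        if ((cur >> 31) == *toggle)      /* same generation: slot is empty */
            return 0;
        *idx = (*idx + 1) & msk;         /* consume the entry */
        if (*idx == 0)
            *toggle ^= 1;                /* wrapped: expect the other generation */
        return cur & 0x7fffffff;         /* strip the generation bit */
    }

    int main(void)
    {
        /* 4-entry queue; first pass written with the generation bit set. */
        uint32_t qpage[4] = { 0x80000010, 0x80000011, 0, 0 };
        uint32_t idx = 0, toggle = 0, irq;

        while ((irq = read_eq(qpage, 3, &idx, &toggle)) != 0)
            printf("popped irq 0x%x\n", irq);
        return 0;
    }
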
2132 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
2133 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in xive_pre_save_scan()
2141 kvm_for_each_vcpu(i, vcpu, xive->kvm) { in xive_pre_save_scan()
2142 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_pre_save_scan()
2146 if (xc->queues[j].qpage) in xive_pre_save_scan()
2147 xive_pre_save_queue(xive, &xc->queues[j]); in xive_pre_save_scan()
2152 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
2153 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in xive_pre_save_scan()
2166 for (i = 0; i <= xive->max_sbid; i++) { in xive_post_save_scan()
2167 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in xive_post_save_scan()
2171 sb->irq_state[j].in_queue = false; in xive_post_save_scan()
2175 xive->saved_src_count = 0; in xive_post_save_scan()
2191 return -ENOENT; in xive_get_source()
2193 state = &sb->irq_state[idx]; in xive_get_source()
2195 if (!state->valid) in xive_get_source()
2196 return -ENOENT; in xive_get_source()
2216 if (xive->saved_src_count == 0) in xive_get_source()
2218 xive->saved_src_count++; in xive_get_source()
2221 val = state->act_server; in xive_get_source()
2222 prio = state->saved_scan_prio; in xive_get_source()
2226 prio = state->saved_priority; in xive_get_source()
2229 if (state->lsi) { in xive_get_source()
2231 if (state->saved_p) in xive_get_source()
2234 if (state->saved_p) in xive_get_source()
2237 if (state->saved_q) in xive_get_source()
2241 * We mark it pending (which will attempt a re-delivery) in xive_get_source()
2246 if (state->in_queue || (prio == MASKED && state->saved_q)) in xive_get_source()
2254 if (xive->saved_src_count == xive->src_count) in xive_get_source()
2259 return -EFAULT; in xive_get_source()
2272 mutex_lock(&xive->lock); in kvmppc_xive_create_src_block()
2274 /* block already exists - somebody else got here first */ in kvmppc_xive_create_src_block()
2275 if (xive->src_blocks[bid]) in kvmppc_xive_create_src_block()
2283 sb->id = bid; in kvmppc_xive_create_src_block()
2286 sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i; in kvmppc_xive_create_src_block()
2287 sb->irq_state[i].eisn = 0; in kvmppc_xive_create_src_block()
2288 sb->irq_state[i].guest_priority = MASKED; in kvmppc_xive_create_src_block()
2289 sb->irq_state[i].saved_priority = MASKED; in kvmppc_xive_create_src_block()
2290 sb->irq_state[i].act_priority = MASKED; in kvmppc_xive_create_src_block()
2293 xive->src_blocks[bid] = sb; in kvmppc_xive_create_src_block()
2295 if (bid > xive->max_sbid) in kvmppc_xive_create_src_block()
2296 xive->max_sbid = bid; in kvmppc_xive_create_src_block()
2299 mutex_unlock(&xive->lock); in kvmppc_xive_create_src_block()
2300 return xive->src_blocks[bid]; in kvmppc_xive_create_src_block()
2305 struct kvm *kvm = xive->kvm; in xive_check_delayed_irq()
2310 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_check_delayed_irq()
2315 if (xc->delayed_irq == irq) { in xive_check_delayed_irq()
2316 xc->delayed_irq = 0; in xive_check_delayed_irq()
2317 xive->delayed_irqs--; in xive_check_delayed_irq()
2336 return -ENOENT; in xive_set_source()
2347 return -ENOMEM; in xive_set_source()
2350 state = &sb->irq_state[idx]; in xive_set_source()
2355 return -EFAULT; in xive_set_source()
2365 * If the source doesn't already have an IPI, allocate in xive_set_source()
2368 if (!state->ipi_number) { in xive_set_source()
2369 state->ipi_number = xive_native_alloc_irq(); in xive_set_source()
2370 if (state->ipi_number == 0) { in xive_set_source()
2371 pr_devel("Failed to allocate IPI !\n"); in xive_set_source()
2372 return -ENOMEM; in xive_set_source()
2374 xive_native_populate_irq_data(state->ipi_number, &state->ipi_data); in xive_set_source()
2375 pr_devel(" src_ipi=0x%x\n", state->ipi_number); in xive_set_source()
2385 state->guest_priority = 0; in xive_set_source()
2397 state->act_priority = MASKED; in xive_set_source()
2404 arch_spin_unlock(&sb->lock); in xive_set_source()
2409 mutex_lock(&xive->lock); in xive_set_source()
2410 rc = xive_check_provisioning(xive->kvm, act_prio); in xive_set_source()
2411 mutex_unlock(&xive->lock); in xive_set_source()
2415 rc = xive_target_interrupt(xive->kvm, state, in xive_set_source()
2420 * the guest re-targets it. in xive_set_source()
2428 if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) { in xive_set_source()
2434 state->old_p = false; in xive_set_source()
2435 state->old_q = false; in xive_set_source()
2436 state->lsi = false; in xive_set_source()
2437 state->asserted = false; in xive_set_source()
2441 state->lsi = true; in xive_set_source()
2443 state->asserted = true; in xive_set_source()
2444 pr_devel(" LSI ! Asserted=%d\n", state->asserted); in xive_set_source()
2458 state->old_p = true; in xive_set_source()
2460 state->old_q = true; in xive_set_source()
2462 pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q); in xive_set_source()
2467 * re-trigger if necessary. in xive_set_source()
2471 state->guest_priority = MASKED; in xive_set_source()
2472 state->saved_priority = guest_prio; in xive_set_source()
2476 state->saved_priority = guest_prio; in xive_set_source()
2480 if (!state->valid) in xive_set_source()
2481 xive->src_count++; in xive_set_source()
2482 state->valid = true; in xive_set_source()
2490 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_set_irq()
2496 return -ENODEV; in kvmppc_xive_set_irq()
2500 return -EINVAL; in kvmppc_xive_set_irq()
2503 state = &sb->irq_state[idx]; in kvmppc_xive_set_irq()
2504 if (!state->valid) in kvmppc_xive_set_irq()
2505 return -EINVAL; in kvmppc_xive_set_irq()
2507 /* We don't allow a trigger on a passed-through interrupt */ in kvmppc_xive_set_irq()
2508 if (state->pt_number) in kvmppc_xive_set_irq()
2509 return -EINVAL; in kvmppc_xive_set_irq()
2511 if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL) in kvmppc_xive_set_irq()
2512 state->asserted = true; in kvmppc_xive_set_irq()
2514 state->asserted = false; in kvmppc_xive_set_irq()
2518 /* Trigger the IPI */ in kvmppc_xive_set_irq()
2519 xive_irq_trigger(&state->ipi_data); in kvmppc_xive_set_irq()
2531 return -EFAULT; in kvmppc_xive_set_nr_servers()
2536 return -EINVAL; in kvmppc_xive_set_nr_servers()
2538 mutex_lock(&xive->lock); in kvmppc_xive_set_nr_servers()
2539 if (xive->vp_base != XIVE_INVALID_VP) in kvmppc_xive_set_nr_servers()
2547 rc = -EBUSY; in kvmppc_xive_set_nr_servers()
2552 xive->nr_servers = KVM_MAX_VCPUS; in kvmppc_xive_set_nr_servers()
2554 xive->nr_servers = nr_servers; in kvmppc_xive_set_nr_servers()
2556 mutex_unlock(&xive->lock); in kvmppc_xive_set_nr_servers()
2563 struct kvmppc_xive *xive = dev->private; in xive_set_attr()
2566 switch (attr->group) { in xive_set_attr()
2568 return xive_set_source(xive, attr->attr, attr->addr); in xive_set_attr()
2570 switch (attr->attr) { in xive_set_attr()
2572 return kvmppc_xive_set_nr_servers(xive, attr->addr); in xive_set_attr()
2575 return -ENXIO; in xive_set_attr()
2580 struct kvmppc_xive *xive = dev->private; in xive_get_attr()
2583 switch (attr->group) { in xive_get_attr()
2585 return xive_get_source(xive, attr->attr, attr->addr); in xive_get_attr()
2587 return -ENXIO; in xive_get_attr()
2593 switch (attr->group) { in xive_has_attr()
2595 if (attr->attr >= KVMPPC_XICS_FIRST_IRQ && in xive_has_attr()
2596 attr->attr < KVMPPC_XICS_NR_IRQS) in xive_has_attr()
2600 switch (attr->attr) { in xive_has_attr()
2605 return -ENXIO; in xive_has_attr()
2619 struct kvmppc_xive_irq_state *state = &sb->irq_state[i]; in kvmppc_xive_free_sources()
2621 if (!state->valid) in kvmppc_xive_free_sources()
2624 kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data); in kvmppc_xive_free_sources()
2625 xive_cleanup_irq_data(&state->ipi_data); in kvmppc_xive_free_sources()
2626 xive_native_free_irq(state->ipi_number); in kvmppc_xive_free_sources()
2628 /* Pass-through, cleanup too but keep IRQ hw data */ in kvmppc_xive_free_sources()
2629 if (state->pt_number) in kvmppc_xive_free_sources()
2630 kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data); in kvmppc_xive_free_sources()
2632 state->valid = false; in kvmppc_xive_free_sources()
2637 * Called when device fd is closed. kvm->lock is held.
2641 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_release()
2642 struct kvm *kvm = xive->kvm; in kvmppc_xive_release()
2657 debugfs_remove(xive->dentry); in kvmppc_xive_release()
2664 * Take vcpu->mutex to ensure that no one_reg get/set ioctl in kvmppc_xive_release()
2666 * Holding the vcpu->mutex also means that the vcpu cannot in kvmppc_xive_release()
2671 mutex_lock(&vcpu->mutex); in kvmppc_xive_release()
2673 mutex_unlock(&vcpu->mutex); in kvmppc_xive_release()
2677 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type in kvmppc_xive_release()
2678 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe in kvmppc_xive_release()
2682 kvm->arch.xive = NULL; in kvmppc_xive_release()
2685 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_release()
2686 if (xive->src_blocks[i]) in kvmppc_xive_release()
2687 kvmppc_xive_free_sources(xive->src_blocks[i]); in kvmppc_xive_release()
2688 kfree(xive->src_blocks[i]); in kvmppc_xive_release()
2689 xive->src_blocks[i] = NULL; in kvmppc_xive_release()
2692 if (xive->vp_base != XIVE_INVALID_VP) in kvmppc_xive_release()
2693 xive_native_free_vp_block(xive->vp_base); in kvmppc_xive_release()
2717 &kvm->arch.xive_devices.native : in kvmppc_xive_get_device()
2718 &kvm->arch.xive_devices.xics_on_xive; in kvmppc_xive_get_device()
2732 * Create a XICS device with XIVE backend. kvm->lock is held.
2737 struct kvm *kvm = dev->kvm; in kvmppc_xive_create()
2742 if (kvm->arch.xive) in kvmppc_xive_create()
2743 return -EEXIST; in kvmppc_xive_create()
2747 return -ENOMEM; in kvmppc_xive_create()
2749 dev->private = xive; in kvmppc_xive_create()
2750 xive->dev = dev; in kvmppc_xive_create()
2751 xive->kvm = kvm; in kvmppc_xive_create()
2752 mutex_init(&xive->lock); in kvmppc_xive_create()
2755 xive->q_order = xive_native_default_eq_shift(); in kvmppc_xive_create()
2756 if (xive->q_order < PAGE_SHIFT) in kvmppc_xive_create()
2757 xive->q_page_order = 0; in kvmppc_xive_create()
2759 xive->q_page_order = xive->q_order - PAGE_SHIFT; in kvmppc_xive_create()
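
The two lines above convert between a queue size expressed as a power-of-two order in bytes (q_order) and the page-allocator order used by __get_free_pages(): anything up to one page becomes order 0. A trivial sketch, with the page shift as a parameter since ppc64 kernels are built with either 64KiB or 4KiB pages:

    #include <stdio.h>

    /* q_order is log2 of the queue size in bytes; the result is in pages. */
    static unsigned int q_page_order(unsigned int q_order, unsigned int page_shift)
    {
        return q_order < page_shift ? 0 : q_order - page_shift;
    }

    int main(void)
    {
        printf("64KiB queue on 64KiB pages -> order %u\n", q_page_order(16, 16));
        printf("64KiB queue on 4KiB pages  -> order %u\n", q_page_order(16, 12));
        return 0;
    }
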
2762 xive->vp_base = XIVE_INVALID_VP; in kvmppc_xive_create()
2766 xive->nr_servers = KVM_MAX_VCPUS; in kvmppc_xive_create()
2769 xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION; in kvmppc_xive_create()
2772 xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE; in kvmppc_xive_create()
2774 kvm->arch.xive = xive; in kvmppc_xive_create()
2808 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_debug_show_queues()
2812 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_debug_show_queues()
2815 if (!q->qpage && !xc->esc_virq[i]) in kvmppc_xive_debug_show_queues()
2818 if (q->qpage) { in kvmppc_xive_debug_show_queues()
2820 idx = q->idx; in kvmppc_xive_debug_show_queues()
2821 i0 = be32_to_cpup(q->qpage + idx); in kvmppc_xive_debug_show_queues()
2822 idx = (idx + 1) & q->msk; in kvmppc_xive_debug_show_queues()
2823 i1 = be32_to_cpup(q->qpage + idx); in kvmppc_xive_debug_show_queues()
2824 seq_printf(m, "T=%d %08x %08x...\n", q->toggle, in kvmppc_xive_debug_show_queues()
2827 if (xc->esc_virq[i]) { in kvmppc_xive_debug_show_queues()
2828 struct xive_irq_data *xd = irq_get_chip_data(xc->esc_virq[i]); in kvmppc_xive_debug_show_queues()
2832 xc->esc_virq[i], in kvmppc_xive_debug_show_queues()
2833 (pq & XIVE_ESB_VAL_P) ? 'P' : '-', in kvmppc_xive_debug_show_queues()
2834 (pq & XIVE_ESB_VAL_Q) ? 'Q' : '-', in kvmppc_xive_debug_show_queues()
2835 xd->eoi_page); in kvmppc_xive_debug_show_queues()
2849 struct kvmppc_xive_irq_state *state = &sb->irq_state[i]; in kvmppc_xive_debug_show_sources()
2854 if (!state->valid) in kvmppc_xive_debug_show_sources()
2861 seq_printf(m, "%08x %08x/%02x", state->number, hw_num, in kvmppc_xive_debug_show_sources()
2862 xd->src_chip); in kvmppc_xive_debug_show_sources()
2863 if (state->lsi) in kvmppc_xive_debug_show_sources()
2864 seq_printf(m, " %cLSI", state->asserted ? '^' : ' '); in kvmppc_xive_debug_show_sources()
2869 state->ipi_number == hw_num ? "IPI" : " PT", in kvmppc_xive_debug_show_sources()
2870 pq & XIVE_ESB_VAL_P ? 'P' : '-', in kvmppc_xive_debug_show_sources()
2871 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', in kvmppc_xive_debug_show_sources()
2872 state->eisn, state->act_server, in kvmppc_xive_debug_show_sources()
2873 state->act_priority); in kvmppc_xive_debug_show_sources()
2881 struct kvmppc_xive *xive = m->private; in xive_debug_show()
2882 struct kvm *kvm = xive->kvm; in xive_debug_show()
2902 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_debug_show()
2909 xc->server_num, xc->vp_id, xc->vp_chip_id, in xive_debug_show()
2910 xc->cppr, xc->hw_cppr, in xive_debug_show()
2911 xc->mfrr, xc->pending, in xive_debug_show()
2912 xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); in xive_debug_show()
2916 t_rm_h_xirr += xc->stat_rm_h_xirr; in xive_debug_show()
2917 t_rm_h_ipoll += xc->stat_rm_h_ipoll; in xive_debug_show()
2918 t_rm_h_cppr += xc->stat_rm_h_cppr; in xive_debug_show()
2919 t_rm_h_eoi += xc->stat_rm_h_eoi; in xive_debug_show()
2920 t_rm_h_ipi += xc->stat_rm_h_ipi; in xive_debug_show()
2921 t_vm_h_xirr += xc->stat_vm_h_xirr; in xive_debug_show()
2922 t_vm_h_ipoll += xc->stat_vm_h_ipoll; in xive_debug_show()
2923 t_vm_h_cppr += xc->stat_vm_h_cppr; in xive_debug_show()
2924 t_vm_h_eoi += xc->stat_vm_h_eoi; in xive_debug_show()
2925 t_vm_h_ipi += xc->stat_vm_h_ipi; in xive_debug_show()
2937 for (i = 0; i <= xive->max_sbid; i++) { in xive_debug_show()
2938 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in xive_debug_show()
2941 arch_spin_lock(&sb->lock); in xive_debug_show()
2943 arch_spin_unlock(&sb->lock); in xive_debug_show()
2954 xive->dentry = debugfs_create_file("xive", S_IRUGO, xive->kvm->debugfs_dentry, in xive_debugfs_init()
2962 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_init()
2969 .name = "kvm-xive",