Lines matching +full:0 +full:xd in arch/powerpc/kvm/book3s_xive.c (KVM's XICS-on-XIVE interrupt controller emulation)

33 #define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_mmio))
34 #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
63 cppr = ack & 0xff; in xive_vm_ack_pending()
80 static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset) in xive_vm_esb_load()
84 if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) in xive_vm_esb_load()
87 val = __raw_readq(__x_eoi_page(xd) + offset); in xive_vm_esb_load()
95 static void xive_vm_source_eoi(u32 hw_irq, struct xive_irq_data *xd) in xive_vm_source_eoi()
98 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) in xive_vm_source_eoi()
99 __raw_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI); in xive_vm_source_eoi()
100 else if (xd->flags & XIVE_IRQ_FLAG_LSI) { in xive_vm_source_eoi()
106 __raw_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI); in xive_vm_source_eoi()
119 eoi_val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_00); in xive_vm_source_eoi()
122 if ((eoi_val & 1) && __x_trig_page(xd)) in xive_vm_source_eoi()
123 __raw_writeq(0, __x_trig_page(xd)); in xive_vm_source_eoi()
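The __x_eoi_page()/__x_trig_page() macros at lines 33-34 resolve to a source's two ESB MMIO pages, and xive_vm_esb_load()/xive_vm_source_eoi() drive the two-bit PQ state machine through them. Below is a minimal user-space model of that state machine, assuming the ESB semantics visible in these lines (a load at a XIVE_ESB_SET_PQ_xx offset atomically sets PQ and returns the previous value, Q is the low bit, and a store to the trigger page re-fires the source); the struct and helper names are hypothetical, not kernel API.

#include <stdbool.h>

#define ESB_P 0x2	/* event was forwarded to a queue; further triggers only set Q */
#define ESB_Q 0x1	/* a trigger arrived while P was set (coalesced occurrence)    */

struct esb_model {
	unsigned int pq;	/* current PQ state of one interrupt source        */
	bool store_eoi;		/* counterpart of the XIVE_IRQ_FLAG_STORE_EOI flag */
	bool lsi;		/* counterpart of the XIVE_IRQ_FLAG_LSI flag       */
};

/* Model of xive_vm_esb_load(): atomically set PQ, return the old value. */
static unsigned int esb_set_pq(struct esb_model *e, unsigned int pq)
{
	unsigned int old = e->pq;

	e->pq = pq;
	return old;
}

/* Model of the MSI branch of xive_vm_source_eoi(): reset PQ to 00 and
 * re-fire the source if a further occurrence was latched in Q while the
 * interrupt was being handled. */
static void esb_eoi(struct esb_model *e, void (*retrigger)(void))
{
	unsigned int old = esb_set_pq(e, 0x0);	/* XIVE_ESB_SET_PQ_00 */

	if ((old & ESB_Q) && retrigger)
		retrigger();			/* store to the trigger page */
}
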
136 u32 hirq = 0; in xive_vm_scan_interrupts()
137 u8 prio = 0xff; in xive_vm_scan_interrupts()
140 while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) { in xive_vm_scan_interrupts()
146 * If pending is 0 this will return 0xff which is what in xive_vm_scan_interrupts()
175 * Try to fetch from the queue. Will return 0 for a in xive_vm_scan_interrupts()
176 * non-queueing priority (ie, qpage = 0). in xive_vm_scan_interrupts()
186 * We also need to do that if prio is 0 and we had no in xive_vm_scan_interrupts()
194 if (hirq == XICS_IPI || (prio == 0 && !qpage)) { in xive_vm_scan_interrupts()
220 int p = atomic_xchg(&q->pending_count, 0); in xive_vm_scan_interrupts()
267 * loop will only exit with hirq != 0 if prio is lower than in xive_vm_scan_interrupts()
275 * as the HW interrupt we use for IPIs is routed to priority 0. in xive_vm_scan_interrupts()
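In xive_vm_scan_interrupts() the pending byte has one bit per priority, with bit 0 the most favored; the "If pending is 0 this will return 0xff" comment at line 146 refers to computing the candidate priority as ffs(pending) - 1 in a u8, which underflows to 0xff (no valid priority) when nothing is pending. A hypothetical stand-alone helper making that trick explicit:

#include <strings.h>	/* ffs() */
#include <stdint.h>

/* Most favored pending priority, 0xff when nothing is pending. */
static uint8_t most_favored_prio(uint8_t pending)
{
	return (uint8_t)(ffs(pending) - 1);	/* ffs(0) - 1 wraps to 0xff */
}
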
300 pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n", in xive_vm_h_xirr()
309 pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n", in xive_vm_h_xirr()
313 if (hirq & 0xff000000) in xive_vm_h_xirr()
314 pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq); in xive_vm_h_xirr()
324 * hirq = 0; in xive_vm_h_xirr()
354 pending = 0xff; in xive_vm_h_ipoll()
358 u8 pipr = be64_to_cpu(qw1) & 0xff; in xive_vm_h_ipoll()
377 if (xc->mfrr != 0xff) { in xive_vm_push_pending_to_hw()
381 pending |= 0x80; in xive_vm_push_pending_to_hw()
401 struct xive_irq_data *xd; in xive_vm_scan_for_rerouted_irqs()
418 irq = entry & 0x7fffffff; in xive_vm_scan_for_rerouted_irqs()
436 qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY); in xive_vm_scan_for_rerouted_irqs()
439 kvmppc_xive_select_irq(state, &hw_num, &xd); in xive_vm_scan_for_rerouted_irqs()
442 if (!(xd->flags & XIVE_IRQ_FLAG_LSI)) in xive_vm_scan_for_rerouted_irqs()
443 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11); in xive_vm_scan_for_rerouted_irqs()
446 xive_vm_source_eoi(hw_num, xd); in xive_vm_scan_for_rerouted_irqs()
450 if (idx == 0) in xive_vm_scan_for_rerouted_irqs()
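Lines 418 and 436 in xive_vm_scan_for_rerouted_irqs() expose the event-queue entry layout: each slot is a 32-bit big-endian word whose top bit is the generation (toggle) bit and whose low 31 bits carry the interrupt number, and a rerouted entry is rewritten as XICS_DUMMY with the toggle bit preserved so the scan stays consistent. A small sketch of that encoding, with ntohl()/htonl() standing in for be32_to_cpu()/cpu_to_be32() and XICS_DUMMY_IRQ as a placeholder value rather than the kernel constant:

#include <stdint.h>
#include <arpa/inet.h>	/* ntohl()/htonl() in place of be32_to_cpu()/cpu_to_be32() */

#define XICS_DUMMY_IRQ	2	/* placeholder, not the kernel's XICS_DUMMY value */

/* Interrupt number carried by one big-endian event-queue entry. */
static uint32_t eq_entry_irq(uint32_t be_entry)
{
	return ntohl(be_entry) & 0x7fffffff;
}

/* Invalidate a rerouted entry in place, as line 436 does: keep the
 * generation (toggle) bit, replace the payload with the dummy number. */
static uint32_t eq_entry_to_dummy(uint32_t be_entry)
{
	return htonl((ntohl(be_entry) & 0x80000000) | XICS_DUMMY_IRQ);
}
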
521 struct xive_irq_data *xd; in xive_vm_h_eoi()
523 u32 irq = xirr & 0x00ffffff, hw_num; in xive_vm_h_eoi()
525 int rc = 0; in xive_vm_h_eoi()
539 if (irq == XICS_IPI || irq == 0) { in xive_vm_h_eoi()
559 kvmppc_xive_select_irq(state, &hw_num, &xd); in xive_vm_h_eoi()
588 xive_vm_source_eoi(hw_num, xd); in xive_vm_h_eoi()
592 __raw_writeq(0, __x_trig_page(xd)); in xive_vm_h_eoi()
652 __raw_writeq(0, __x_trig_page(&xc->vp_ipi_data)); in xive_vm_h_ipi()
713 vcpu->arch.irq_pending = 0; in kvmppc_xive_push_vcpu()
746 /* Now P is 0, we can clear the flag */ in kvmppc_xive_push_vcpu()
747 vcpu->arch.xive_esc_on = 0; in kvmppc_xive_push_vcpu()
772 /* Second load to recover the context state (Words 0 and 1) */ in kvmppc_xive_pull_vcpu()
777 vcpu->arch.xive_saved_state.lsmfb = 0; in kvmppc_xive_pull_vcpu()
778 vcpu->arch.xive_saved_state.ack = 0xff; in kvmppc_xive_pull_vcpu()
779 vcpu->arch.xive_pushed = 0; in kvmppc_xive_pull_vcpu()
825 static bool xive_irq_trigger(struct xive_irq_data *xd) in xive_irq_trigger()
828 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) in xive_irq_trigger()
832 if (WARN_ON(!xd->trig_mmio)) in xive_irq_trigger()
835 out_be64(xd->trig_mmio, 0); in xive_irq_trigger()
876 return 0; in kvmppc_xive_attach_escalation()
919 struct xive_irq_data *xd = irq_get_chip_data(xc->esc_virq[prio]); in kvmppc_xive_attach_escalation()
921 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); in kvmppc_xive_attach_escalation()
922 vcpu->arch.xive_esc_raddr = xd->eoi_page; in kvmppc_xive_attach_escalation()
923 vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio; in kvmppc_xive_attach_escalation()
924 xd->flags |= XIVE_IRQ_FLAG_NO_EOI; in kvmppc_xive_attach_escalation()
927 return 0; in kvmppc_xive_attach_escalation()
930 xc->esc_virq[prio] = 0; in kvmppc_xive_attach_escalation()
944 return 0; in xive_provision_queue()
953 memset(qpage, 0, 1 << xive->q_order); in xive_provision_queue()
957 * queue is fully configured. This is a requirement for prio 0 in xive_provision_queue()
960 * corresponding queue 0 entries in xive_provision_queue()
982 return 0; in xive_check_provisioning()
991 if (rc == 0 && !kvmppc_xive_has_single_escalation(xive)) in xive_check_provisioning()
1001 return 0; in xive_check_provisioning()
1041 return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY; in xive_try_pick_queue()
1057 pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio); in kvmppc_xive_select_target()
1061 if (rc == 0) in kvmppc_xive_select_target()
1071 if (rc == 0) { in kvmppc_xive_select_target()
1073 pr_devel(" found on 0x%x/%d\n", *server, prio); in kvmppc_xive_select_target()
1087 struct xive_irq_data *xd; in xive_lock_and_mask()
1112 kvmppc_xive_select_irq(state, &hw_num, &xd); in xive_lock_and_mask()
1115 val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10); in xive_lock_and_mask()
1147 struct xive_irq_data *xd; in xive_finish_unmask()
1155 kvmppc_xive_select_irq(state, &hw_num, &xd); in xive_finish_unmask()
1159 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11); in xive_finish_unmask()
1167 xive_vm_source_eoi(hw_num, xd); in xive_finish_unmask()
1274 int rc = 0; in kvmppc_xive_set_xive()
1280 pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n", in kvmppc_xive_set_xive()
1336 * we have a valid new priority (new_act_prio is not 0xff) in kvmppc_xive_set_xive()
1388 return 0; in kvmppc_xive_get_xive()
1406 pr_devel("int_on(irq=0x%x)\n", irq); in kvmppc_xive_int_on()
1416 /* If saved_priority is 0xff, do nothing */ in kvmppc_xive_int_on()
1418 return 0; in kvmppc_xive_int_on()
1427 return 0; in kvmppc_xive_int_on()
1445 pr_devel("int_off(irq=0x%x)\n", irq); in kvmppc_xive_int_off()
1453 return 0; in kvmppc_xive_int_off()
1483 return 0; in kvmppc_xive_get_icp()
1488 (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT; in kvmppc_xive_get_icp()
1507 pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n", in kvmppc_xive_set_icp()
1523 * Update MFRR state. If it's not 0xff, we mark the VCPU as in kvmppc_xive_set_icp()
1547 return 0; in kvmppc_xive_set_icp()
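kvmppc_xive_get_icp() (line 1488) and kvmppc_xive_set_icp() exchange the emulated XICS presenter state with userspace as a single u64 packing the CPPR, XISR, MFRR and PPRI fields. The sketch below shows that packing; the shift values are quoted from memory of the powerpc KVM uapi header (the KVM_REG_PPC_ICP_*_SHIFT constants in asm/kvm.h) and are an assumption to verify, and icp_pack() is a hypothetical helper, not a kernel function.

#include <stdint.h>

/* Shift values quoted from memory of asm/kvm.h; verify before relying on them. */
#define ICP_CPPR_SHIFT	56	/* current processor priority      */
#define ICP_XISR_SHIFT	32	/* pending interrupt source number */
#define ICP_MFRR_SHIFT	24	/* IPI priority, 0xff means none   */
#define ICP_PPRI_SHIFT	16	/* pending interrupt priority      */

/* Pack the presenter state the way line 1488 does for the PPRI field. */
static uint64_t icp_pack(uint8_t cppr, uint32_t xisr, uint8_t mfrr, uint8_t ppri)
{
	return (uint64_t)cppr << ICP_CPPR_SHIFT |
	       (uint64_t)xisr << ICP_XISR_SHIFT |
	       (uint64_t)mfrr << ICP_MFRR_SHIFT |
	       (uint64_t)ppri << ICP_PPRI_SHIFT;
}
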
1566 pr_debug("%s: GIRQ 0x%lx host IRQ %ld XIVE HW IRQ 0x%x\n", in kvmppc_xive_set_mapped()
1619 * mask the interrupt in a lossy way (act_priority is 0xff) in kvmppc_xive_set_mapped()
1644 return 0; in kvmppc_xive_set_mapped()
1661 pr_debug("%s: GIRQ 0x%lx host IRQ %ld\n", __func__, guest_irq, host_irq); in kvmppc_xive_clr_mapped()
1693 state->pt_number = 0; in kvmppc_xive_clr_mapped()
1724 return 0; in kvmppc_xive_clr_mapped()
1735 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_disable_vcpu_interrupts()
1740 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) { in kvmppc_xive_disable_vcpu_interrupts()
1754 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1757 xive_native_configure_irq(state->pt_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1775 vcpu->arch.xive_esc_vaddr = 0; in kvmppc_xive_disable_vcpu_interrupts()
1776 vcpu->arch.xive_esc_raddr = 0; in kvmppc_xive_disable_vcpu_interrupts()
1789 struct xive_irq_data *xd = irq_get_chip_data(irq); in xive_cleanup_single_escalation()
1796 xd->stale_p = false; in xive_cleanup_single_escalation()
1799 xd->stale_p = true; in xive_cleanup_single_escalation()
1824 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_cleanup_vcpu()
1838 vcpu->arch.xive_cam_word = 0; in kvmppc_xive_cleanup_vcpu()
1841 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_cleanup_vcpu()
1898 return 0; in kvmppc_xive_compute_vp_id()
1938 xc->mfrr = 0xff; in kvmppc_xive_connect_vcpu()
1952 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); in kvmppc_xive_connect_vcpu()
1962 pr_devel(" IPI=0x%x\n", xc->vp_ipi); in kvmppc_xive_connect_vcpu()
1980 * and we enable escalation for queue 0 only which we'll use for in kvmppc_xive_connect_vcpu()
1985 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_connect_vcpu()
1995 if (r == 0 && !kvmppc_xive_has_single_escalation(xive)) in kvmppc_xive_connect_vcpu()
2002 q, i, NULL, 0, true); in kvmppc_xive_connect_vcpu()
2011 /* If not done above, attach priority 0 escalation */ in kvmppc_xive_connect_vcpu()
2012 r = kvmppc_xive_attach_escalation(vcpu, 0, kvmppc_xive_has_single_escalation(xive)); in kvmppc_xive_connect_vcpu()
2017 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI); in kvmppc_xive_connect_vcpu()
2029 return 0; in kvmppc_xive_connect_vcpu()
2049 pr_err("invalid irq 0x%x in cpu queue!\n", irq); in xive_pre_save_set_queued()
2059 pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq); in xive_pre_save_set_queued()
2132 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
2136 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) in xive_pre_save_scan()
2145 for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) { in xive_pre_save_scan()
2152 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
2156 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) in xive_pre_save_scan()
2166 for (i = 0; i <= xive->max_sbid; i++) { in xive_post_save_scan()
2170 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) in xive_post_save_scan()
2175 xive->saved_src_count = 0; in xive_post_save_scan()
2216 if (xive->saved_src_count == 0) in xive_get_source()
2261 return 0; in xive_get_source()
2285 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { in kvmppc_xive_create_src_block()
2287 sb->irq_state[i].eisn = 0; in kvmppc_xive_create_src_block()
2316 xc->delayed_irq = 0; in xive_check_delayed_irq()
2333 int rc = 0; in xive_set_source()
2338 pr_devel("set_source(irq=0x%lx)\n", irq); in xive_set_source()
2361 pr_devel(" val=0x016%llx (server=0x%x, guest_prio=%d)\n", in xive_set_source()
2370 if (state->ipi_number == 0) { in xive_set_source()
2375 pr_devel(" src_ipi=0x%x\n", state->ipi_number); in xive_set_source()
2383 * 0 before calling it to ensure it actually performs the masking. in xive_set_source()
2385 state->guest_priority = 0; in xive_set_source()
2414 if (rc == 0) in xive_set_source()
2484 return 0; in xive_set_source()
2513 else if (level == 0 || level == KVM_INTERRUPT_UNSET) { in kvmppc_xive_set_irq()
2515 return 0; in kvmppc_xive_set_irq()
2521 return 0; in kvmppc_xive_set_irq()
2528 int rc = 0; in kvmppc_xive_set_nr_servers()
2597 return 0; in xive_has_attr()
2602 return 0; in xive_has_attr()
2608 static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd) in kvmppc_xive_cleanup_irq()
2610 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); in kvmppc_xive_cleanup_irq()
2611 xive_native_configure_irq(hw_num, 0, MASKED, 0); in kvmppc_xive_cleanup_irq()
2618 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { in kvmppc_xive_free_sources()
2685 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_release()
2725 memset(xive, 0, sizeof(*xive)); in kvmppc_xive_get_device()
2757 xive->q_page_order = 0; in kvmppc_xive_create()
2775 return 0; in kvmppc_xive_create()
2811 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_debug_show_queues()
2828 struct xive_irq_data *xd = irq_get_chip_data(xc->esc_virq[i]); in kvmppc_xive_debug_show_queues()
2829 u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET); in kvmppc_xive_debug_show_queues()
2835 xd->eoi_page); in kvmppc_xive_debug_show_queues()
2839 return 0; in kvmppc_xive_debug_show_queues()
2848 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { in kvmppc_xive_debug_show_sources()
2850 struct xive_irq_data *xd; in kvmppc_xive_debug_show_sources()
2857 kvmppc_xive_select_irq(state, &hw_num, &xd); in kvmppc_xive_debug_show_sources()
2859 pq = xive_vm_esb_load(xd, XIVE_ESB_GET); in kvmppc_xive_debug_show_sources()
2862 xd->src_chip); in kvmppc_xive_debug_show_sources()
2884 u64 t_rm_h_xirr = 0; in xive_debug_show()
2885 u64 t_rm_h_ipoll = 0; in xive_debug_show()
2886 u64 t_rm_h_cppr = 0; in xive_debug_show()
2887 u64 t_rm_h_eoi = 0; in xive_debug_show()
2888 u64 t_rm_h_ipi = 0; in xive_debug_show()
2889 u64 t_vm_h_xirr = 0; in xive_debug_show()
2890 u64 t_vm_h_ipoll = 0; in xive_debug_show()
2891 u64 t_vm_h_cppr = 0; in xive_debug_show()
2892 u64 t_vm_h_eoi = 0; in xive_debug_show()
2893 u64 t_vm_h_ipi = 0; in xive_debug_show()
2897 return 0; in xive_debug_show()
2937 for (i = 0; i <= xive->max_sbid; i++) { in xive_debug_show()
2947 return 0; in xive_debug_show()