Lines Matching full:xc
151 static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek) in xive_scan_interrupts() argument
157 while (xc->pending_prio != 0) { in xive_scan_interrupts()
160 prio = ffs(xc->pending_prio) - 1; in xive_scan_interrupts()
164 irq = xive_read_eq(&xc->queue[prio], just_peek); in xive_scan_interrupts()
182 xc->pending_prio &= ~(1 << prio); in xive_scan_interrupts()
189 q = &xc->queue[prio]; in xive_scan_interrupts()
204 if (prio != xc->cppr) { in xive_scan_interrupts()
206 xc->cppr = prio; in xive_scan_interrupts()
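
Taken together, the xive_scan_interrupts() matches trace the dispatch loop: xc->pending_prio is a bitmask of priorities whose event queues may hold work, ffs() selects the most urgent pending priority (lowest number), xive_read_eq() pops or peeks that queue, an empty queue clears its pending bit, and the CPPR is updated to the priority being serviced. Below is a minimal, self-contained user-space sketch of that scan pattern; the fake_cpu/fake_q types, queue size and ring layout are illustrative stand-ins, not the kernel's definitions.

/*
 * Illustrative sketch only: simplified stand-ins for the kernel's
 * struct xive_cpu / struct xive_q, showing the scan pattern
 * (bitmask of pending priorities, per-priority event queues, CPPR).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <strings.h>		/* ffs() */

#define NR_PRIOS 8
#define Q_SIZE   16

struct fake_q {
	uint32_t entries[Q_SIZE];
	unsigned int head, count;
};

struct fake_cpu {
	uint8_t pending_prio;	/* one bit per priority that may have work */
	uint8_t cppr;		/* current processor priority */
	struct fake_q queue[NR_PRIOS];
};

/* Pop (or just peek at) the next entry of one queue; 0 means empty. */
static uint32_t read_eq(struct fake_q *q, bool just_peek)
{
	uint32_t irq;

	if (!q->count)
		return 0;
	irq = q->entries[q->head];
	if (!just_peek) {
		q->head = (q->head + 1) % Q_SIZE;
		q->count--;
	}
	return irq;
}

/* Scan pending priorities, most urgent (lowest number) first. */
static uint32_t scan_interrupts(struct fake_cpu *xc, bool just_peek)
{
	uint32_t irq = 0;
	int prio;

	while (xc->pending_prio != 0) {
		prio = ffs(xc->pending_prio) - 1;

		irq = read_eq(&xc->queue[prio], just_peek);
		if (irq) {
			/* Found one: note the priority we now service. */
			if (!just_peek && prio != xc->cppr)
				xc->cppr = prio;
			break;
		}
		/* Queue drained: clear its pending bit, keep scanning. */
		xc->pending_prio &= ~(1u << prio);
	}
	return irq;
}

int main(void)
{
	struct fake_cpu xc = { .pending_prio = 1u << 5, .cppr = 0xff };

	xc.queue[5].entries[0] = 42;
	xc.queue[5].count = 1;

	printf("irq=%u cppr=%u\n",
	       (unsigned)scan_interrupts(&xc, false), (unsigned)xc.cppr);
	return 0;
}
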
272 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xmon_xive_do_dump() local
275 if (xc) { in xmon_xive_do_dump()
276 xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); in xmon_xive_do_dump()
282 xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer)); in xmon_xive_do_dump()
283 xmon_printf("IPI=0x%08x %s", xc->hw_ipi, buffer); in xmon_xive_do_dump()
286 xive_dump_eq("EQ", &xc->queue[xive_irq_priority]); in xmon_xive_do_dump()
346 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_get_irq() local
363 xive_ops->update_pending(xc); in xive_get_irq()
365 DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio); in xive_get_irq()
368 irq = xive_scan_interrupts(xc, false); in xive_get_irq()
371 irq, xc->pending_prio); in xive_get_irq()
389 static void xive_do_queue_eoi(struct xive_cpu *xc) in xive_do_queue_eoi() argument
391 if (xive_scan_interrupts(xc, true) != 0) { in xive_do_queue_eoi()
392 DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio); in xive_do_queue_eoi()
441 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_irq_eoi() local
444 d->irq, irqd_to_hwirq(d), xc->pending_prio); in xive_irq_eoi()
463 xive_do_queue_eoi(xc); in xive_irq_eoi()
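
The xive_get_irq() and EOI matches show how that scan is driven: on an external interrupt the backend first refreshes xc->pending_prio (xive_ops->update_pending()), then a destructive scan fetches the next interrupt; on EOI a peek-only scan (just_peek = true) checks whether anything is still queued and, in the kernel, forces the interrupt to be re-raised if so. Continuing the illustrative sketch above (same fake_cpu type and scan_interrupts() helper):

/* Entry path: refresh pending bits, then fetch the next interrupt. */
static uint32_t get_irq(struct fake_cpu *xc)
{
	/* xive_ops->update_pending(xc) would refresh xc->pending_prio
	 * from the hardware before the scan; omitted in this sketch. */
	return scan_interrupts(xc, false);
}

/* EOI path: peek only; anything still pending means "come back". */
static void queue_eoi(struct fake_cpu *xc)
{
	if (scan_interrupts(xc, true) != 0)
		printf("eoi: pending=0x%02x, force a new interrupt\n",
		       (unsigned)xc->pending_prio);
}
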
505 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_try_pick_target() local
506 struct xive_q *q = &xc->queue[xive_irq_priority]; in xive_try_pick_target()
529 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_dec_target_count() local
530 struct xive_q *q = &xc->queue[xive_irq_priority]; in xive_dec_target_count()
532 if (WARN_ON(cpu < 0 || !xc)) { in xive_dec_target_count()
533 pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc); in xive_dec_target_count()
610 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_pick_irq_target() local
611 if (xc->chip_id == xd->src_chip) in xive_pick_irq_target()
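
The xive_try_pick_target()/xive_dec_target_count() pair suggests interrupts are spread across CPUs by bumping a bounded per-queue counter and backing off when a candidate queue is full, with xive_pick_irq_target() preferring CPUs whose chip_id matches the interrupt's source chip. A generic, stand-alone sketch of that bounded-counter pattern follows; the capacity, counter type and names here are illustrative, not the kernel's exact definitions.

#include <stdatomic.h>
#include <stdbool.h>

#define TARGET_Q_MAX 16		/* illustrative per-queue capacity */

struct target_q {
	atomic_int count;	/* interrupts currently routed at this queue */
};

/* Claim a slot on a candidate CPU's queue; fail if it is full. */
static bool try_pick_target(struct target_q *q)
{
	int old = atomic_load(&q->count);

	while (old < TARGET_Q_MAX) {
		if (atomic_compare_exchange_weak(&q->count, &old, old + 1))
			return true;	/* slot claimed */
		/* old was reloaded by the failed CAS; retry. */
	}
	return false;			/* full: caller tries another CPU */
}

/* Release the slot when the interrupt is retargeted or shut down. */
static void dec_target_count(struct target_q *q)
{
	(void)atomic_fetch_sub(&q->count, 1);
}
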
1056 struct xive_cpu *xc; in xive_cause_ipi() local
1059 xc = per_cpu(xive_cpu, cpu); in xive_cause_ipi()
1062 smp_processor_id(), cpu, xc->hw_ipi); in xive_cause_ipi()
1064 xd = &xc->ipi_data; in xive_cause_ipi()
1077 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_ipi_eoi() local
1080 if (!xc) in xive_ipi_eoi()
1084 d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio); in xive_ipi_eoi()
1086 xive_do_source_eoi(&xc->ipi_data); in xive_ipi_eoi()
1087 xive_do_queue_eoi(xc); in xive_ipi_eoi()
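
For IPIs, xive_cause_ipi() fires the target CPU's per-CPU IPI source (xc->ipi_data, via its trigger page), and xive_ipi_eoi() performs a source-level EOI followed by the queue-level EOI above. Continuing the same illustrative sketch, with the hardware trigger modelled as directly queueing into the target's IPI-priority queue (the real code writes to a trigger MMIO page instead):

#define IPI_PRIO 6		/* illustrative priority used for IPIs */

/* "Trigger" the target CPU's IPI: modelled as enqueueing its hw_ipi
 * number and marking the IPI priority pending on that CPU. */
static void cause_ipi(struct fake_cpu *target, uint32_t hw_ipi)
{
	struct fake_q *q = &target->queue[IPI_PRIO];

	if (q->count < Q_SIZE) {
		q->entries[(q->head + q->count) % Q_SIZE] = hw_ipi;
		q->count++;
	}
	target->pending_prio |= 1u << IPI_PRIO;
}

/* IPI EOI: the source-level EOI (hardware ack of xc->ipi_data) is
 * omitted here; the queue-level re-scan is the queue_eoi() above. */
static void ipi_eoi(struct fake_cpu *xc)
{
	queue_eoi(xc);
}
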
1199 struct xive_cpu *xc; in xive_setup_cpu_ipi() local
1204 xc = per_cpu(xive_cpu, cpu); in xive_setup_cpu_ipi()
1207 if (xc->hw_ipi != XIVE_BAD_IRQ) in xive_setup_cpu_ipi()
1213 /* Grab an IPI from the backend, this will populate xc->hw_ipi */ in xive_setup_cpu_ipi()
1214 if (xive_ops->get_ipi(cpu, xc)) in xive_setup_cpu_ipi()
1221 rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data); in xive_setup_cpu_ipi()
1226 rc = xive_ops->configure_irq(xc->hw_ipi, in xive_setup_cpu_ipi()
1234 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio); in xive_setup_cpu_ipi()
1237 xive_do_source_set_mask(&xc->ipi_data, false); in xive_setup_cpu_ipi()
1242 noinstr static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) in xive_cleanup_cpu_ipi() argument
1249 if (xc->hw_ipi == XIVE_BAD_IRQ) in xive_cleanup_cpu_ipi()
1255 xive_do_source_set_mask(&xc->ipi_data, true); in xive_cleanup_cpu_ipi()
1264 xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(), in xive_cleanup_cpu_ipi()
1268 xive_ops->put_ipi(cpu, xc); in xive_cleanup_cpu_ipi()
1472 static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) in xive_cleanup_cpu_queues() argument
1474 if (xc->queue[xive_irq_priority].qpage) in xive_cleanup_cpu_queues()
1475 xive_ops->cleanup_queue(cpu, xc, xive_irq_priority); in xive_cleanup_cpu_queues()
1478 static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) in xive_setup_cpu_queues() argument
1483 if (!xc->queue[xive_irq_priority].qpage) in xive_setup_cpu_queues()
1484 rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority); in xive_setup_cpu_queues()
1491 struct xive_cpu *xc; in xive_prepare_cpu() local
1493 xc = per_cpu(xive_cpu, cpu); in xive_prepare_cpu()
1494 if (!xc) { in xive_prepare_cpu()
1495 xc = kzalloc_node(sizeof(struct xive_cpu), in xive_prepare_cpu()
1497 if (!xc) in xive_prepare_cpu()
1499 xc->hw_ipi = XIVE_BAD_IRQ; in xive_prepare_cpu()
1500 xc->chip_id = XIVE_INVALID_CHIP_ID; in xive_prepare_cpu()
1502 xive_ops->prepare_cpu(cpu, xc); in xive_prepare_cpu()
1504 per_cpu(xive_cpu, cpu) = xc; in xive_prepare_cpu()
1508 return xive_setup_cpu_queues(cpu, xc); in xive_prepare_cpu()
1513 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_setup_cpu() local
1517 xive_ops->setup_cpu(smp_processor_id(), xc); in xive_setup_cpu()
1520 xc->cppr = 0xff; in xive_setup_cpu()
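
The prepare/setup matches show the usual lazy per-CPU bring-up: allocate the per-CPU block once (node-local via kzalloc_node() in the kernel), mark the IPI as not yet assigned and the chip as unknown, let the backend prepare it, then on the CPU itself open the CPPR (0xff accepts all priorities). A stand-alone sketch of that pattern, with a plain array standing in for the per-CPU variable and illustrative constants standing in for XIVE_BAD_IRQ / XIVE_INVALID_CHIP_ID:

#include <stdint.h>
#include <stdlib.h>

#define SKETCH_NR_CPUS	4
#define BAD_IRQ		0xffffffffu	/* stand-in for XIVE_BAD_IRQ */
#define INVALID_CHIP	(-1)		/* stand-in for XIVE_INVALID_CHIP_ID */

struct cpu_state {
	uint32_t hw_ipi;
	int	 chip_id;
	uint8_t	 cppr;
};

static struct cpu_state *per_cpu_state[SKETCH_NR_CPUS];	/* "per-CPU" slots */

/* May be called again for an already-prepared CPU: allocate only once. */
static int prepare_cpu(unsigned int cpu)
{
	struct cpu_state *xc = per_cpu_state[cpu];

	if (!xc) {
		xc = calloc(1, sizeof(*xc));	/* kzalloc_node() in the kernel */
		if (!xc)
			return -1;
		xc->hw_ipi = BAD_IRQ;		/* no hardware IPI assigned yet */
		xc->chip_id = INVALID_CHIP;
		per_cpu_state[cpu] = xc;
	}
	/* Event-queue setup would follow (xive_setup_cpu_queues()). */
	return 0;
}

/* Runs on the CPU itself once it is up: accept every priority. */
static void setup_cpu(unsigned int cpu)
{
	per_cpu_state[cpu]->cppr = 0xff;
}
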
1549 static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc) in xive_flush_cpu_queue() argument
1557 while ((irq = xive_scan_interrupts(xc, false)) != 0) { in xive_flush_cpu_queue()
1605 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_smp_disable_cpu() local
1612 xc->cppr = 0; in xive_smp_disable_cpu()
1616 xive_flush_cpu_queue(cpu, xc); in xive_smp_disable_cpu()
1619 xc->cppr = 0xff; in xive_smp_disable_cpu()
1625 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_flush_interrupt() local
1629 xive_flush_cpu_queue(cpu, xc); in xive_flush_interrupt()
1638 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_teardown_cpu() local
1642 xc->cppr = 0; in xive_teardown_cpu()
1646 xive_ops->teardown_cpu(cpu, xc); in xive_teardown_cpu()
1650 xive_cleanup_cpu_ipi(cpu, xc); in xive_teardown_cpu()
1654 xive_cleanup_cpu_queues(cpu, xc); in xive_teardown_cpu()
1730 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_debug_show_ipi() local
1733 if (xc) { in xive_debug_show_ipi()
1734 seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); in xive_debug_show_ipi()
1740 xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer)); in xive_debug_show_ipi()
1741 seq_printf(m, "IPI=0x%08x %s", xc->hw_ipi, buffer); in xive_debug_show_ipi()
1818 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_eq_debug_show() local
1820 if (xc) in xive_eq_debug_show()
1821 xive_eq_debug_show_one(m, &xc->queue[xive_irq_priority], in xive_eq_debug_show()