xref: /linux/arch/powerpc/sysdev/xive/common.c (revision e2683c8868d03382da7e1ce8453b543a043066d1)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright 2016,2017 IBM Corporation.
4  */
5 
6 #define pr_fmt(fmt) "xive: " fmt
7 
8 #include <linux/types.h>
9 #include <linux/threads.h>
10 #include <linux/kernel.h>
11 #include <linux/irq.h>
12 #include <linux/irqdomain.h>
13 #include <linux/debugfs.h>
14 #include <linux/smp.h>
15 #include <linux/interrupt.h>
16 #include <linux/seq_file.h>
17 #include <linux/init.h>
18 #include <linux/cpu.h>
19 #include <linux/of.h>
20 #include <linux/slab.h>
21 #include <linux/spinlock.h>
22 #include <linux/msi.h>
23 #include <linux/vmalloc.h>
24 
25 #include <asm/io.h>
26 #include <asm/smp.h>
27 #include <asm/machdep.h>
28 #include <asm/irq.h>
29 #include <asm/errno.h>
30 #include <asm/xive.h>
31 #include <asm/xive-regs.h>
32 #include <asm/xmon.h>
33 
34 #include "xive-internal.h"
35 
36 #undef DEBUG_FLUSH
37 #undef DEBUG_ALL
38 
39 #ifdef DEBUG_ALL
40 #define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
41 					 smp_processor_id(), ## __VA_ARGS__)
42 #else
43 #define DBG_VERBOSE(fmt...)	do { } while(0)
44 #endif
45 
46 bool __xive_enabled;
47 EXPORT_SYMBOL_GPL(__xive_enabled);
48 bool xive_cmdline_disabled;
49 
50 /* We use only one priority for now */
51 static u8 xive_irq_priority;
52 
53 /* TIMA exported to KVM */
54 void __iomem *xive_tima;
55 EXPORT_SYMBOL_GPL(xive_tima);
56 u32 xive_tima_offset;
57 
58 /* Backend ops */
59 static const struct xive_ops *xive_ops;
60 
61 /* Our global interrupt domain */
62 static struct irq_domain *xive_irq_domain;
63 
64 #ifdef CONFIG_SMP
65 /* The IPIs use the same logical irq number when on the same node */
66 static struct xive_ipi_desc {
67 	unsigned int irq;
68 	char name[16];
69 	atomic_t started;
70 } *xive_ipis;
71 
72 /*
73  * Use early_cpu_to_node() for hot-plugged CPUs
74  */
75 static unsigned int xive_ipi_cpu_to_irq(unsigned int cpu)
76 {
77 	return xive_ipis[early_cpu_to_node(cpu)].irq;
78 }
79 #endif
80 
81 /* Xive state for each CPU */
82 static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);
83 
84 /* An invalid CPU target */
85 #define XIVE_INVALID_TARGET	(-1)
86 
87 /*
88  * Global toggle to switch on/off StoreEOI
89  */
90 static bool xive_store_eoi = true;
91 
92 static bool xive_is_store_eoi(struct xive_irq_data *xd)
93 {
94 	return xd->flags & XIVE_IRQ_FLAG_STORE_EOI && xive_store_eoi;
95 }
96 
97 /*
98  * Read the next entry in a queue, return its content if it's valid
99  * or 0 if there is no new entry.
100  *
101  * The queue pointer is moved forward unless "just_peek" is set
102  */
103 static u32 xive_read_eq(struct xive_q *q, bool just_peek)
104 {
105 	u32 cur;
106 
107 	if (!q->qpage)
108 		return 0;
109 	cur = be32_to_cpup(q->qpage + q->idx);
110 
111 	/* Check valid bit (31) vs current toggle polarity */
112 	if ((cur >> 31) == q->toggle)
113 		return 0;
114 
115 	/* If consuming from the queue ... */
116 	if (!just_peek) {
117 		/* Next entry */
118 		q->idx = (q->idx + 1) & q->msk;
119 
120 		/* Wrap around: flip valid toggle */
121 		if (q->idx == 0)
122 			q->toggle ^= 1;
123 	}
124 	/* Mask out the valid bit (31) */
125 	return cur & 0x7fffffff;
126 }
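/*
 * A minimal sketch (hypothetical helper, not part of the driver): each
 * 32-bit EQ entry carries the IRQ number in bits 0-30 and a generation
 * bit in bit 31. The generation flips on every queue wrap, so an entry
 * is new only while its bit 31 differs from the consumer's current
 * toggle, which is exactly the test at the top of xive_read_eq():
 */
static inline bool xive_eq_entry_is_new(u32 entry, u8 toggle)
{
	/* Stale (or never written) when the generation matches toggle */
	return (entry >> 31) != toggle;
}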
127 
128 /*
129  * Scans all the queues that may have interrupts in them
130  * (based on "pending_prio") in priority order until an
131  * interrupt is found or all the queues are empty.
132  *
133  * Then updates the CPPR (Current Processor Priority
134  * Register) based on the most favored interrupt found
135  * (0xff if none) and returns what was found (0 if none).
136  *
137  * If just_peek is set, return the most favored pending
138  * interrupt if any but don't update the queue pointers.
139  *
140  * Note: This function can operate generically on any number
141  * of queues (up to 8). The current implementation of the XIVE
142  * driver only uses a single queue however.
143  *
144  * Note2: This will also "flush" the "pending_count" of a queue
145  * into its "count" when that queue is observed to be empty.
146  * This is used to keep track of the number of interrupts
147  * targeting a queue. When an interrupt is moved away from
148  * a queue, we only decrement that queue's count once the queue
149  * has been observed empty to avoid races.
150  */
151 static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
152 {
153 	u32 irq = 0;
154 	u8 prio = 0;
155 
156 	/* Find highest pending priority */
157 	while (xc->pending_prio != 0) {
158 		struct xive_q *q;
159 
160 		prio = ffs(xc->pending_prio) - 1;
161 		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);
162 
163 		/* Try to fetch */
164 		irq = xive_read_eq(&xc->queue[prio], just_peek);
165 
166 		/* Found something ? That's it */
167 		if (irq) {
168 			if (just_peek || irq_to_desc(irq))
169 				break;
170 			/*
171 			 * We should never get here; if we do then we must
172 			 * have failed to synchronize the interrupt properly
173 			 * when shutting it down.
174 			 */
175 			pr_crit("got interrupt %d without descriptor, dropping\n",
176 				irq);
177 			WARN_ON(1);
178 			continue;
179 		}
180 
181 		/* Clear pending bits */
182 		xc->pending_prio &= ~(1 << prio);
183 
184 		/*
185 		 * Check if the queue count needs adjusting due to
186 		 * interrupts being moved away. See description of
187 		 * xive_dec_target_count()
188 		 */
189 		q = &xc->queue[prio];
190 		if (atomic_read(&q->pending_count)) {
191 			int p = atomic_xchg(&q->pending_count, 0);
192 			if (p) {
193 				WARN_ON(p > atomic_read(&q->count));
194 				atomic_sub(p, &q->count);
195 			}
196 		}
197 	}
198 
199 	/* If nothing was found, set CPPR to 0xff */
200 	if (irq == 0)
201 		prio = 0xff;
202 
203 	/* Update HW CPPR to match if necessary */
204 	if (prio != xc->cppr) {
205 		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
206 		xc->cppr = prio;
207 		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
208 	}
209 
210 	return irq;
211 }
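/*
 * A minimal sketch (hypothetical helper, not part of the driver) of the
 * priority selection above: "pending_prio" has one bit per priority,
 * bit 0 being the most favored, so ffs() directly yields the most
 * favored pending priority.
 */
static inline int xive_most_favored_prio(u8 pending_prio)
{
	/* ffs() is 1-based; return -1 when nothing is pending */
	return pending_prio ? ffs(pending_prio) - 1 : -1;
}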
212 
213 /*
214  * This is used to perform the magic loads from an ESB
215  * described in xive-regs.h
216  */
217 static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
218 {
219 	u64 val;
220 
221 	if (offset == XIVE_ESB_SET_PQ_10 && xive_is_store_eoi(xd))
222 		offset |= XIVE_ESB_LD_ST_MO;
223 
224 	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
225 		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
226 	else
227 		val = in_be64(xd->eoi_mmio + offset);
228 
229 	return (u8)val;
230 }
231 
232 static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
233 {
234 	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
235 		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
236 	else
237 		out_be64(xd->eoi_mmio + offset, data);
238 }
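/*
 * A minimal sketch (hypothetical helper, not part of the driver): the
 * ESB encodes the operation in the load offset, so a load at
 * XIVE_ESB_GET returns the PQ bits without side effects. Assuming the
 * XIVE_ESB_VAL_* masks from xive-regs.h, a non-destructive "is this
 * source pending?" query would be:
 */
static inline bool xive_source_is_pending(struct xive_irq_data *xd)
{
	u8 pq = xive_esb_read(xd, XIVE_ESB_GET);

	/* P set: an event was sent to a queue and awaits its EOI */
	return !!(pq & XIVE_ESB_VAL_P);
}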
239 
240 #if defined(CONFIG_XMON) || defined(CONFIG_DEBUG_FS)
241 static void xive_irq_data_dump(struct xive_irq_data *xd, char *buffer, size_t size)
242 {
243 	u64 val = xive_esb_read(xd, XIVE_ESB_GET);
244 
245 	snprintf(buffer, size, "flags=%c%c%c PQ=%c%c 0x%016llx 0x%016llx",
246 		 xive_is_store_eoi(xd) ? 'S' : ' ',
247 		 xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
248 		 xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
249 		 val & XIVE_ESB_VAL_P ? 'P' : '-',
250 		 val & XIVE_ESB_VAL_Q ? 'Q' : '-',
251 		 xd->trig_page, xd->eoi_page);
252 }
253 #endif
254 
255 #ifdef CONFIG_XMON
256 static notrace void xive_dump_eq(const char *name, struct xive_q *q)
257 {
258 	u32 i0, i1, idx;
259 
260 	if (!q->qpage)
261 		return;
262 	idx = q->idx;
263 	i0 = be32_to_cpup(q->qpage + idx);
264 	idx = (idx + 1) & q->msk;
265 	i1 = be32_to_cpup(q->qpage + idx);
266 	xmon_printf("%s idx=%d T=%d %08x %08x ...", name,
267 		     q->idx, q->toggle, i0, i1);
268 }
269 
270 notrace void xmon_xive_do_dump(int cpu)
271 {
272 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
273 
274 	xmon_printf("CPU %d:", cpu);
275 	if (xc) {
276 		xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);
277 
278 #ifdef CONFIG_SMP
279 		{
280 			char buffer[128];
281 
282 			xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer));
283 			xmon_printf("IPI=0x%08x %s", xc->hw_ipi, buffer);
284 		}
285 #endif
286 		xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
287 	}
288 	xmon_printf("\n");
289 }
290 
291 static struct irq_data *xive_get_irq_data(u32 hw_irq)
292 {
293 	unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);
294 
295 	return irq ? irq_get_irq_data(irq) : NULL;
296 }
297 
298 int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
299 {
300 	int rc;
301 	u32 target;
302 	u8 prio;
303 	u32 lirq;
304 
305 	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
306 	if (rc) {
307 		xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
308 		return rc;
309 	}
310 
311 	xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
312 		    hw_irq, target, prio, lirq);
313 
314 	if (!d)
315 		d = xive_get_irq_data(hw_irq);
316 
317 	if (d) {
318 		char buffer[128];
319 
320 		xive_irq_data_dump(irq_data_get_irq_chip_data(d),
321 				   buffer, sizeof(buffer));
322 		xmon_printf("%s", buffer);
323 	}
324 
325 	xmon_printf("\n");
326 	return 0;
327 }
328 
329 void xmon_xive_get_irq_all(void)
330 {
331 	unsigned int i;
332 	struct irq_desc *desc;
333 
334 	for_each_irq_desc(i, desc) {
335 		struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i);
336 
337 		if (d)
338 			xmon_xive_get_irq_config(irqd_to_hwirq(d), d);
339 	}
340 }
341 
342 #endif /* CONFIG_XMON */
343 
344 static unsigned int xive_get_irq(void)
345 {
346 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
347 	u32 irq;
348 
349 	/*
350 	 * This can be called either as a result of a HW interrupt or
351 	 * as a "replay" because EOI decided there was still something
352 	 * in one of the queues.
353 	 *
354 	 * First we perform an ACK cycle in order to update our mask
355 	 * of pending priorities. This will also have the effect of
356 	 * updating the CPPR to the most favored pending interrupt.
357 	 *
358 	 * In the future, if we have a way to differentiate a first
359 	 * entry (on HW interrupt) from a replay triggered by EOI,
360 	 * we could skip this on replays unless the soft-mask state
361 	 * tells us that a new HW interrupt occurred.
362 	 */
363 	xive_ops->update_pending(xc);
364 
365 	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);
366 
367 	/* Scan our queue(s) for interrupts */
368 	irq = xive_scan_interrupts(xc, false);
369 
370 	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
371 	    irq, xc->pending_prio);
372 
373 	/* Return pending interrupt if any */
374 	if (irq == XIVE_BAD_IRQ)
375 		return 0;
376 	return irq;
377 }
378 
379 /*
380  * After EOI'ing an interrupt, we need to re-check the queue
381  * to see if another interrupt is pending since multiple
382  * interrupts can coalesce into a single notification to the
383  * CPU.
384  *
385  * If we find that there is indeed more in there, we call
386  * force_external_irq_replay() to make Linux synthesize an
387  * external interrupt on the next call to local_irq_restore().
388  */
389 static void xive_do_queue_eoi(struct xive_cpu *xc)
390 {
391 	if (xive_scan_interrupts(xc, true) != 0) {
392 		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
393 		force_external_irq_replay();
394 	}
395 }
396 
397 /*
398  * EOI an interrupt at the source. There are several methods
399  * to do this depending on the HW version and source type
400  */
401 static void xive_do_source_eoi(struct xive_irq_data *xd)
402 {
403 	u8 eoi_val;
404 
405 	xd->stale_p = false;
406 
407 	/* If the XIVE supports the new "store EOI" facility, use it */
408 	if (xive_is_store_eoi(xd)) {
409 		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
410 		return;
411 	}
412 
413 	/*
414 	 * For LSIs, we use the "EOI cycle" special load rather than
415 	 * PQ bits, as they are automatically re-triggered in HW when
416 	 * still pending.
417 	 */
418 	if (xd->flags & XIVE_IRQ_FLAG_LSI) {
419 		xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
420 		return;
421 	}
422 
423 	/*
424 	 * Otherwise, we use the special MMIO that does a clear of
425 	 * both P and Q and returns the old Q. This allows us to then
426 	 * do a re-trigger if Q was set rather than synthesizing an
427 	 * interrupt in software
428 	 */
429 	eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
430 	DBG_VERBOSE("eoi_val=%x\n", eoi_val);
431 
432 	/* Re-trigger if needed */
433 	if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
434 		out_be64(xd->trig_mmio, 0);
435 }
436 
437 /* irq_chip eoi callback, called with irq descriptor lock held */
438 static void xive_irq_eoi(struct irq_data *d)
439 {
440 	struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
441 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
442 
443 	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
444 		    d->irq, irqd_to_hwirq(d), xc->pending_prio);
445 
446 	/*
447 	 * EOI the source if it hasn't been disabled and hasn't
448 	 * been passed-through to a KVM guest
449 	 */
450 	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
451 	    !(xd->flags & XIVE_IRQ_FLAG_NO_EOI))
452 		xive_do_source_eoi(xd);
453 	else
454 		xd->stale_p = true;
455 
456 	/*
457 	 * Clear saved_p to indicate that it's no longer occupying
458 	 * a queue slot on the target queue
459 	 */
460 	xd->saved_p = false;
461 
462 	/* Check for more work in the queue */
463 	xive_do_queue_eoi(xc);
464 }
465 
466 /*
467  * Helper used to mask and unmask an interrupt source.
468  */
469 static void xive_do_source_set_mask(struct xive_irq_data *xd,
470 				    bool mask)
471 {
472 	u64 val;
473 
474 	pr_debug("%s: HW 0x%x %smask\n", __func__, xd->hw_irq, mask ? "" : "un");
475 
476 	/*
477 	 * If the interrupt had P set, it may be in a queue.
478 	 *
479 	 * We need to make sure we don't re-enable it until it
480 	 * has been fetched from that queue and EOId. We keep
481 	 * a copy of that P state and use it to restore the
482 	 * ESB accordingly on unmask.
483 	 */
484 	if (mask) {
485 		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
486 		if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
487 			xd->saved_p = true;
488 		xd->stale_p = false;
489 	} else if (xd->saved_p) {
490 		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
491 		xd->saved_p = false;
492 	} else {
493 		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
494 		xd->stale_p = false;
495 	}
496 }
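/*
 * For reference, the PQ states driven above (summarized from the ESB
 * description in xive-regs.h; P = event sent to a queue and awaiting
 * EOI, Q = a further trigger was coalesced while pending):
 *
 *   PQ=00  enabled and idle: the next trigger sends a notification
 *   PQ=01  masked ("OFF" by software convention): no notifications
 *   PQ=10  pending: still occupying a queue slot, no new notification
 *   PQ=11  pending and queued
 *
 * Hence mask uses PQ=01, and unmask restores PQ=10 when a saved P says
 * the interrupt may still be in a queue, or PQ=00 otherwise.
 */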
497 
498 /*
499  * Try to choose "cpu" as a new interrupt target. Increments
500  * the queue accounting for that target if it's not already
501  * full.
502  */
503 static bool xive_try_pick_target(int cpu)
504 {
505 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
506 	struct xive_q *q = &xc->queue[xive_irq_priority];
507 	int max;
508 
509 	/*
510 	 * Calculate max number of interrupts in that queue.
511 	 *
512 	 * We leave a gap of 1 just in case...
513 	 */
514 	max = (q->msk + 1) - 1;
515 	return !!atomic_add_unless(&q->count, 1, max);
516 }
517 
518 /*
519  * Un-account an interrupt for a target CPU. We don't directly
520  * decrement q->count since the interrupt might still be present
521  * in the queue.
522  *
523  * Instead increment a separate counter "pending_count" which
524  * will be subtracted from "count" later when that CPU observes
525  * the queue to be empty.
526  */
527 static void xive_dec_target_count(int cpu)
528 {
529 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
530 	struct xive_q *q = &xc->queue[xive_irq_priority];
531 
532 	if (WARN_ON(cpu < 0 || !xc)) {
533 		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
534 		return;
535 	}
536 
537 	/*
538 	 * We increment the "pending count" which will be used
539 	 * to decrement the target queue count whenever it's next
540 	 * processed and found empty. This ensures that we don't
541 	 * decrement while we still have the interrupt there
542 	 * occupying a slot.
543 	 */
544 	atomic_inc(&q->pending_count);
545 }
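/*
 * A minimal sketch (hypothetical helper, not part of the driver) of the
 * resulting invariant: the interrupts effectively accounted to a queue
 * are "count" minus the not-yet-flushed "pending_count".
 * xive_try_pick_target() bounds "count", and xive_scan_interrupts()
 * folds "pending_count" back in once the queue is seen empty.
 */
static inline int xive_q_effective_count(struct xive_q *q)
{
	return atomic_read(&q->count) - atomic_read(&q->pending_count);
}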
546 
547 /* Find a tentative CPU target in a CPU mask */
548 static int xive_find_target_in_mask(const struct cpumask *mask,
549 				    unsigned int fuzz)
550 {
551 	int cpu, first;
552 
553 	/* Pick up a starting point CPU in the mask based on fuzz */
554 	fuzz %= cpumask_weight(mask);
555 	first = cpumask_nth(fuzz, mask);
556 	WARN_ON(first >= nr_cpu_ids);
557 
558 	/*
559 	 * Now go through the entire mask until we find a valid
560 	 * target.
561 	 */
562 	for_each_cpu_wrap(cpu, mask, first) {
563 		if (cpu_online(cpu) && xive_try_pick_target(cpu))
564 			return cpu;
565 	}
566 
567 	return -1;
568 }
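/*
 * Worked example (hypothetical values): with mask = {2,5,7} and
 * fuzz = 4, "fuzz %= 3" leaves 1, cpumask_nth(1, mask) starts the
 * search at CPU 5, and the wrap-around walk then tries CPUs 5, 7 and 2
 * until one is online and has queue room.
 */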
569 
570 /*
571  * Pick a target CPU for an interrupt. This is done at
572  * startup or if the affinity is changed in a way that
573  * invalidates the current target.
574  */
575 static int xive_pick_irq_target(struct irq_data *d,
576 				const struct cpumask *affinity)
577 {
578 	static unsigned int fuzz;
579 	struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
580 	cpumask_var_t mask;
581 	int cpu = -1;
582 
583 	/*
584 	 * If we have chip IDs, first we try to build a mask of
585 	 * CPUs matching the CPU and find a target in there
586 	 * CPUs matching the source chip and find a target in there
587 	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
588 		zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
589 		/* Build a mask of matching chip IDs */
590 		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
591 			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
592 			if (xc->chip_id == xd->src_chip)
593 				cpumask_set_cpu(cpu, mask);
594 		}
595 		/* Try to find a target */
596 		if (cpumask_empty(mask))
597 			cpu = -1;
598 		else
599 			cpu = xive_find_target_in_mask(mask, fuzz++);
600 		free_cpumask_var(mask);
601 		if (cpu >= 0)
602 			return cpu;
603 		fuzz--;
604 	}
605 
606 	/* No chip IDs, fallback to using the affinity mask */
607 	return xive_find_target_in_mask(affinity, fuzz++);
608 }
609 
610 static unsigned int xive_irq_startup(struct irq_data *d)
611 {
612 	struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
613 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
614 	int target, rc;
615 
616 	xd->saved_p = false;
617 	xd->stale_p = false;
618 
619 	pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d);
620 
621 	/* Pick a target */
622 	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
623 	if (target == XIVE_INVALID_TARGET) {
624 		/* Try again breaking affinity */
625 		target = xive_pick_irq_target(d, cpu_online_mask);
626 		if (target == XIVE_INVALID_TARGET)
627 			return -ENXIO;
628 		pr_warn("irq %d started with broken affinity\n", d->irq);
629 	}
630 
631 	/* Sanity check */
632 	if (WARN_ON(target == XIVE_INVALID_TARGET ||
633 		    target >= nr_cpu_ids))
634 		target = smp_processor_id();
635 
636 	xd->target = target;
637 
638 	/*
639 	 * Configure the logical number to be the Linux IRQ number
640 	 * and set the target queue
641 	 */
642 	rc = xive_ops->configure_irq(hw_irq,
643 				     get_hard_smp_processor_id(target),
644 				     xive_irq_priority, d->irq);
645 	if (rc)
646 		return rc;
647 
648 	/* Unmask the ESB */
649 	xive_do_source_set_mask(xd, false);
650 
651 	return 0;
652 }
653 
654 /* called with irq descriptor lock held */
655 static void xive_irq_shutdown(struct irq_data *d)
656 {
657 	struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
658 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
659 
660 	pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d);
661 
662 	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
663 		return;
664 
665 	/* Mask the interrupt at the source */
666 	xive_do_source_set_mask(xd, true);
667 
668 	/*
669 	 * Mask the interrupt in HW in the IVT/EAS and set the number
670 	 * to be the "bad" IRQ number
671 	 */
672 	xive_ops->configure_irq(hw_irq,
673 				get_hard_smp_processor_id(xd->target),
674 				0xff, XIVE_BAD_IRQ);
675 
676 	xive_dec_target_count(xd->target);
677 	xd->target = XIVE_INVALID_TARGET;
678 }
679 
680 static void xive_irq_unmask(struct irq_data *d)
681 {
682 	struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
683 
684 	pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd);
685 
686 	xive_do_source_set_mask(xd, false);
687 }
688 
689 static void xive_irq_mask(struct irq_data *d)
690 {
691 	struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
692 
693 	pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd);
694 
695 	xive_do_source_set_mask(xd, true);
696 }
697 
698 static int xive_irq_set_affinity(struct irq_data *d,
699 				 const struct cpumask *cpumask,
700 				 bool force)
701 {
702 	struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
703 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
704 	u32 target, old_target;
705 	int rc = 0;
706 
707 	pr_debug("%s: irq %d/0x%x\n", __func__, d->irq, hw_irq);
708 
709 	/* Is this valid ? */
710 	if (!cpumask_intersects(cpumask, cpu_online_mask))
711 		return -EINVAL;
712 
713 	/*
714 	 * If the existing target is already in the new mask and is
715 	 * online, then do nothing.
716 	 */
717 	if (xd->target != XIVE_INVALID_TARGET &&
718 	    cpu_online(xd->target) &&
719 	    cpumask_test_cpu(xd->target, cpumask))
720 		return IRQ_SET_MASK_OK;
721 
722 	/* Pick a new target */
723 	target = xive_pick_irq_target(d, cpumask);
724 
725 	/* No target found */
726 	if (target == XIVE_INVALID_TARGET)
727 		return -ENXIO;
728 
729 	/* Sanity check */
730 	if (WARN_ON(target >= nr_cpu_ids))
731 		target = smp_processor_id();
732 
733 	old_target = xd->target;
734 
735 	/*
736 	 * Only configure the irq if it's not currently passed-through to
737 	 * a KVM guest
738 	 */
739 	if (!irqd_is_forwarded_to_vcpu(d))
740 		rc = xive_ops->configure_irq(hw_irq,
741 					     get_hard_smp_processor_id(target),
742 					     xive_irq_priority, d->irq);
743 	if (rc < 0) {
744 		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
745 		return rc;
746 	}
747 
748 	pr_debug("  target: 0x%x\n", target);
749 	xd->target = target;
750 
751 	/* Give up previous target */
752 	if (old_target != XIVE_INVALID_TARGET)
753 		xive_dec_target_count(old_target);
754 
755 	return IRQ_SET_MASK_OK;
756 }
757 
758 static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
759 {
760 	struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
761 
762 	/*
763 	 * We only support these. This has really no effect other than setting
764 	 * the corresponding descriptor bits, but those will in turn
765 	 * affect the resend function when re-enabling an edge interrupt.
766 	 *
767 	 * Set the default to edge as explained in map().
768 	 */
769 	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
770 		flow_type = IRQ_TYPE_EDGE_RISING;
771 
772 	if (flow_type != IRQ_TYPE_EDGE_RISING &&
773 	    flow_type != IRQ_TYPE_LEVEL_LOW)
774 		return -EINVAL;
775 
776 	irqd_set_trigger_type(d, flow_type);
777 
778 	/*
779 	 * Double check it matches what the FW thinks
780 	 *
781 	 * NOTE: We don't know yet if the PAPR interface will provide
782 	 * the LSI vs MSI information apart from the device-tree so
783 	 * this check might have to move into an optional backend call
784 	 * that is specific to the native backend
785 	 */
786 	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
787 	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
788 		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
789 			d->irq, (u32)irqd_to_hwirq(d),
790 			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
791 			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
792 	}
793 
794 	return IRQ_SET_MASK_OK_NOCOPY;
795 }
796 
797 static int xive_irq_retrigger(struct irq_data *d)
798 {
799 	struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
800 
801 	/* This should be only for MSIs */
802 	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
803 		return 0;
804 
805 	/*
806 	 * To perform a retrigger, we first set the PQ bits to
807 	 * 11, then perform an EOI.
808 	 */
809 	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
810 	xive_do_source_eoi(xd);
811 
812 	return 1;
813 }
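/*
 * Why this works (load-based EOI path): PQ=11 marks the source both
 * pending and queued, so the EOI load clears PQ to 00 and returns the
 * old Q=1, which sends xive_do_source_eoi() down its re-trigger path
 * and makes the HW re-send the event as if the device had fired again.
 * With StoreEOI, the EOI store is expected to perform the equivalent
 * resend in HW.
 */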
814 
815 /*
816  * Caller holds the irq descriptor lock, so this won't be called
817  * concurrently with xive_get_irqchip_state on the same interrupt.
818  */
819 static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
820 {
821 	struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
822 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
823 	int rc;
824 	u8 pq;
825 
826 	/*
827 	 * This is called by KVM with state non-NULL for enabling
828 	 * pass-through or NULL for disabling it
829 	 */
830 	if (state) {
831 		irqd_set_forwarded_to_vcpu(d);
832 
833 		/* Set it to PQ=10 state to prevent further sends */
834 		pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
835 		if (!xd->stale_p) {
836 			xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
837 			xd->stale_p = !xd->saved_p;
838 		}
839 
840 		/* No target ? nothing to do */
841 		if (xd->target == XIVE_INVALID_TARGET) {
842 			/*
843 			 * An untargeted interrupt should also have
844 			 * been masked at the source
845 			 */
846 			WARN_ON(xd->saved_p);
847 
848 			return 0;
849 		}
850 
851 		/*
852 		 * If P was set, adjust state to PQ=11 to indicate
853 		 * that a resend is needed for the interrupt to reach
854 		 * the guest. Also remember the value of P.
855 		 *
856 		 * This also tells us that it's in flight to a host queue
857 		 * or has already been fetched but hasn't been EOIed yet
858 		 * by the host. Thus it's potentially using up a host
859 		 * queue slot. This is important to know because as long
860 		 * as this is the case, we must not hard-unmask it when
861 		 * "returning" that interrupt to the host.
862 		 *
863 		 * This saved_p is cleared by the host EOI, when we know
864 		 * for sure the queue slot is no longer in use.
865 		 */
866 		if (xd->saved_p) {
867 			xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
868 
869 			/*
870 			 * Sync the XIVE source HW to ensure the interrupt
871 			 * has gone through the EAS before we change its
872 			 * target to the guest. That should guarantee us
873 			 * that we *will* eventually get an EOI for it on
874 			 * the host. Otherwise there would be a small window
875 			 * for P to be seen here but the interrupt going
876 			 * to the guest queue.
877 			 */
878 			if (xive_ops->sync_source)
879 				xive_ops->sync_source(hw_irq);
880 		}
881 	} else {
882 		irqd_clr_forwarded_to_vcpu(d);
883 
884 		/* No host target ? hard mask and return */
885 		if (xd->target == XIVE_INVALID_TARGET) {
886 			xive_do_source_set_mask(xd, true);
887 			return 0;
888 		}
889 
890 		/*
891 		 * Sync the XIVE source HW to ensure the interrupt
892 		 * has gone through the EAS before we change its
893 		 * target to the host.
894 		 */
895 		if (xive_ops->sync_source)
896 			xive_ops->sync_source(hw_irq);
897 
898 		/*
899 		 * By convention we are called with the interrupt in
900 		 * a PQ=10 or PQ=11 state, ie, it won't fire and will
901 		 * have latched in Q whether there's a pending HW
902 		 * interrupt or not.
903 		 *
904 		 * First reconfigure the target.
905 		 */
906 		rc = xive_ops->configure_irq(hw_irq,
907 					     get_hard_smp_processor_id(xd->target),
908 					     xive_irq_priority, d->irq);
909 		if (rc)
910 			return rc;
911 
912 		/*
913 		 * Then if saved_p is not set, effectively re-enable the
914 		 * interrupt with an EOI. If it is set, we know there is
915 		 * still a message in a host queue somewhere that will be
916 		 * EOId eventually.
917 		 *
918 		 * Note: We don't check irqd_irq_disabled(). Effectively,
919 		 * we *will* let the irq get through even if masked if the
920 		 * HW is still firing it in order to deal with the whole
921 		 * saved_p business properly. If the interrupt triggers
922 		 * while masked, the generic code will re-mask it anyway.
923 		 */
924 		if (!xd->saved_p)
925 			xive_do_source_eoi(xd);
926 
927 	}
928 	return 0;
929 }
930 
931 /* Called with irq descriptor lock held. */
932 static int xive_get_irqchip_state(struct irq_data *data,
933 				  enum irqchip_irq_state which, bool *state)
934 {
935 	struct xive_irq_data *xd = irq_data_get_irq_chip_data(data);
936 	u8 pq;
937 
938 	switch (which) {
939 	case IRQCHIP_STATE_ACTIVE:
940 		pq = xive_esb_read(xd, XIVE_ESB_GET);
941 
942 		/*
943 		 * The esb value being all 1's means we couldn't get
944 		 * the PQ state of the interrupt through mmio. It may
945 		 * happen, for example when querying a PHB interrupt
946 		 * while the PHB is in an error state. We consider the
947 		 * interrupt to be inactive in that case.
948 		 */
949 		*state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
950 			(xd->saved_p || (!!(pq & XIVE_ESB_VAL_P) &&
951 			 !irqd_irq_disabled(data)));
952 		return 0;
953 	default:
954 		return -EINVAL;
955 	}
956 }
957 
958 static struct irq_chip xive_irq_chip = {
959 	.name = "XIVE-IRQ",
960 	.irq_startup = xive_irq_startup,
961 	.irq_shutdown = xive_irq_shutdown,
962 	.irq_eoi = xive_irq_eoi,
963 	.irq_mask = xive_irq_mask,
964 	.irq_unmask = xive_irq_unmask,
965 	.irq_set_affinity = xive_irq_set_affinity,
966 	.irq_set_type = xive_irq_set_type,
967 	.irq_retrigger = xive_irq_retrigger,
968 	.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
969 	.irq_get_irqchip_state = xive_get_irqchip_state,
970 };
971 
972 bool is_xive_irq(struct irq_chip *chip)
973 {
974 	return chip == &xive_irq_chip;
975 }
976 EXPORT_SYMBOL_GPL(is_xive_irq);
977 
978 void xive_cleanup_irq_data(struct xive_irq_data *xd)
979 {
980 	pr_debug("%s for HW 0x%x\n", __func__, xd->hw_irq);
981 
982 	if (xd->eoi_mmio) {
983 		iounmap(xd->eoi_mmio);
984 		if (xd->eoi_mmio == xd->trig_mmio)
985 			xd->trig_mmio = NULL;
986 		xd->eoi_mmio = NULL;
987 	}
988 	if (xd->trig_mmio) {
989 		iounmap(xd->trig_mmio);
990 		xd->trig_mmio = NULL;
991 	}
992 }
993 EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
994 
995 static struct xive_irq_data *xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
996 {
997 	struct xive_irq_data *xd;
998 	int rc;
999 
1000 	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
1001 	if (!xd)
1002 		return ERR_PTR(-ENOMEM);
1003 	rc = xive_ops->populate_irq_data(hw, xd);
1004 	if (rc) {
1005 		kfree(xd);
1006 		return ERR_PTR(rc);
1007 	}
1008 	xd->target = XIVE_INVALID_TARGET;
1009 
1010 	/*
1011 	 * Turn OFF by default the interrupt being mapped. A side
1012 	 * effect of this check is the mapping of the ESB page of the
1013 	 * interrupt in the Linux address space. This prevents page
1014 	 * fault issues in the crash handler which masks all
1015 	 * interrupts.
1016 	 */
1017 	xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
1018 
1019 	return xd;
1020 }
1021 
1022 static void xive_irq_free_data(struct irq_domain *domain, unsigned int virq)
1023 {
1024 	struct xive_irq_data *xd;
1025 	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
1026 
1027 	if (!data)
1028 		return;
1029 
1030 	xd = irq_data_get_irq_chip_data(data);
1031 	if (!xd)
1032 		return;
1033 
1034 	irq_domain_reset_irq_data(data);
1035 	xive_cleanup_irq_data(xd);
1036 	kfree(xd);
1037 }
1038 
1039 #ifdef CONFIG_SMP
1040 
1041 static void xive_cause_ipi(int cpu)
1042 {
1043 	struct xive_cpu *xc;
1044 	struct xive_irq_data *xd;
1045 
1046 	xc = per_cpu(xive_cpu, cpu);
1047 
1048 	DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
1049 		    smp_processor_id(), cpu, xc->hw_ipi);
1050 
1051 	xd = &xc->ipi_data;
1052 	if (WARN_ON(!xd->trig_mmio))
1053 		return;
1054 	out_be64(xd->trig_mmio, 0);
1055 }
1056 
1057 static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
1058 {
1059 	return smp_ipi_demux();
1060 }
1061 
1062 static void xive_ipi_eoi(struct irq_data *d)
1063 {
1064 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1065 
1066 	/* Handle possible race with unplug and drop stale IPIs */
1067 	if (!xc)
1068 		return;
1069 
1070 	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
1071 		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
1072 
1073 	xive_do_source_eoi(&xc->ipi_data);
1074 	xive_do_queue_eoi(xc);
1075 }
1076 
1077 static void xive_ipi_do_nothing(struct irq_data *d)
1078 {
1079 	/*
1080 	 * Nothing to do, we never mask/unmask IPIs, but the callback
1081 	 * has to exist for the struct irq_chip.
1082 	 */
1083 }
1084 
1085 static struct irq_chip xive_ipi_chip = {
1086 	.name = "XIVE-IPI",
1087 	.irq_eoi = xive_ipi_eoi,
1088 	.irq_mask = xive_ipi_do_nothing,
1089 	.irq_unmask = xive_ipi_do_nothing,
1090 };
1091 
1092 /*
1093  * IPIs are marked per-cpu. We use separate HW interrupts under the
1094  * hood but associated with the same "linux" interrupt
1095  */
1096 struct xive_ipi_alloc_info {
1097 	irq_hw_number_t hwirq;
1098 };
1099 
1100 static int xive_ipi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1101 				     unsigned int nr_irqs, void *arg)
1102 {
1103 	struct xive_ipi_alloc_info *info = arg;
1104 	int i;
1105 
1106 	for (i = 0; i < nr_irqs; i++) {
1107 		irq_domain_set_info(domain, virq + i, info->hwirq + i, &xive_ipi_chip,
1108 				    domain->host_data, handle_percpu_irq,
1109 				    NULL, NULL);
1110 	}
1111 	return 0;
1112 }
1113 
1114 static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
1115 	.alloc  = xive_ipi_irq_domain_alloc,
1116 };
1117 
1118 static int __init xive_init_ipis(void)
1119 {
1120 	struct fwnode_handle *fwnode;
1121 	struct irq_domain *ipi_domain;
1122 	unsigned int node;
1123 	int ret = -ENOMEM;
1124 
1125 	fwnode = irq_domain_alloc_named_fwnode("XIVE-IPI");
1126 	if (!fwnode)
1127 		goto out;
1128 
1129 	ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids,
1130 					      &xive_ipi_irq_domain_ops, NULL);
1131 	if (!ipi_domain)
1132 		goto out_free_fwnode;
1133 
1134 	xive_ipis = kcalloc(nr_node_ids, sizeof(*xive_ipis),
1135 			    GFP_KERNEL | __GFP_NOFAIL);
1136 	if (!xive_ipis)
1137 		goto out_free_domain;
1138 
1139 	for_each_node(node) {
1140 		struct xive_ipi_desc *xid = &xive_ipis[node];
1141 		struct xive_ipi_alloc_info info = { node };
1142 
1143 		/*
1144 		 * Map one IPI interrupt per node for all cpus of that node.
1145 		 * Since the HW interrupt number doesn't have any meaning,
1146 		 * simply use the node number.
1147 		 */
1148 		ret = irq_domain_alloc_irqs(ipi_domain, 1, node, &info);
1149 		if (ret < 0)
1150 			goto out_free_xive_ipis;
1151 		xid->irq = ret;
1152 
1153 		snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
1154 	}
1155 
1156 	return ret;
1157 
1158 out_free_xive_ipis:
1159 	kfree(xive_ipis);
1160 out_free_domain:
1161 	irq_domain_remove(ipi_domain);
1162 out_free_fwnode:
1163 	irq_domain_free_fwnode(fwnode);
1164 out:
1165 	return ret;
1166 }
1167 
1168 static int xive_request_ipi(unsigned int cpu)
1169 {
1170 	struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
1171 	int ret;
1172 
1173 	if (atomic_inc_return(&xid->started) > 1)
1174 		return 0;
1175 
1176 	ret = request_irq(xid->irq, xive_muxed_ipi_action,
1177 			  IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD,
1178 			  xid->name, NULL);
1179 
1180 	WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
1181 	return ret;
1182 }
1183 
1184 static int xive_setup_cpu_ipi(unsigned int cpu)
1185 {
1186 	unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
1187 	struct xive_cpu *xc;
1188 	int rc;
1189 
1190 	pr_debug("Setting up IPI for CPU %d\n", cpu);
1191 
1192 	xc = per_cpu(xive_cpu, cpu);
1193 
1194 	/* Check if we are already setup */
1195 	if (xc->hw_ipi != XIVE_BAD_IRQ)
1196 		return 0;
1197 
1198 	/* Register the IPI */
1199 	xive_request_ipi(cpu);
1200 
1201 	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
1202 	if (xive_ops->get_ipi(cpu, xc))
1203 		return -EIO;
1204 
1205 	/*
1206 	 * Populate the IRQ data in the xive_cpu structure and
1207 	 * configure the HW / enable the IPIs.
1208 	 */
1209 	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
1210 	if (rc) {
1211 		pr_err("Failed to populate IPI data on CPU %d\n", cpu);
1212 		return -EIO;
1213 	}
1214 	rc = xive_ops->configure_irq(xc->hw_ipi,
1215 				     get_hard_smp_processor_id(cpu),
1216 				     xive_irq_priority, xive_ipi_irq);
1217 	if (rc) {
1218 		pr_err("Failed to map IPI CPU %d\n", cpu);
1219 		return -EIO;
1220 	}
1221 	pr_debug("CPU %d HW IPI 0x%x, virq %d, trig_mmio=%p\n", cpu,
1222 		 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);
1223 
1224 	/* Unmask it */
1225 	xive_do_source_set_mask(&xc->ipi_data, false);
1226 
1227 	return 0;
1228 }
1229 
1230 noinstr static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
1231 {
1232 	unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
1233 
1234 	/* Disable the IPI and free the IRQ data */
1235 
1236 	/* Already cleaned up ? */
1237 	if (xc->hw_ipi == XIVE_BAD_IRQ)
1238 		return;
1239 
1240 	/* TODO: clear IPI mapping */
1241 
1242 	/* Mask the IPI */
1243 	xive_do_source_set_mask(&xc->ipi_data, true);
1244 
1245 	/*
1246 	 * Note: We don't call xive_cleanup_irq_data() to free
1247 	 * the mappings as this is called from an IPI on kexec
1248 	 * which is not a safe environment to call iounmap()
1249 	 */
1250 
1251 	/* Deconfigure/mask in the backend */
1252 	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
1253 				0xff, xive_ipi_irq);
1254 
1255 	/* Free the IPIs in the backend */
1256 	xive_ops->put_ipi(cpu, xc);
1257 }
1258 
1259 void __init xive_smp_probe(void)
1260 {
1261 	smp_ops->cause_ipi = xive_cause_ipi;
1262 
1263 	/* Register the IPI */
1264 	xive_init_ipis();
1265 
1266 	/* Allocate and setup IPI for the boot CPU */
1267 	xive_setup_cpu_ipi(smp_processor_id());
1268 }
1269 
1270 #endif /* CONFIG_SMP */
1271 
1272 static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
1273 			       irq_hw_number_t hw)
1274 {
1275 	struct xive_irq_data *xd;
1276 
1277 	/*
1278 	 * Mark interrupts as edge sensitive by default so that resend
1279 	 * actually works. Will fix that up below if needed.
1280 	 */
1281 	irq_clear_status_flags(virq, IRQ_LEVEL);
1282 
1283 	xd = xive_irq_alloc_data(virq, hw);
1284 	if (IS_ERR(xd))
1285 		return PTR_ERR(xd);
1286 
1287 	irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);
1288 	irq_set_chip_data(virq, xd);
1289 
1290 	return 0;
1291 }
1292 
1293 static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
1294 {
1295 	xive_irq_free_data(d, virq);
1296 }
1297 
1298 static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
1299 				 const u32 *intspec, unsigned int intsize,
1300 				 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
1301 
1302 {
1303 	*out_hwirq = intspec[0];
1304 
1305 	/*
1306 	 * If intsize is at least 2, we look for the type in the second cell,
1307 	 * where we assume the LSB indicates a level interrupt.
1308 	 */
1309 	if (intsize > 1) {
1310 		if (intspec[1] & 1)
1311 			*out_flags = IRQ_TYPE_LEVEL_LOW;
1312 		else
1313 			*out_flags = IRQ_TYPE_EDGE_RISING;
1314 	} else
1315 		*out_flags = IRQ_TYPE_LEVEL_LOW;
1316 
1317 	return 0;
1318 }
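/*
 * Device-tree sketch (hypothetical fragment): with a two-cell
 * specifier, the second cell's LSB selects the trigger type decoded
 * above, e.g.:
 *
 *	interrupt-parent = <&xive>;
 *	interrupts = <0x1234 1>;	// hwirq 0x1234, level low
 *
 * while <0x1234 0> would mean edge rising, and a single-cell specifier
 * defaults to level low.
 */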
1319 
1320 static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
1321 				 enum irq_domain_bus_token bus_token)
1322 {
1323 	return xive_ops->match(node);
1324 }
1325 
1326 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
1327 static const char * const esb_names[] = { "RESET", "OFF", "PENDING", "QUEUED" };
1328 
1329 static const struct {
1330 	u64  mask;
1331 	char *name;
1332 } xive_irq_flags[] = {
1333 	{ XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" },
1334 	{ XIVE_IRQ_FLAG_LSI,       "LSI"       },
1335 	{ XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" },
1336 	{ XIVE_IRQ_FLAG_NO_EOI,    "NO_EOI"    },
1337 };
1338 
1339 static void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d,
1340 				       struct irq_data *irqd, int ind)
1341 {
1342 	struct xive_irq_data *xd;
1343 	u64 val;
1344 	int i;
1345 
1346 	/* No IRQ domain level information. To be done */
1347 	if (!irqd)
1348 		return;
1349 
1350 	if (!is_xive_irq(irq_data_get_irq_chip(irqd)))
1351 		return;
1352 
1353 	seq_printf(m, "%*sXIVE:\n", ind, "");
1354 	ind++;
1355 
1356 	xd = irq_data_get_irq_chip_data(irqd);
1357 	if (!xd) {
1358 		seq_printf(m, "%*snot assigned\n", ind, "");
1359 		return;
1360 	}
1361 
1362 	val = xive_esb_read(xd, XIVE_ESB_GET);
1363 	seq_printf(m, "%*sESB:      %s\n", ind, "", esb_names[val & 0x3]);
1364 	seq_printf(m, "%*sPstate:   %s %s\n", ind, "", xd->stale_p ? "stale" : "",
1365 		   xd->saved_p ? "saved" : "");
1366 	seq_printf(m, "%*sTarget:   %d\n", ind, "", xd->target);
1367 	seq_printf(m, "%*sChip:     %d\n", ind, "", xd->src_chip);
1368 	seq_printf(m, "%*sTrigger:  0x%016llx\n", ind, "", xd->trig_page);
1369 	seq_printf(m, "%*sEOI:      0x%016llx\n", ind, "", xd->eoi_page);
1370 	seq_printf(m, "%*sFlags:    0x%llx\n", ind, "", xd->flags);
1371 	for (i = 0; i < ARRAY_SIZE(xive_irq_flags); i++) {
1372 		if (xd->flags & xive_irq_flags[i].mask)
1373 			seq_printf(m, "%*s%s\n", ind + 12, "", xive_irq_flags[i].name);
1374 	}
1375 }
1376 #endif
1377 
1378 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
1379 static int xive_irq_domain_translate(struct irq_domain *d,
1380 				     struct irq_fwspec *fwspec,
1381 				     unsigned long *hwirq,
1382 				     unsigned int *type)
1383 {
1384 	return xive_irq_domain_xlate(d, to_of_node(fwspec->fwnode),
1385 				     fwspec->param, fwspec->param_count,
1386 				     hwirq, type);
1387 }
1388 
1389 static int xive_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1390 				 unsigned int nr_irqs, void *arg)
1391 {
1392 	struct irq_fwspec *fwspec = arg;
1393 	struct xive_irq_data *xd;
1394 	irq_hw_number_t hwirq;
1395 	unsigned int type = IRQ_TYPE_NONE;
1396 	int i, rc;
1397 
1398 	rc = xive_irq_domain_translate(domain, fwspec, &hwirq, &type);
1399 	if (rc)
1400 		return rc;
1401 
1402 	pr_debug("%s %d/0x%lx #%d\n", __func__, virq, hwirq, nr_irqs);
1403 
1404 	for (i = 0; i < nr_irqs; i++) {
1405 		/* TODO: call xive_irq_domain_map() */
1406 
1407 		/*
1408 		 * Mark interrupts as edge sensitive by default so that resend
1409 		 * actually works. Will fix that up below if needed.
1410 		 */
1411 		irq_clear_status_flags(virq, IRQ_LEVEL);
1412 
1413 		/* allocates and sets handler data */
1414 		xd = xive_irq_alloc_data(virq + i, hwirq + i);
1415 		if (IS_ERR(xd))
1416 			return PTR_ERR(xd);
1417 
1418 		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, &xive_irq_chip, xd);
1419 		irq_set_handler(virq + i, handle_fasteoi_irq);
1420 	}
1421 
1422 	return 0;
1423 }
1424 
1425 static void xive_irq_domain_free(struct irq_domain *domain,
1426 				 unsigned int virq, unsigned int nr_irqs)
1427 {
1428 	int i;
1429 
1430 	pr_debug("%s %d #%d\n", __func__, virq, nr_irqs);
1431 
1432 	for (i = 0; i < nr_irqs; i++)
1433 		xive_irq_free_data(domain, virq + i);
1434 }
1435 #endif
1436 
1437 static const struct irq_domain_ops xive_irq_domain_ops = {
1438 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
1439 	.alloc	= xive_irq_domain_alloc,
1440 	.free	= xive_irq_domain_free,
1441 	.translate = xive_irq_domain_translate,
1442 #endif
1443 	.match = xive_irq_domain_match,
1444 	.map = xive_irq_domain_map,
1445 	.unmap = xive_irq_domain_unmap,
1446 	.xlate = xive_irq_domain_xlate,
1447 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
1448 	.debug_show = xive_irq_domain_debug_show,
1449 #endif
1450 };
1451 
1452 static void __init xive_init_host(struct device_node *np)
1453 {
1454 	xive_irq_domain = irq_domain_create_tree(of_fwnode_handle(np), &xive_irq_domain_ops, NULL);
1455 	if (WARN_ON(xive_irq_domain == NULL))
1456 		return;
1457 	irq_set_default_domain(xive_irq_domain);
1458 }
1459 
1460 static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
1461 {
1462 	if (xc->queue[xive_irq_priority].qpage)
1463 		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
1464 }
1465 
1466 static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
1467 {
1468 	int rc = 0;
1469 
1470 	/* We set up one queue for now, with a 64k page */
1471 	if (!xc->queue[xive_irq_priority].qpage)
1472 		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);
1473 
1474 	return rc;
1475 }
1476 
1477 static int xive_prepare_cpu(unsigned int cpu)
1478 {
1479 	struct xive_cpu *xc;
1480 
1481 	xc = per_cpu(xive_cpu, cpu);
1482 	if (!xc) {
1483 		xc = kzalloc_node(sizeof(struct xive_cpu),
1484 				  GFP_KERNEL, cpu_to_node(cpu));
1485 		if (!xc)
1486 			return -ENOMEM;
1487 		xc->hw_ipi = XIVE_BAD_IRQ;
1488 		xc->chip_id = XIVE_INVALID_CHIP_ID;
1489 		if (xive_ops->prepare_cpu)
1490 			xive_ops->prepare_cpu(cpu, xc);
1491 
1492 		per_cpu(xive_cpu, cpu) = xc;
1493 	}
1494 
1495 	/* Setup EQs if not already */
1496 	return xive_setup_cpu_queues(cpu, xc);
1497 }
1498 
1499 static void xive_setup_cpu(void)
1500 {
1501 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1502 
1503 	/* The backend might have additional things to do */
1504 	if (xive_ops->setup_cpu)
1505 		xive_ops->setup_cpu(smp_processor_id(), xc);
1506 
1507 	/* Set CPPR to 0xff to enable flow of interrupts */
1508 	xc->cppr = 0xff;
1509 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
1510 }
1511 
1512 #ifdef CONFIG_SMP
1513 void xive_smp_setup_cpu(void)
1514 {
1515 	pr_debug("SMP setup CPU %d\n", smp_processor_id());
1516 
1517 	/* This will have already been done on the boot CPU */
1518 	if (smp_processor_id() != boot_cpuid)
1519 		xive_setup_cpu();
1520 
1521 }
1522 
1523 int xive_smp_prepare_cpu(unsigned int cpu)
1524 {
1525 	int rc;
1526 
1527 	/* Allocate per-CPU data and queues */
1528 	rc = xive_prepare_cpu(cpu);
1529 	if (rc)
1530 		return rc;
1531 
1532 	/* Allocate and setup IPI for the new CPU */
1533 	return xive_setup_cpu_ipi(cpu);
1534 }
1535 
1536 #ifdef CONFIG_HOTPLUG_CPU
1537 static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
1538 {
1539 	u32 irq;
1540 
1541 	/* We assume local irqs are disabled */
1542 	WARN_ON(!irqs_disabled());
1543 
1544 	/* Check what's already in the CPU queue */
1545 	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
1546 		/*
1547 		 * We need to re-route that interrupt to its new destination.
1548 		 * First get and lock the descriptor
1549 		 */
1550 		struct irq_desc *desc = irq_to_desc(irq);
1551 		struct irq_data *d = irq_desc_get_irq_data(desc);
1552 		struct xive_irq_data *xd;
1553 
1554 		/*
1555 		 * Ignore anything that isn't a XIVE irq, and ignore
1556 		 * IPIs, which can simply be dropped.
1557 		 */
1558 		if (d->domain != xive_irq_domain)
1559 			continue;
1560 
1561 		/*
1562 		 * The IRQ should have already been re-routed, it's just a
1563 		 * stale entry in the old queue, so re-trigger it in order
1564 		 * to make it reach its new destination.
1565 		 */
1566 #ifdef DEBUG_FLUSH
1567 		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
1568 			cpu, irq);
1569 #endif
1570 		raw_spin_lock(&desc->lock);
1571 		xd = irq_desc_get_chip_data(desc);
1572 
1573 		/*
1574 		 * Clear saved_p to indicate that it's no longer pending
1575 		 */
1576 		xd->saved_p = false;
1577 
1578 		/*
1579 		 * For LSIs, we EOI, this will cause a resend if it's
1580 		 * still asserted. Otherwise do an MSI retrigger.
1581 		 */
1582 		if (xd->flags & XIVE_IRQ_FLAG_LSI)
1583 			xive_do_source_eoi(xd);
1584 		else
1585 			xive_irq_retrigger(d);
1586 
1587 		raw_spin_unlock(&desc->lock);
1588 	}
1589 }
1590 
1591 void xive_smp_disable_cpu(void)
1592 {
1593 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1594 	unsigned int cpu = smp_processor_id();
1595 
1596 	/* Migrate interrupts away from the CPU */
1597 	irq_migrate_all_off_this_cpu();
1598 
1599 	/* Set CPPR to 0 to disable flow of interrupts */
1600 	xc->cppr = 0;
1601 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
1602 
1603 	/* Flush everything still in the queue */
1604 	xive_flush_cpu_queue(cpu, xc);
1605 
1606 	/* Re-enable CPPR */
1607 	xc->cppr = 0xff;
1608 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
1609 }
1610 
1611 void xive_flush_interrupt(void)
1612 {
1613 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1614 	unsigned int cpu = smp_processor_id();
1615 
1616 	/* Called if an interrupt occurs while the CPU is hot unplugged */
1617 	xive_flush_cpu_queue(cpu, xc);
1618 }
1619 
1620 #endif /* CONFIG_HOTPLUG_CPU */
1621 
1622 #endif /* CONFIG_SMP */
1623 
1624 noinstr void xive_teardown_cpu(void)
1625 {
1626 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1627 	unsigned int cpu = smp_processor_id();
1628 
1629 	/* Set CPPR to 0 to disable flow of interrupts */
1630 	xc->cppr = 0;
1631 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
1632 
1633 	if (xive_ops->teardown_cpu)
1634 		xive_ops->teardown_cpu(cpu, xc);
1635 
1636 #ifdef CONFIG_SMP
1637 	/* Get rid of IPI */
1638 	xive_cleanup_cpu_ipi(cpu, xc);
1639 #endif
1640 
1641 	/* Disable and free the queues */
1642 	xive_cleanup_cpu_queues(cpu, xc);
1643 }
1644 
1645 void xive_shutdown(void)
1646 {
1647 	xive_ops->shutdown();
1648 }
1649 
1650 bool __init xive_core_init(struct device_node *np, const struct xive_ops *ops,
1651 			   void __iomem *area, u32 offset, u8 max_prio)
1652 {
1653 	xive_tima = area;
1654 	xive_tima_offset = offset;
1655 	xive_ops = ops;
1656 	xive_irq_priority = max_prio;
1657 
1658 	ppc_md.get_irq = xive_get_irq;
1659 	__xive_enabled = true;
1660 
1661 	pr_debug("Initializing host..\n");
1662 	xive_init_host(np);
1663 
1664 	pr_debug("Initializing boot CPU..\n");
1665 
1666 	/* Allocate per-CPU data and queues */
1667 	xive_prepare_cpu(smp_processor_id());
1668 
1669 	/* Get ready for interrupts */
1670 	xive_setup_cpu();
1671 
1672 	pr_info("Interrupt handling initialized with %s backend\n",
1673 		xive_ops->name);
1674 	pr_info("Using priority %d for all interrupts\n", max_prio);
1675 
1676 	return true;
1677 }
1678 
1679 __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
1680 {
1681 	unsigned int alloc_order;
1682 	struct page *pages;
1683 	__be32 *qpage;
1684 
1685 	alloc_order = xive_alloc_order(queue_shift);
1686 	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
1687 	if (!pages)
1688 		return ERR_PTR(-ENOMEM);
1689 	qpage = (__be32 *)page_address(pages);
1690 	memset(qpage, 0, 1 << queue_shift);
1691 
1692 	return qpage;
1693 }
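/*
 * Worked example (assuming xive_alloc_order() from xive-internal.h is
 * max(queue_shift - PAGE_SHIFT, 0)): with 64K pages (PAGE_SHIFT = 16),
 * a queue_shift of 16 describes a 64K queue (16K 4-byte entries) and
 * allocates a single order-0 page, while a queue_shift of 21 (512K
 * entries) would need an order-5 allocation.
 */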
1694 
1695 static int __init xive_off(char *arg)
1696 {
1697 	xive_cmdline_disabled = true;
1698 	return 1;
1699 }
1700 __setup("xive=off", xive_off);
1701 
1702 static int __init xive_store_eoi_cmdline(char *arg)
1703 {
1704 	if (!arg)
1705 		return 1;
1706 
1707 	if (strncmp(arg, "off", 3) == 0) {
1708 		pr_info("StoreEOI disabled on kernel command line\n");
1709 		xive_store_eoi = false;
1710 	}
1711 	return 1;
1712 }
1713 __setup("xive.store-eoi=", xive_store_eoi_cmdline);
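/*
 * Usage note: booting with "xive.store-eoi=off" (or clearing the
 * "store-eoi" debugfs file created below) makes xive_is_store_eoi()
 * return false, forcing every source back to the load-based EOI path
 * in xive_do_source_eoi(), e.g. to rule StoreEOI out when debugging.
 */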
1714 
1715 #ifdef CONFIG_DEBUG_FS
1716 static void xive_debug_show_ipi(struct seq_file *m, int cpu)
1717 {
1718 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
1719 
1720 	seq_printf(m, "CPU %d: ", cpu);
1721 	if (xc) {
1722 		seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);
1723 
1724 #ifdef CONFIG_SMP
1725 		{
1726 			char buffer[128];
1727 
1728 			xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer));
1729 			seq_printf(m, "IPI=0x%08x %s", xc->hw_ipi, buffer);
1730 		}
1731 #endif
1732 	}
1733 	seq_puts(m, "\n");
1734 }
1735 
1736 static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d)
1737 {
1738 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
1739 	int rc;
1740 	u32 target;
1741 	u8 prio;
1742 	u32 lirq;
1743 	char buffer[128];
1744 
1745 	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
1746 	if (rc) {
1747 		seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
1748 		return;
1749 	}
1750 
1751 	seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
1752 		   hw_irq, target, prio, lirq);
1753 
1754 	xive_irq_data_dump(irq_data_get_irq_chip_data(d), buffer, sizeof(buffer));
1755 	seq_puts(m, buffer);
1756 	seq_puts(m, "\n");
1757 }
1758 
1759 static int xive_irq_debug_show(struct seq_file *m, void *private)
1760 {
1761 	unsigned int i;
1762 	struct irq_desc *desc;
1763 
1764 	for_each_irq_desc(i, desc) {
1765 		struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i);
1766 
1767 		if (d)
1768 			xive_debug_show_irq(m, d);
1769 	}
1770 	return 0;
1771 }
1772 DEFINE_SHOW_ATTRIBUTE(xive_irq_debug);
1773 
1774 static int xive_ipi_debug_show(struct seq_file *m, void *private)
1775 {
1776 	int cpu;
1777 
1778 	if (xive_ops->debug_show)
1779 		xive_ops->debug_show(m, private);
1780 
1781 	for_each_online_cpu(cpu)
1782 		xive_debug_show_ipi(m, cpu);
1783 	return 0;
1784 }
1785 DEFINE_SHOW_ATTRIBUTE(xive_ipi_debug);
1786 
1787 static void xive_eq_debug_show_one(struct seq_file *m, struct xive_q *q, u8 prio)
1788 {
1789 	int i;
1790 
1791 	seq_printf(m, "EQ%d idx=%d T=%d\n", prio, q->idx, q->toggle);
1792 	if (q->qpage) {
1793 		for (i = 0; i < q->msk + 1; i++) {
1794 			if (!(i % 8))
1795 				seq_printf(m, "%05d ", i);
1796 			seq_printf(m, "%08x%s", be32_to_cpup(q->qpage + i),
1797 				   (i + 1) % 8 ? " " : "\n");
1798 		}
1799 	}
1800 	seq_puts(m, "\n");
1801 }
1802 
1803 static int xive_eq_debug_show(struct seq_file *m, void *private)
1804 {
1805 	int cpu = (long)m->private;
1806 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
1807 
1808 	if (xc)
1809 		xive_eq_debug_show_one(m, &xc->queue[xive_irq_priority],
1810 				       xive_irq_priority);
1811 	return 0;
1812 }
1813 DEFINE_SHOW_ATTRIBUTE(xive_eq_debug);
1814 
1815 static void xive_core_debugfs_create(void)
1816 {
1817 	struct dentry *xive_dir;
1818 	struct dentry *xive_eq_dir;
1819 	long cpu;
1820 	char name[16];
1821 
1822 	xive_dir = debugfs_create_dir("xive", arch_debugfs_dir);
1823 	if (IS_ERR(xive_dir))
1824 		return;
1825 
1826 	debugfs_create_file("ipis", 0400, xive_dir,
1827 			    NULL, &xive_ipi_debug_fops);
1828 	debugfs_create_file("interrupts", 0400, xive_dir,
1829 			    NULL, &xive_irq_debug_fops);
1830 	xive_eq_dir = debugfs_create_dir("eqs", xive_dir);
1831 	for_each_possible_cpu(cpu) {
1832 		snprintf(name, sizeof(name), "cpu%ld", cpu);
1833 		debugfs_create_file(name, 0400, xive_eq_dir, (void *)cpu,
1834 				    &xive_eq_debug_fops);
1835 	}
1836 	debugfs_create_bool("store-eoi", 0600, xive_dir, &xive_store_eoi);
1837 
1838 	if (xive_ops->debug_create)
1839 		xive_ops->debug_create(xive_dir);
1840 }
1841 #else
1842 static inline void xive_core_debugfs_create(void) { }
1843 #endif /* CONFIG_DEBUG_FS */
1844 
1845 int xive_core_debug_init(void)
1846 {
1847 	if (xive_enabled() && IS_ENABLED(CONFIG_DEBUG_FS))
1848 		xive_core_debugfs_create();
1849 
1850 	return 0;
1851 }
1852