Lines Matching +full:opp +full:- +full:0
44 #define VID 0x03 /* MPIC version ID */
47 #define OPENPIC_FLAG_IDR_CRIT (1 << 0)
48 #define OPENPIC_FLAG_ILR (2 << 0)
51 #define OPENPIC_REG_SIZE 0x40000
52 #define OPENPIC_GLB_REG_START 0x0
53 #define OPENPIC_GLB_REG_SIZE 0x10F0
54 #define OPENPIC_TMR_REG_START 0x10F0
55 #define OPENPIC_TMR_REG_SIZE 0x220
56 #define OPENPIC_MSI_REG_START 0x1600
57 #define OPENPIC_MSI_REG_SIZE 0x200
58 #define OPENPIC_SUMMARY_REG_START 0x3800
59 #define OPENPIC_SUMMARY_REG_SIZE 0x800
60 #define OPENPIC_SRC_REG_START 0x10000
61 #define OPENPIC_SRC_REG_SIZE (MAX_SRC * 0x20)
62 #define OPENPIC_CPU_REG_START 0x20000
63 #define OPENPIC_CPU_REG_SIZE (0x100 + ((MAX_CPU - 1) * 0x1000))
79 #define FRR_VID_SHIFT 0
84 #define VIR_GENERIC 0x00000000 /* Generic Vendor ID */
86 #define GCR_RESET 0x80000000
87 #define GCR_MODE_PASS 0x00000000
88 #define GCR_MODE_MIXED 0x20000000
89 #define GCR_MODE_PROXY 0x60000000
91 #define TBCR_CI 0x80000000 /* count inhibit */
92 #define TCCR_TOG 0x80000000 /* toggles when decrement to zero */
99 #define IDR_P0_SHIFT 0
101 #define ILR_INTTGT_MASK 0x000000ff
102 #define ILR_INTTGT_INT 0x00
103 #define ILR_INTTGT_CINT 0x01 /* critical */
104 #define ILR_INTTGT_MCP 0x02 /* machine check */
107 #define MSIIR_OFFSET 0x140
109 #define MSIIR_SRS_MASK (0x7 << MSIIR_SRS_SHIFT)
111 #define MSIIR_IBS_MASK (0x1f << MSIIR_IBS_SHIFT)
116 struct kvm_vcpu *vcpu = current->thread.kvm_vcpu; in get_current_cpu()
117 return vcpu ? vcpu->arch.irq_cpu_id : -1; in get_current_cpu()
120 return -1; in get_current_cpu()
128 static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
132 IRQ_TYPE_NORMAL = 0,
133 IRQ_TYPE_FSLINT, /* FSL internal interrupt -- level only */
154 bool level:1; /* level-triggered */
169 #define IVPR_PRIORITY_MASK (0xF << 16)
171 #define IVPR_VECTOR(opp, _ivprr_) ((_ivprr_) & (opp)->vector_mask) argument
173 /* IDR[EP/CI] are only for FSL MPIC prior to v4.0 */
174 #define IDR_EP 0x80000000 /* external pin */
175 #define IDR_CI 0x40000000 /* critical interrupt */
184 /* Count of IRQ sources asserting on non-INT outputs */
241 static void mpic_irq_raise(struct openpic *opp, struct irq_dest *dst, in mpic_irq_raise() argument
248 if (!dst->vcpu) { in mpic_irq_raise()
250 __func__, (int)(dst - &opp->dst[0])); in mpic_irq_raise()
254 pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id, in mpic_irq_raise()
260 kvm_vcpu_ioctl_interrupt(dst->vcpu, &irq); in mpic_irq_raise()
263 static void mpic_irq_lower(struct openpic *opp, struct irq_dest *dst, in mpic_irq_lower() argument
266 if (!dst->vcpu) { in mpic_irq_lower()
268 __func__, (int)(dst - &opp->dst[0])); in mpic_irq_lower()
272 pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id, in mpic_irq_lower()
278 kvmppc_core_dequeue_external(dst->vcpu); in mpic_irq_lower()
283 set_bit(n_IRQ, q->queue); in IRQ_setbit()
288 clear_bit(n_IRQ, q->queue); in IRQ_resetbit()
291 static void IRQ_check(struct openpic *opp, struct irq_queue *q) in IRQ_check() argument
293 int irq = -1; in IRQ_check()
294 int next = -1; in IRQ_check()
295 int priority = -1; in IRQ_check()
298 irq = find_next_bit(q->queue, opp->max_irq, irq + 1); in IRQ_check()
299 if (irq == opp->max_irq) in IRQ_check()
303 irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority); in IRQ_check()
305 if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) { in IRQ_check()
307 priority = IVPR_PRIORITY(opp->src[irq].ivpr); in IRQ_check()
311 q->next = next; in IRQ_check()
312 q->priority = priority; in IRQ_check()
315 static int IRQ_get_next(struct openpic *opp, struct irq_queue *q) in IRQ_get_next() argument
318 IRQ_check(opp, q); in IRQ_get_next()
320 return q->next; in IRQ_get_next()
323 static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ, in IRQ_local_pipe() argument
330 dst = &opp->dst[n_CPU]; in IRQ_local_pipe()
331 src = &opp->src[n_IRQ]; in IRQ_local_pipe()
336 if (src->output != ILR_INTTGT_INT) { in IRQ_local_pipe()
338 __func__, src->output, n_IRQ, active, was_active, in IRQ_local_pipe()
339 dst->outputs_active[src->output]); in IRQ_local_pipe()
347 dst->outputs_active[src->output]++ == 0) { in IRQ_local_pipe()
349 __func__, src->output, n_CPU, n_IRQ); in IRQ_local_pipe()
350 mpic_irq_raise(opp, dst, src->output); in IRQ_local_pipe()
354 --dst->outputs_active[src->output] == 0) { in IRQ_local_pipe()
356 __func__, src->output, n_CPU, n_IRQ); in IRQ_local_pipe()
357 mpic_irq_lower(opp, dst, src->output); in IRQ_local_pipe()
364 priority = IVPR_PRIORITY(src->ivpr); in IRQ_local_pipe()
370 IRQ_setbit(&dst->raised, n_IRQ); in IRQ_local_pipe()
372 IRQ_resetbit(&dst->raised, n_IRQ); in IRQ_local_pipe()
374 IRQ_check(opp, &dst->raised); in IRQ_local_pipe()
376 if (active && priority <= dst->ctpr) { in IRQ_local_pipe()
378 __func__, n_IRQ, priority, dst->ctpr, n_CPU); in IRQ_local_pipe()
379 active = 0; in IRQ_local_pipe()
383 if (IRQ_get_next(opp, &dst->servicing) >= 0 && in IRQ_local_pipe()
384 priority <= dst->servicing.priority) { in IRQ_local_pipe()
386 __func__, n_IRQ, dst->servicing.next, n_CPU); in IRQ_local_pipe()
389 __func__, n_CPU, n_IRQ, dst->raised.next); in IRQ_local_pipe()
390 mpic_irq_raise(opp, dst, ILR_INTTGT_INT); in IRQ_local_pipe()
393 IRQ_get_next(opp, &dst->servicing); in IRQ_local_pipe()
394 if (dst->raised.priority > dst->ctpr && in IRQ_local_pipe()
395 dst->raised.priority > dst->servicing.priority) { in IRQ_local_pipe()
397 __func__, n_IRQ, dst->raised.next, in IRQ_local_pipe()
398 dst->raised.priority, dst->ctpr, in IRQ_local_pipe()
399 dst->servicing.priority, n_CPU); in IRQ_local_pipe()
403 __func__, n_IRQ, dst->ctpr, in IRQ_local_pipe()
404 dst->servicing.priority, n_CPU); in IRQ_local_pipe()
405 mpic_irq_lower(opp, dst, ILR_INTTGT_INT); in IRQ_local_pipe()
411 static void openpic_update_irq(struct openpic *opp, int n_IRQ) in openpic_update_irq() argument
417 src = &opp->src[n_IRQ]; in openpic_update_irq()
418 active = src->pending; in openpic_update_irq()
420 if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) { in openpic_update_irq()
426 was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK); in openpic_update_irq()
429 * We don't have a similar check for already-active because in openpic_update_irq()
438 src->ivpr |= IVPR_ACTIVITY_MASK; in openpic_update_irq()
440 src->ivpr &= ~IVPR_ACTIVITY_MASK; in openpic_update_irq()
442 if (src->destmask == 0) { in openpic_update_irq()
448 if (src->destmask == (1 << src->last_cpu)) { in openpic_update_irq()
450 IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active); in openpic_update_irq()
451 } else if (!(src->ivpr & IVPR_MODE_MASK)) { in openpic_update_irq()
453 for (i = 0; i < opp->nb_cpus; i++) { in openpic_update_irq()
454 if (src->destmask & (1 << i)) { in openpic_update_irq()
455 IRQ_local_pipe(opp, i, n_IRQ, active, in openpic_update_irq()
461 for (i = src->last_cpu + 1; i != src->last_cpu; i++) { in openpic_update_irq()
462 if (i == opp->nb_cpus) in openpic_update_irq()
463 i = 0; in openpic_update_irq()
465 if (src->destmask & (1 << i)) { in openpic_update_irq()
466 IRQ_local_pipe(opp, i, n_IRQ, active, in openpic_update_irq()
468 src->last_cpu = i; in openpic_update_irq()
477 struct openpic *opp = opaque; in openpic_set_irq() local
485 src = &opp->src[n_IRQ]; in openpic_set_irq()
486 pr_debug("openpic: set irq %d = %d ivpr=0x%08x\n", in openpic_set_irq()
487 n_IRQ, level, src->ivpr); in openpic_set_irq()
488 if (src->level) { in openpic_set_irq()
489 /* level-sensitive irq */ in openpic_set_irq()
490 src->pending = level; in openpic_set_irq()
491 openpic_update_irq(opp, n_IRQ); in openpic_set_irq()
493 /* edge-sensitive irq */ in openpic_set_irq()
495 src->pending = 1; in openpic_set_irq()
496 openpic_update_irq(opp, n_IRQ); in openpic_set_irq()
499 if (src->output != ILR_INTTGT_INT) { in openpic_set_irq()
500 /* Edge-triggered interrupts shouldn't be used in openpic_set_irq()
501 * with non-INT delivery, but just in case, in openpic_set_irq()
506 src->pending = 0; in openpic_set_irq()
507 openpic_update_irq(opp, n_IRQ); in openpic_set_irq()
512 static void openpic_reset(struct openpic *opp) in openpic_reset() argument
516 opp->gcr = GCR_RESET; in openpic_reset()
518 opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) | in openpic_reset()
519 (opp->vid << FRR_VID_SHIFT); in openpic_reset()
521 opp->pir = 0; in openpic_reset()
522 opp->spve = -1 & opp->vector_mask; in openpic_reset()
523 opp->tfrr = opp->tfrr_reset; in openpic_reset()
525 for (i = 0; i < opp->max_irq; i++) { in openpic_reset()
526 opp->src[i].ivpr = opp->ivpr_reset; in openpic_reset()
528 switch (opp->src[i].type) { in openpic_reset()
530 opp->src[i].level = in openpic_reset()
531 !!(opp->ivpr_reset & IVPR_SENSE_MASK); in openpic_reset()
535 opp->src[i].ivpr |= IVPR_POLARITY_MASK; in openpic_reset()
542 write_IRQreg_idr(opp, i, opp->idr_reset); in openpic_reset()
545 for (i = 0; i < MAX_CPU; i++) { in openpic_reset()
546 opp->dst[i].ctpr = 15; in openpic_reset()
547 memset(&opp->dst[i].raised, 0, sizeof(struct irq_queue)); in openpic_reset()
548 opp->dst[i].raised.next = -1; in openpic_reset()
549 memset(&opp->dst[i].servicing, 0, sizeof(struct irq_queue)); in openpic_reset()
550 opp->dst[i].servicing.next = -1; in openpic_reset()
553 for (i = 0; i < MAX_TMR; i++) { in openpic_reset()
554 opp->timers[i].tccr = 0; in openpic_reset()
555 opp->timers[i].tbcr = TBCR_CI; in openpic_reset()
558 opp->gcr = 0; in openpic_reset()
561 static inline uint32_t read_IRQreg_idr(struct openpic *opp, int n_IRQ) in read_IRQreg_idr() argument
563 return opp->src[n_IRQ].idr; in read_IRQreg_idr()
566 static inline uint32_t read_IRQreg_ilr(struct openpic *opp, int n_IRQ) in read_IRQreg_ilr() argument
568 if (opp->flags & OPENPIC_FLAG_ILR) in read_IRQreg_ilr()
569 return opp->src[n_IRQ].output; in read_IRQreg_ilr()
571 return 0xffffffff; in read_IRQreg_ilr()
574 static inline uint32_t read_IRQreg_ivpr(struct openpic *opp, int n_IRQ) in read_IRQreg_ivpr() argument
576 return opp->src[n_IRQ].ivpr; in read_IRQreg_ivpr()
579 static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ, in write_IRQreg_idr() argument
582 struct irq_source *src = &opp->src[n_IRQ]; in write_IRQreg_idr()
583 uint32_t normal_mask = (1UL << opp->nb_cpus) - 1; in write_IRQreg_idr()
584 uint32_t crit_mask = 0; in write_IRQreg_idr()
586 int crit_shift = IDR_EP_SHIFT - opp->nb_cpus; in write_IRQreg_idr()
589 if (opp->flags & OPENPIC_FLAG_IDR_CRIT) { in write_IRQreg_idr()
594 src->idr = val & mask; in write_IRQreg_idr()
595 pr_debug("Set IDR %d to 0x%08x\n", n_IRQ, src->idr); in write_IRQreg_idr()
597 if (opp->flags & OPENPIC_FLAG_IDR_CRIT) { in write_IRQreg_idr()
598 if (src->idr & crit_mask) { in write_IRQreg_idr()
599 if (src->idr & normal_mask) { in write_IRQreg_idr()
604 src->output = ILR_INTTGT_CINT; in write_IRQreg_idr()
605 src->nomask = true; in write_IRQreg_idr()
606 src->destmask = 0; in write_IRQreg_idr()
608 for (i = 0; i < opp->nb_cpus; i++) { in write_IRQreg_idr()
609 int n_ci = IDR_CI0_SHIFT - i; in write_IRQreg_idr()
611 if (src->idr & (1UL << n_ci)) in write_IRQreg_idr()
612 src->destmask |= 1UL << i; in write_IRQreg_idr()
615 src->output = ILR_INTTGT_INT; in write_IRQreg_idr()
616 src->nomask = false; in write_IRQreg_idr()
617 src->destmask = src->idr & normal_mask; in write_IRQreg_idr()
620 src->destmask = src->idr; in write_IRQreg_idr()
624 static inline void write_IRQreg_ilr(struct openpic *opp, int n_IRQ, in write_IRQreg_ilr() argument
627 if (opp->flags & OPENPIC_FLAG_ILR) { in write_IRQreg_ilr()
628 struct irq_source *src = &opp->src[n_IRQ]; in write_IRQreg_ilr()
630 src->output = val & ILR_INTTGT_MASK; in write_IRQreg_ilr()
631 pr_debug("Set ILR %d to 0x%08x, output %d\n", n_IRQ, src->idr, in write_IRQreg_ilr()
632 src->output); in write_IRQreg_ilr()
634 /* TODO: on MPIC v4.0 only, set nomask for non-INT */ in write_IRQreg_ilr()
638 static inline void write_IRQreg_ivpr(struct openpic *opp, int n_IRQ, in write_IRQreg_ivpr() argument
643 /* NOTE when implementing newer FSL MPIC models: starting with v4.0, in write_IRQreg_ivpr()
644 * the polarity bit is read-only on internal interrupts. in write_IRQreg_ivpr()
647 IVPR_POLARITY_MASK | opp->vector_mask; in write_IRQreg_ivpr()
649 /* ACTIVITY bit is read-only */ in write_IRQreg_ivpr()
650 opp->src[n_IRQ].ivpr = in write_IRQreg_ivpr()
651 (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask); in write_IRQreg_ivpr()
654 * and the interrupt is always level-triggered. Timers and IPIs in write_IRQreg_ivpr()
655 * have no sense or polarity bits, and are edge-triggered. in write_IRQreg_ivpr()
657 switch (opp->src[n_IRQ].type) { in write_IRQreg_ivpr()
659 opp->src[n_IRQ].level = in write_IRQreg_ivpr()
660 !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK); in write_IRQreg_ivpr()
664 opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK; in write_IRQreg_ivpr()
668 opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK); in write_IRQreg_ivpr()
672 openpic_update_irq(opp, n_IRQ); in write_IRQreg_ivpr()
673 pr_debug("Set IVPR %d to 0x%08x -> 0x%08x\n", n_IRQ, val, in write_IRQreg_ivpr()
674 opp->src[n_IRQ].ivpr); in write_IRQreg_ivpr()
677 static void openpic_gcr_write(struct openpic *opp, uint64_t val) in openpic_gcr_write() argument
680 openpic_reset(opp); in openpic_gcr_write()
684 opp->gcr &= ~opp->mpic_mode_mask; in openpic_gcr_write()
685 opp->gcr |= val & opp->mpic_mode_mask; in openpic_gcr_write()
690 struct openpic *opp = opaque; in openpic_gbl_write() local
691 int err = 0; in openpic_gbl_write()
694 if (addr & 0xF) in openpic_gbl_write()
695 return 0; in openpic_gbl_write()
698 case 0x00: /* Block Revision Register1 (BRR1) is Readonly */ in openpic_gbl_write()
700 case 0x40: in openpic_gbl_write()
701 case 0x50: in openpic_gbl_write()
702 case 0x60: in openpic_gbl_write()
703 case 0x70: in openpic_gbl_write()
704 case 0x80: in openpic_gbl_write()
705 case 0x90: in openpic_gbl_write()
706 case 0xA0: in openpic_gbl_write()
707 case 0xB0: in openpic_gbl_write()
708 err = openpic_cpu_write_internal(opp, addr, val, in openpic_gbl_write()
711 case 0x1000: /* FRR */ in openpic_gbl_write()
713 case 0x1020: /* GCR */ in openpic_gbl_write()
714 openpic_gcr_write(opp, val); in openpic_gbl_write()
716 case 0x1080: /* VIR */ in openpic_gbl_write()
718 case 0x1090: /* PIR */ in openpic_gbl_write()
720 * This register is used to reset a CPU core -- in openpic_gbl_write()
723 err = -ENXIO; in openpic_gbl_write()
725 case 0x10A0: /* IPI_IVPR */ in openpic_gbl_write()
726 case 0x10B0: in openpic_gbl_write()
727 case 0x10C0: in openpic_gbl_write()
728 case 0x10D0: { in openpic_gbl_write()
730 idx = (addr - 0x10A0) >> 4; in openpic_gbl_write()
731 write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val); in openpic_gbl_write()
734 case 0x10E0: /* SPVE */ in openpic_gbl_write()
735 opp->spve = val & opp->vector_mask; in openpic_gbl_write()
746 struct openpic *opp = opaque; in openpic_gbl_read() local
748 int err = 0; in openpic_gbl_read()
751 retval = 0xFFFFFFFF; in openpic_gbl_read()
752 if (addr & 0xF) in openpic_gbl_read()
756 case 0x1000: /* FRR */ in openpic_gbl_read()
757 retval = opp->frr; in openpic_gbl_read()
758 retval |= (opp->nb_cpus - 1) << FRR_NCPU_SHIFT; in openpic_gbl_read()
760 case 0x1020: /* GCR */ in openpic_gbl_read()
761 retval = opp->gcr; in openpic_gbl_read()
763 case 0x1080: /* VIR */ in openpic_gbl_read()
764 retval = opp->vir; in openpic_gbl_read()
766 case 0x1090: /* PIR */ in openpic_gbl_read()
767 retval = 0x00000000; in openpic_gbl_read()
769 case 0x00: /* Block Revision Register1 (BRR1) */ in openpic_gbl_read()
770 retval = opp->brr1; in openpic_gbl_read()
772 case 0x40: in openpic_gbl_read()
773 case 0x50: in openpic_gbl_read()
774 case 0x60: in openpic_gbl_read()
775 case 0x70: in openpic_gbl_read()
776 case 0x80: in openpic_gbl_read()
777 case 0x90: in openpic_gbl_read()
778 case 0xA0: in openpic_gbl_read()
779 case 0xB0: in openpic_gbl_read()
780 err = openpic_cpu_read_internal(opp, addr, in openpic_gbl_read()
783 case 0x10A0: /* IPI_IVPR */ in openpic_gbl_read()
784 case 0x10B0: in openpic_gbl_read()
785 case 0x10C0: in openpic_gbl_read()
786 case 0x10D0: in openpic_gbl_read()
789 idx = (addr - 0x10A0) >> 4; in openpic_gbl_read()
790 retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx); in openpic_gbl_read()
793 case 0x10E0: /* SPVE */ in openpic_gbl_read()
794 retval = opp->spve; in openpic_gbl_read()
801 pr_debug("%s: => 0x%08x\n", __func__, retval); in openpic_gbl_read()
808 struct openpic *opp = opaque; in openpic_tmr_write() local
811 addr += 0x10f0; in openpic_tmr_write()
814 if (addr & 0xF) in openpic_tmr_write()
815 return 0; in openpic_tmr_write()
817 if (addr == 0x10f0) { in openpic_tmr_write()
819 opp->tfrr = val; in openpic_tmr_write()
820 return 0; in openpic_tmr_write()
823 idx = (addr >> 6) & 0x3; in openpic_tmr_write()
824 addr = addr & 0x30; in openpic_tmr_write()
826 switch (addr & 0x30) { in openpic_tmr_write()
827 case 0x00: /* TCCR */ in openpic_tmr_write()
829 case 0x10: /* TBCR */ in openpic_tmr_write()
830 if ((opp->timers[idx].tccr & TCCR_TOG) != 0 && in openpic_tmr_write()
831 (val & TBCR_CI) == 0 && in openpic_tmr_write()
832 (opp->timers[idx].tbcr & TBCR_CI) != 0) in openpic_tmr_write()
833 opp->timers[idx].tccr &= ~TCCR_TOG; in openpic_tmr_write()
835 opp->timers[idx].tbcr = val; in openpic_tmr_write()
837 case 0x20: /* TVPR */ in openpic_tmr_write()
838 write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val); in openpic_tmr_write()
840 case 0x30: /* TDR */ in openpic_tmr_write()
841 write_IRQreg_idr(opp, opp->irq_tim0 + idx, val); in openpic_tmr_write()
845 return 0; in openpic_tmr_write()
850 struct openpic *opp = opaque; in openpic_tmr_read() local
851 uint32_t retval = -1; in openpic_tmr_read()
855 if (addr & 0xF) in openpic_tmr_read()
858 idx = (addr >> 6) & 0x3; in openpic_tmr_read()
859 if (addr == 0x0) { in openpic_tmr_read()
861 retval = opp->tfrr; in openpic_tmr_read()
865 switch (addr & 0x30) { in openpic_tmr_read()
866 case 0x00: /* TCCR */ in openpic_tmr_read()
867 retval = opp->timers[idx].tccr; in openpic_tmr_read()
869 case 0x10: /* TBCR */ in openpic_tmr_read()
870 retval = opp->timers[idx].tbcr; in openpic_tmr_read()
872 case 0x20: /* TIPV */ in openpic_tmr_read()
873 retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx); in openpic_tmr_read()
875 case 0x30: /* TIDE (TIDR) */ in openpic_tmr_read()
876 retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx); in openpic_tmr_read()
881 pr_debug("%s: => 0x%08x\n", __func__, retval); in openpic_tmr_read()
883 return 0; in openpic_tmr_read()
888 struct openpic *opp = opaque; in openpic_src_write() local
893 addr = addr & 0xffff; in openpic_src_write()
896 switch (addr & 0x1f) { in openpic_src_write()
897 case 0x00: in openpic_src_write()
898 write_IRQreg_ivpr(opp, idx, val); in openpic_src_write()
900 case 0x10: in openpic_src_write()
901 write_IRQreg_idr(opp, idx, val); in openpic_src_write()
903 case 0x18: in openpic_src_write()
904 write_IRQreg_ilr(opp, idx, val); in openpic_src_write()
908 return 0; in openpic_src_write()
913 struct openpic *opp = opaque; in openpic_src_read() local
918 retval = 0xFFFFFFFF; in openpic_src_read()
920 addr = addr & 0xffff; in openpic_src_read()
923 switch (addr & 0x1f) { in openpic_src_read()
924 case 0x00: in openpic_src_read()
925 retval = read_IRQreg_ivpr(opp, idx); in openpic_src_read()
927 case 0x10: in openpic_src_read()
928 retval = read_IRQreg_idr(opp, idx); in openpic_src_read()
930 case 0x18: in openpic_src_read()
931 retval = read_IRQreg_ilr(opp, idx); in openpic_src_read()
935 pr_debug("%s: => 0x%08x\n", __func__, retval); in openpic_src_read()
937 return 0; in openpic_src_read()
942 struct openpic *opp = opaque; in openpic_msi_write() local
943 int idx = opp->irq_msi; in openpic_msi_write()
946 pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val); in openpic_msi_write()
947 if (addr & 0xF) in openpic_msi_write()
948 return 0; in openpic_msi_write()
955 opp->msi[srs].msir |= 1 << ibs; in openpic_msi_write()
956 openpic_set_irq(opp, idx, 1); in openpic_msi_write()
959 /* most registers are read-only, thus ignored */ in openpic_msi_write()
963 return 0; in openpic_msi_write()
968 struct openpic *opp = opaque; in openpic_msi_read() local
969 uint32_t r = 0; in openpic_msi_read()
973 if (addr & 0xF) in openpic_msi_read()
974 return -ENXIO; in openpic_msi_read()
979 case 0x00: in openpic_msi_read()
980 case 0x10: in openpic_msi_read()
981 case 0x20: in openpic_msi_read()
982 case 0x30: in openpic_msi_read()
983 case 0x40: in openpic_msi_read()
984 case 0x50: in openpic_msi_read()
985 case 0x60: in openpic_msi_read()
986 case 0x70: /* MSIRs */ in openpic_msi_read()
987 r = opp->msi[srs].msir; in openpic_msi_read()
989 opp->msi[srs].msir = 0; in openpic_msi_read()
990 openpic_set_irq(opp, opp->irq_msi + srs, 0); in openpic_msi_read()
992 case 0x120: /* MSISR */ in openpic_msi_read()
993 for (i = 0; i < MAX_MSI; i++) in openpic_msi_read()
994 r |= (opp->msi[i].msir ? 1 : 0) << i; in openpic_msi_read()
998 pr_debug("%s: => 0x%08x\n", __func__, r); in openpic_msi_read()
1000 return 0; in openpic_msi_read()
1005 uint32_t r = 0; in openpic_summary_read()
1012 return 0; in openpic_summary_read()
1017 pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val); in openpic_summary_write()
1020 return 0; in openpic_summary_write()
1026 struct openpic *opp = opaque; in openpic_cpu_write_internal() local
1031 pr_debug("%s: cpu %d addr %#llx <= 0x%08x\n", __func__, idx, in openpic_cpu_write_internal()
1034 if (idx < 0) in openpic_cpu_write_internal()
1035 return 0; in openpic_cpu_write_internal()
1037 if (addr & 0xF) in openpic_cpu_write_internal()
1038 return 0; in openpic_cpu_write_internal()
1040 dst = &opp->dst[idx]; in openpic_cpu_write_internal()
1041 addr &= 0xFF0; in openpic_cpu_write_internal()
1043 case 0x40: /* IPIDR */ in openpic_cpu_write_internal()
1044 case 0x50: in openpic_cpu_write_internal()
1045 case 0x60: in openpic_cpu_write_internal()
1046 case 0x70: in openpic_cpu_write_internal()
1047 idx = (addr - 0x40) >> 4; in openpic_cpu_write_internal()
1049 opp->src[opp->irq_ipi0 + idx].destmask |= val; in openpic_cpu_write_internal()
1050 openpic_set_irq(opp, opp->irq_ipi0 + idx, 1); in openpic_cpu_write_internal()
1051 openpic_set_irq(opp, opp->irq_ipi0 + idx, 0); in openpic_cpu_write_internal()
1053 case 0x80: /* CTPR */ in openpic_cpu_write_internal()
1054 dst->ctpr = val & 0x0000000F; in openpic_cpu_write_internal()
1057 __func__, idx, dst->ctpr, dst->raised.priority, in openpic_cpu_write_internal()
1058 dst->servicing.priority); in openpic_cpu_write_internal()
1060 if (dst->raised.priority <= dst->ctpr) { in openpic_cpu_write_internal()
1063 mpic_irq_lower(opp, dst, ILR_INTTGT_INT); in openpic_cpu_write_internal()
1064 } else if (dst->raised.priority > dst->servicing.priority) { in openpic_cpu_write_internal()
1066 __func__, idx, dst->raised.next); in openpic_cpu_write_internal()
1067 mpic_irq_raise(opp, dst, ILR_INTTGT_INT); in openpic_cpu_write_internal()
1071 case 0x90: /* WHOAMI */ in openpic_cpu_write_internal()
1072 /* Read-only register */ in openpic_cpu_write_internal()
1074 case 0xA0: /* IACK */ in openpic_cpu_write_internal()
1075 /* Read-only register */ in openpic_cpu_write_internal()
1077 case 0xB0: { /* EOI */ in openpic_cpu_write_internal()
1081 s_IRQ = IRQ_get_next(opp, &dst->servicing); in openpic_cpu_write_internal()
1083 if (s_IRQ < 0) { in openpic_cpu_write_internal()
1089 IRQ_resetbit(&dst->servicing, s_IRQ); in openpic_cpu_write_internal()
1093 s_IRQ = IRQ_get_next(opp, &dst->servicing); in openpic_cpu_write_internal()
1095 n_IRQ = IRQ_get_next(opp, &dst->raised); in openpic_cpu_write_internal()
1096 src = &opp->src[n_IRQ]; in openpic_cpu_write_internal()
1097 if (n_IRQ != -1 && in openpic_cpu_write_internal()
1098 (s_IRQ == -1 || in openpic_cpu_write_internal()
1099 IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) { in openpic_cpu_write_internal()
1102 mpic_irq_raise(opp, dst, ILR_INTTGT_INT); in openpic_cpu_write_internal()
1105 spin_unlock(&opp->lock); in openpic_cpu_write_internal()
1106 kvm_notify_acked_irq(opp->kvm, 0, notify_eoi); in openpic_cpu_write_internal()
1107 spin_lock(&opp->lock); in openpic_cpu_write_internal()
1115 return 0; in openpic_cpu_write_internal()
1120 struct openpic *opp = opaque; in openpic_cpu_write() local
1122 return openpic_cpu_write_internal(opp, addr, val, in openpic_cpu_write()
1123 (addr & 0x1f000) >> 12); in openpic_cpu_write()
1126 static uint32_t openpic_iack(struct openpic *opp, struct irq_dest *dst, in openpic_iack() argument
1133 mpic_irq_lower(opp, dst, ILR_INTTGT_INT); in openpic_iack()
1135 irq = IRQ_get_next(opp, &dst->raised); in openpic_iack()
1138 if (irq == -1) in openpic_iack()
1140 return opp->spve; in openpic_iack()
1142 src = &opp->src[irq]; in openpic_iack()
1143 if (!(src->ivpr & IVPR_ACTIVITY_MASK) || in openpic_iack()
1144 !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) { in openpic_iack()
1145 pr_err("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x\n", in openpic_iack()
1146 __func__, irq, dst->ctpr, src->ivpr); in openpic_iack()
1147 openpic_update_irq(opp, irq); in openpic_iack()
1148 retval = opp->spve; in openpic_iack()
1151 IRQ_setbit(&dst->servicing, irq); in openpic_iack()
1152 retval = IVPR_VECTOR(opp, src->ivpr); in openpic_iack()
1155 if (!src->level) { in openpic_iack()
1156 /* edge-sensitive IRQ */ in openpic_iack()
1157 src->ivpr &= ~IVPR_ACTIVITY_MASK; in openpic_iack()
1158 src->pending = 0; in openpic_iack()
1159 IRQ_resetbit(&dst->raised, irq); in openpic_iack()
1162 if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + MAX_IPI))) { in openpic_iack()
1163 src->destmask &= ~(1 << cpu); in openpic_iack()
1164 if (src->destmask && !src->level) { in openpic_iack()
1166 openpic_set_irq(opp, irq, 1); in openpic_iack()
1167 openpic_set_irq(opp, irq, 0); in openpic_iack()
1169 src->ivpr |= IVPR_ACTIVITY_MASK; in openpic_iack()
1178 struct openpic *opp = vcpu->arch.mpic; in kvmppc_mpic_set_epr() local
1179 int cpu = vcpu->arch.irq_cpu_id; in kvmppc_mpic_set_epr()
1182 spin_lock_irqsave(&opp->lock, flags); in kvmppc_mpic_set_epr()
1184 if ((opp->gcr & opp->mpic_mode_mask) == GCR_MODE_PROXY) in kvmppc_mpic_set_epr()
1185 kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu)); in kvmppc_mpic_set_epr()
1187 spin_unlock_irqrestore(&opp->lock, flags); in kvmppc_mpic_set_epr()
1193 struct openpic *opp = opaque; in openpic_cpu_read_internal() local
1198 retval = 0xFFFFFFFF; in openpic_cpu_read_internal()
1200 if (idx < 0) in openpic_cpu_read_internal()
1203 if (addr & 0xF) in openpic_cpu_read_internal()
1206 dst = &opp->dst[idx]; in openpic_cpu_read_internal()
1207 addr &= 0xFF0; in openpic_cpu_read_internal()
1209 case 0x80: /* CTPR */ in openpic_cpu_read_internal()
1210 retval = dst->ctpr; in openpic_cpu_read_internal()
1212 case 0x90: /* WHOAMI */ in openpic_cpu_read_internal()
1215 case 0xA0: /* IACK */ in openpic_cpu_read_internal()
1216 retval = openpic_iack(opp, dst, idx); in openpic_cpu_read_internal()
1218 case 0xB0: /* EOI */ in openpic_cpu_read_internal()
1219 retval = 0; in openpic_cpu_read_internal()
1224 pr_debug("%s: => 0x%08x\n", __func__, retval); in openpic_cpu_read_internal()
1228 return 0; in openpic_cpu_read_internal()
1233 struct openpic *opp = opaque; in openpic_cpu_read() local
1235 return openpic_cpu_read_internal(opp, addr, ptr, in openpic_cpu_read()
1236 (addr & 0x1f000) >> 12); in openpic_cpu_read()
1288 static void add_mmio_region(struct openpic *opp, const struct mem_reg *mr) in add_mmio_region() argument
1290 if (opp->num_mmio_regions >= MAX_MMIO_REGIONS) { in add_mmio_region()
1295 opp->mmio_regions[opp->num_mmio_regions++] = mr; in add_mmio_region()
1298 static void fsl_common_init(struct openpic *opp) in fsl_common_init() argument
1303 add_mmio_region(opp, &openpic_msi_mmio); in fsl_common_init()
1304 add_mmio_region(opp, &openpic_summary_mmio); in fsl_common_init()
1306 opp->vid = VID_REVISION_1_2; in fsl_common_init()
1307 opp->vir = VIR_GENERIC; in fsl_common_init()
1308 opp->vector_mask = 0xFFFF; in fsl_common_init()
1309 opp->tfrr_reset = 0; in fsl_common_init()
1310 opp->ivpr_reset = IVPR_MASK_MASK; in fsl_common_init()
1311 opp->idr_reset = 1 << 0; in fsl_common_init()
1312 opp->max_irq = MAX_IRQ; in fsl_common_init()
1314 opp->irq_ipi0 = virq; in fsl_common_init()
1316 opp->irq_tim0 = virq; in fsl_common_init()
1321 opp->irq_msi = 224; in fsl_common_init()
1323 for (i = 0; i < opp->fsl->max_ext; i++) in fsl_common_init()
1324 opp->src[i].level = false; in fsl_common_init()
1328 opp->src[i].type = IRQ_TYPE_FSLINT; in fsl_common_init()
1329 opp->src[i].level = true; in fsl_common_init()
1334 opp->src[i].type = IRQ_TYPE_FSLSPECIAL; in fsl_common_init()
1335 opp->src[i].level = false; in fsl_common_init()
1339 static int kvm_mpic_read_internal(struct openpic *opp, gpa_t addr, u32 *ptr) in kvm_mpic_read_internal() argument
1343 for (i = 0; i < opp->num_mmio_regions; i++) { in kvm_mpic_read_internal()
1344 const struct mem_reg *mr = opp->mmio_regions[i]; in kvm_mpic_read_internal()
1346 if (mr->start_addr > addr || addr >= mr->start_addr + mr->size) in kvm_mpic_read_internal()
1349 return mr->read(opp, addr - mr->start_addr, ptr); in kvm_mpic_read_internal()
1352 return -ENXIO; in kvm_mpic_read_internal()
1355 static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val) in kvm_mpic_write_internal() argument
1359 for (i = 0; i < opp->num_mmio_regions; i++) { in kvm_mpic_write_internal()
1360 const struct mem_reg *mr = opp->mmio_regions[i]; in kvm_mpic_write_internal()
1362 if (mr->start_addr > addr || addr >= mr->start_addr + mr->size) in kvm_mpic_write_internal()
1365 return mr->write(opp, addr - mr->start_addr, val); in kvm_mpic_write_internal()
1368 return -ENXIO; in kvm_mpic_write_internal()
1375 struct openpic *opp = container_of(this, struct openpic, mmio); in kvm_mpic_read() local
1382 if (addr & (len - 1)) { in kvm_mpic_read()
1385 return -EINVAL; in kvm_mpic_read()
1388 spin_lock_irq(&opp->lock); in kvm_mpic_read()
1389 ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val); in kvm_mpic_read()
1390 spin_unlock_irq(&opp->lock); in kvm_mpic_read()
1393 * Technically only 32-bit accesses are allowed, but be nice to in kvm_mpic_read()
1394 * people dumping registers a byte at a time -- it works in real in kvm_mpic_read()
1407 return -EINVAL; in kvm_mpic_read()
1417 struct openpic *opp = container_of(this, struct openpic, mmio); in kvm_mpic_write() local
1422 return -EOPNOTSUPP; in kvm_mpic_write()
1426 return -EOPNOTSUPP; in kvm_mpic_write()
1429 spin_lock_irq(&opp->lock); in kvm_mpic_write()
1430 ret = kvm_mpic_write_internal(opp, addr - opp->reg_base, in kvm_mpic_write()
1432 spin_unlock_irq(&opp->lock); in kvm_mpic_write()
1445 static void map_mmio(struct openpic *opp) in map_mmio() argument
1447 kvm_iodevice_init(&opp->mmio, &mpic_mmio_ops); in map_mmio()
1449 kvm_io_bus_register_dev(opp->kvm, KVM_MMIO_BUS, in map_mmio()
1450 opp->reg_base, OPENPIC_REG_SIZE, in map_mmio()
1451 &opp->mmio); in map_mmio()
1454 static void unmap_mmio(struct openpic *opp) in unmap_mmio() argument
1456 kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio); in unmap_mmio()
1459 static int set_base_addr(struct openpic *opp, struct kvm_device_attr *attr) in set_base_addr() argument
1463 if (copy_from_user(&base, (u64 __user *)(long)attr->addr, sizeof(u64))) in set_base_addr()
1464 return -EFAULT; in set_base_addr()
1466 if (base & 0x3ffff) { in set_base_addr()
1469 return -EINVAL; in set_base_addr()
1472 if (base == opp->reg_base) in set_base_addr()
1473 return 0; in set_base_addr()
1475 mutex_lock(&opp->kvm->slots_lock); in set_base_addr()
1477 unmap_mmio(opp); in set_base_addr()
1478 opp->reg_base = base; in set_base_addr()
1483 if (base == 0) in set_base_addr()
1486 map_mmio(opp); in set_base_addr()
1489 mutex_unlock(&opp->kvm->slots_lock); in set_base_addr()
1490 return 0; in set_base_addr()
1493 #define ATTR_SET 0
/*
 * Common helper for the register-attribute get/set paths: performs a
 * single 32-bit MPIC register access at @addr under opp->lock.
 * @type is ATTR_SET (write *val) or ATTR_GET (read into *val).
 * NOTE(review): the elided line before the -ENXIO return is presumably
 * an address validity/alignment check -- confirm against the full
 * source.
 */
1496 static int access_reg(struct openpic *opp, gpa_t addr, u32 *val, int type) in access_reg() argument
1501 return -ENXIO; in access_reg()
1503 spin_lock_irq(&opp->lock); in access_reg()
1506 ret = kvm_mpic_write_internal(opp, addr, *val); in access_reg()
1508 ret = kvm_mpic_read_internal(opp, addr, val); in access_reg()
1510 spin_unlock_irq(&opp->lock); in access_reg()
/*
 * KVM_SET_DEVICE_ATTR handler for the MPIC device.
 * Visible attribute groups in this extract:
 *  - base address: delegates to set_base_addr();
 *  - 32-bit register write: copies a u32 from userspace and applies it
 *    via access_reg(..., ATTR_SET), with attr->attr as the register
 *    offset;
 *  - IRQ source level: attr->attr is the IRQ number (rejected above
 *    MAX_SRC), attr value must be 0 or 1, applied through
 *    openpic_set_irq() under opp->lock.
 * Unknown groups/attributes fall through to -ENXIO.
 * NOTE(review): the function signature and case labels are elided in
 * this extract -- confirm group/attr constants against the full source.
 */
1519 struct openpic *opp = dev->private; in mpic_set_attr() local
1522 switch (attr->group) { in mpic_set_attr()
1524 switch (attr->attr) { in mpic_set_attr()
1526 return set_base_addr(opp, attr); in mpic_set_attr()
1532 if (get_user(attr32, (u32 __user *)(long)attr->addr)) in mpic_set_attr()
1533 return -EFAULT; in mpic_set_attr()
1535 return access_reg(opp, attr->attr, &attr32, ATTR_SET); in mpic_set_attr()
1538 if (attr->attr > MAX_SRC) in mpic_set_attr()
1539 return -EINVAL; in mpic_set_attr()
1541 if (get_user(attr32, (u32 __user *)(long)attr->addr)) in mpic_set_attr()
1542 return -EFAULT; in mpic_set_attr()
1544 if (attr32 != 0 && attr32 != 1) in mpic_set_attr()
1545 return -EINVAL; in mpic_set_attr()
1547 spin_lock_irq(&opp->lock); in mpic_set_attr()
1548 openpic_set_irq(opp, attr->attr, attr32); in mpic_set_attr()
1549 spin_unlock_irq(&opp->lock); in mpic_set_attr()
1550 return 0; in mpic_set_attr()
1553 return -ENXIO; in mpic_set_attr()
/*
 * KVM_GET_DEVICE_ATTR handler for the MPIC device.
 * Visible attribute groups in this extract:
 *  - base address: snapshots opp->reg_base under kvm->slots_lock and
 *    copies the u64 out to userspace;
 *  - 32-bit register read: access_reg(..., ATTR_GET) with attr->attr as
 *    the register offset, result put_user()'d back;
 *  - IRQ source level: reads opp->src[attr->attr].pending under
 *    opp->lock (attr->attr rejected above MAX_SRC) and returns it.
 * Unknown groups/attributes fall through to -ENXIO.
 * NOTE(review): the function signature and case labels are elided in
 * this extract -- confirm group/attr constants against the full source.
 */
1558 struct openpic *opp = dev->private; in mpic_get_attr() local
1563 switch (attr->group) { in mpic_get_attr()
1565 switch (attr->attr) { in mpic_get_attr()
1567 mutex_lock(&opp->kvm->slots_lock); in mpic_get_attr()
1568 attr64 = opp->reg_base; in mpic_get_attr()
1569 mutex_unlock(&opp->kvm->slots_lock); in mpic_get_attr()
1571 if (copy_to_user((u64 __user *)(long)attr->addr, in mpic_get_attr()
1573 return -EFAULT; in mpic_get_attr()
1575 return 0; in mpic_get_attr()
1581 ret = access_reg(opp, attr->attr, &attr32, ATTR_GET); in mpic_get_attr()
1585 if (put_user(attr32, (u32 __user *)(long)attr->addr)) in mpic_get_attr()
1586 return -EFAULT; in mpic_get_attr()
1588 return 0; in mpic_get_attr()
1591 if (attr->attr > MAX_SRC) in mpic_get_attr()
1592 return -EINVAL; in mpic_get_attr()
1594 spin_lock_irq(&opp->lock); in mpic_get_attr()
1595 attr32 = opp->src[attr->attr].pending; in mpic_get_attr()
1596 spin_unlock_irq(&opp->lock); in mpic_get_attr()
1598 if (put_user(attr32, (u32 __user *)(long)attr->addr)) in mpic_get_attr()
1599 return -EFAULT; in mpic_get_attr()
1601 return 0; in mpic_get_attr()
1604 return -ENXIO; in mpic_get_attr()
/*
 * KVM_HAS_DEVICE_ATTR handler: reports (returns 0) which attribute
 * groups/attributes this device supports, mirroring the groups handled
 * in mpic_set_attr()/mpic_get_attr(); anything else is -ENXIO.
 * NOTE(review): the signature, case labels, and the error return after
 * the MAX_SRC check are elided in this extract -- confirm against the
 * full source.
 */
1609 switch (attr->group) { in mpic_has_attr()
1611 switch (attr->attr) { in mpic_has_attr()
1613 return 0; in mpic_has_attr()
1619 return 0; in mpic_has_attr()
1622 if (attr->attr > MAX_SRC) in mpic_has_attr()
1625 return 0; in mpic_has_attr()
1628 return -ENXIO; in mpic_has_attr()
/*
 * Device destructor: detaches the MPIC from the VM
 * (kvm->arch.mpic = NULL) and frees the openpic state allocated in
 * mpic_create().
 */
1633 struct openpic *opp = dev->private; in mpic_destroy() local
1635 dev->kvm->arch.mpic = NULL; in mpic_destroy()
1636 kfree(opp); in mpic_destroy()
/*
 * Install an empty default IRQ routing table for the VM (0 entries,
 * flags 0) so that routing is initialized before any GSI is used.
 * Returns 0 on success, -ENOMEM if the routing buffer allocation
 * (elided in this extract) fails.
 * NOTE(review): the allocation and the matching kfree() of the routing
 * buffer are elided here -- confirm against the full source.
 */
1640 static int mpic_set_default_irq_routing(struct openpic *opp) in mpic_set_default_irq_routing() argument
1647 return -ENOMEM; in mpic_set_default_irq_routing()
1649 kvm_set_irq_routing(opp->kvm, routing, 0, 0); in mpic_set_default_irq_routing()
1652 return 0; in mpic_set_default_irq_routing()
/*
 * Device constructor for the in-kernel MPIC.
 *
 * Only one MPIC per VM is allowed (-EINVAL if kvm->arch.mpic is already
 * set).  Allocates and zeroes the openpic state, links it to the kvm
 * device, and registers the four MMIO sub-regions (global, timer,
 * source, per-CPU).
 *
 * Model-specific setup (the case labels are elided in this extract;
 * presumably KVM_DEV_TYPE_FSL_MPIC_20 / _42 -- confirm):
 *  - FSL MPIC 2.0: fsl_mpic_20 ops, BRR1 0x00400200, IDR critical-int
 *    flag, 80 IRQ sources, mixed mode only;
 *  - FSL MPIC 4.2: fsl_mpic_42 ops, BRR1 0x00400402, ILR flag, 196 IRQ
 *    sources, proxy mode;
 *  - anything else: -ENODEV.
 *
 * Then installs default IRQ routing, resets the controller, and
 * publishes the instance in kvm->arch.mpic.  The elided error path
 * frees opp (visible at the tail).
 */
1657 struct openpic *opp; in mpic_create() local
1661 if (dev->kvm->arch.mpic) in mpic_create()
1662 return -EINVAL; in mpic_create()
1664 opp = kzalloc(sizeof(struct openpic), GFP_KERNEL); in mpic_create()
1665 if (!opp) in mpic_create()
1666 return -ENOMEM; in mpic_create()
1668 dev->private = opp; in mpic_create()
1669 opp->kvm = dev->kvm; in mpic_create()
1670 opp->dev = dev; in mpic_create()
1671 opp->model = type; in mpic_create()
1672 spin_lock_init(&opp->lock); in mpic_create()
1674 add_mmio_region(opp, &openpic_gbl_mmio); in mpic_create()
1675 add_mmio_region(opp, &openpic_tmr_mmio); in mpic_create()
1676 add_mmio_region(opp, &openpic_src_mmio); in mpic_create()
1677 add_mmio_region(opp, &openpic_cpu_mmio); in mpic_create()
1679 switch (opp->model) { in mpic_create()
1681 opp->fsl = &fsl_mpic_20; in mpic_create()
1682 opp->brr1 = 0x00400200; in mpic_create()
1683 opp->flags |= OPENPIC_FLAG_IDR_CRIT; in mpic_create()
1684 opp->nb_irqs = 80; in mpic_create()
1685 opp->mpic_mode_mask = GCR_MODE_MIXED; in mpic_create()
1687 fsl_common_init(opp); in mpic_create()
1692 opp->fsl = &fsl_mpic_42; in mpic_create()
1693 opp->brr1 = 0x00400402; in mpic_create()
1694 opp->flags |= OPENPIC_FLAG_ILR; in mpic_create()
1695 opp->nb_irqs = 196; in mpic_create()
1696 opp->mpic_mode_mask = GCR_MODE_PROXY; in mpic_create()
1698 fsl_common_init(opp); in mpic_create()
1703 ret = -ENODEV; in mpic_create()
1707 ret = mpic_set_default_irq_routing(opp); in mpic_create()
1711 openpic_reset(opp); in mpic_create()
1714 dev->kvm->arch.mpic = opp; in mpic_create()
1716 return 0; in mpic_create()
1719 kfree(opp); in mpic_create()
1724 .name = "kvm-mpic",
/*
 * Attach a vcpu to the MPIC as destination CPU @cpu.
 *
 * Sanity checks (outside the lock): the device must really be an MPIC
 * (ops match), belong to the vcpu's VM, and @cpu must be within
 * [0, MAX_CPU) -- all -EPERM otherwise.
 *
 * Under opp->lock: the destination slot must be free (-EEXIST) and the
 * vcpu must not already be wired to an irqchip (vcpu->arch.irq_type
 * set => -EBUSY).  On success the vcpu is recorded as dst[cpu],
 * nb_cpus is grown to cover it, and the vcpu's mpic/irq_cpu_id/
 * irq_type fields are set.  For a proxy-mode-only MPIC (GCR_MODE_PROXY)
 * the vcpu also gets in-kernel EPR delivery (KVMPPC_EPR_KERNEL).
 */
1735 struct openpic *opp = dev->private; in kvmppc_mpic_connect_vcpu() local
1736 int ret = 0; in kvmppc_mpic_connect_vcpu()
1738 if (dev->ops != &kvm_mpic_ops) in kvmppc_mpic_connect_vcpu()
1739 return -EPERM; in kvmppc_mpic_connect_vcpu()
1740 if (opp->kvm != vcpu->kvm) in kvmppc_mpic_connect_vcpu()
1741 return -EPERM; in kvmppc_mpic_connect_vcpu()
1742 if (cpu < 0 || cpu >= MAX_CPU) in kvmppc_mpic_connect_vcpu()
1743 return -EPERM; in kvmppc_mpic_connect_vcpu()
1745 spin_lock_irq(&opp->lock); in kvmppc_mpic_connect_vcpu()
1747 if (opp->dst[cpu].vcpu) { in kvmppc_mpic_connect_vcpu()
1748 ret = -EEXIST; in kvmppc_mpic_connect_vcpu()
1751 if (vcpu->arch.irq_type) { in kvmppc_mpic_connect_vcpu()
1752 ret = -EBUSY; in kvmppc_mpic_connect_vcpu()
1756 opp->dst[cpu].vcpu = vcpu; in kvmppc_mpic_connect_vcpu()
1757 opp->nb_cpus = max(opp->nb_cpus, cpu + 1); in kvmppc_mpic_connect_vcpu()
1759 vcpu->arch.mpic = opp; in kvmppc_mpic_connect_vcpu()
1760 vcpu->arch.irq_cpu_id = cpu; in kvmppc_mpic_connect_vcpu()
1761 vcpu->arch.irq_type = KVMPPC_IRQ_MPIC; in kvmppc_mpic_connect_vcpu()
1764 if (opp->mpic_mode_mask == GCR_MODE_PROXY) in kvmppc_mpic_connect_vcpu()
1765 vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL; in kvmppc_mpic_connect_vcpu()
1768 spin_unlock_irq(&opp->lock); in kvmppc_mpic_connect_vcpu()
/*
 * Detach a vcpu from the MPIC: clears its destination slot (indexed by
 * vcpu->arch.irq_cpu_id).  BUG if the vcpu was never connected.
 */
1777 void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu) in kvmppc_mpic_disconnect_vcpu() argument
1779 BUG_ON(!opp->dst[vcpu->arch.irq_cpu_id].vcpu); in kvmppc_mpic_disconnect_vcpu()
1781 opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL; in kvmppc_mpic_disconnect_vcpu()
1786 * < 0 Interrupt was ignored (masked or not delivered for other reasons)
1787 * = 0 Interrupt was coalesced (previous irq is still pending)
1788 * > 0 Number of CPUs interrupt was delivered to
/*
 * irqchip routing callback: drive source pin e->irqchip.pin to @level
 * on the VM's MPIC via openpic_set_irq(), under opp->lock (irqsave
 * variant -- may be called from contexts with IRQs already disabled).
 * NOTE(review): the signature and final return are elided; the visible
 * "return 0" does not match the >/=/< 0 contract described above --
 * confirm against the full source.
 */
1794 u32 irq = e->irqchip.pin; in mpic_set_irq()
1795 struct openpic *opp = kvm->arch.mpic; in mpic_set_irq() local
1798 spin_lock_irqsave(&opp->lock, flags); in mpic_set_irq()
1799 openpic_set_irq(opp, irq, level); in mpic_set_irq()
1800 spin_unlock_irqrestore(&opp->lock, flags); in mpic_set_irq()
1803 return 0; in mpic_set_irq()
/*
 * MSI routing callback: inject an MSI by writing the payload
 * (e->msi.data) to the MPIC's MSIIR register at MSIIR_OFFSET, under
 * opp->lock (irqsave variant).
 * NOTE(review): the signature and the lines between the lock and the
 * write (presumably decoding/validating e->msi.address_*) are elided
 * -- confirm against the full source.
 */
1809 struct openpic *opp = kvm->arch.mpic; in kvm_set_msi() local
1812 spin_lock_irqsave(&opp->lock, flags); in kvm_set_msi()
1818 openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data); in kvm_set_msi()
1819 spin_unlock_irqrestore(&opp->lock, flags); in kvm_set_msi()
1822 return 0; in kvm_set_msi()
1829 int r = -EINVAL; in kvm_set_routing_entry()
1831 switch (ue->type) { in kvm_set_routing_entry()
1833 e->set = mpic_set_irq; in kvm_set_routing_entry()
1834 e->irqchip.irqchip = ue->u.irqchip.irqchip; in kvm_set_routing_entry()
1835 e->irqchip.pin = ue->u.irqchip.pin; in kvm_set_routing_entry()
1836 if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) in kvm_set_routing_entry()
1840 e->set = kvm_set_msi; in kvm_set_routing_entry()
1841 e->msi.address_lo = ue->u.msi.address_lo; in kvm_set_routing_entry()
1842 e->msi.address_hi = ue->u.msi.address_hi; in kvm_set_routing_entry()
1843 e->msi.data = ue->u.msi.data; in kvm_set_routing_entry()
1849 r = 0; in kvm_set_routing_entry()