// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DEC I/O ASIC interrupts.
 *
 * Copyright (c) 2002, 2003, 2013 Maciej W. Rozycki
 */

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h>

#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/ioasic_ints.h>

static int ioasic_irq_base;

/* Enable an interrupt by setting its bit in the SIMR.  */
static void unmask_ioasic_irq(struct irq_data *d)
{
	u32 simr;

	simr = ioasic_read(IO_REG_SIMR);
	simr |= (1 << (d->irq - ioasic_irq_base));
	ioasic_write(IO_REG_SIMR, simr);
}

/* Disable an interrupt by clearing its bit in the SIMR.  */
static void mask_ioasic_irq(struct irq_data *d)
{
	u32 simr;

	simr = ioasic_read(IO_REG_SIMR);
	simr &= ~(1 << (d->irq - ioasic_irq_base));
	ioasic_write(IO_REG_SIMR, simr);
}

/* Acknowledge an interrupt by masking it; fast_iob() flushes the write.  */
static void ack_ioasic_irq(struct irq_data *d)
{
	mask_ioasic_irq(d);
	fast_iob();
}

static struct irq_chip ioasic_irq_type = {
	.name = "IO-ASIC",
	.irq_ack = ack_ioasic_irq,
	.irq_mask = mask_ioasic_irq,
	.irq_mask_ack = ack_ioasic_irq,
	.irq_unmask = unmask_ioasic_irq,
};

/* Clear a DMA interrupt request: write the SIR with only its bit at `0'.  */
static void clear_ioasic_dma_irq(struct irq_data *d)
{
	u32 sir;

	sir = ~(1 << (d->irq - ioasic_irq_base));
	ioasic_write(IO_REG_SIR, sir);
	fast_iob();
}

static struct irq_chip ioasic_dma_irq_type = {
	.name = "IO-ASIC-DMA",
	.irq_ack = clear_ioasic_dma_irq,
	.irq_mask = mask_ioasic_irq,
	.irq_unmask = unmask_ioasic_irq,
	.irq_eoi = clear_ioasic_dma_irq,
};

/*
 * The I/O ASIC implements two kinds of DMA interrupts: informational and
 * error interrupts.
 *
 * The former do not stop DMA and should be cleared as soon as possible
 * so that if they retrigger before the handler has completed, usually as
 * a side effect of actions taken by the handler, then they are reissued.
 * These use the `handle_edge_irq' handler that clears the request right
 * away.
 *
 * The latter stop DMA and do not resume it until the interrupt has been
 * cleared.  This cannot be done until after a corrective action has been
 * taken and this also means they will not retrigger.  Therefore they use
 * the `handle_fasteoi_irq' handler that only clears the request on the
 * way out.  Because MIPS processor interrupt inputs, one of which the I/O
 * ASIC is cascaded to, are level-triggered it is recommended that error
 * DMA interrupt action handlers be registered with the IRQF_ONESHOT flag
 * set so that they are run with the interrupt line masked; a sketch of
 * such a registration follows the mask definition below.
 *
 * This mask has `1' bits in the positions of informational interrupts.
 */
#define IO_IRQ_DMA_INFO						\
	(IO_IRQ_MASK(IO_INR_SCC0A_RXDMA) |			\
	 IO_IRQ_MASK(IO_INR_SCC1A_RXDMA) |			\
	 IO_IRQ_MASK(IO_INR_ISDN_TXDMA) |			\
	 IO_IRQ_MASK(IO_INR_ISDN_RXDMA) |			\
	 IO_IRQ_MASK(IO_INR_ASC_DMA))
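
/*
 * A minimal sketch, for illustration only, of how a driver might register
 * an error DMA interrupt handler with IRQF_ONESHOT as recommended above
 * (it would also need <linux/interrupt.h>).  The IRQ number, handler,
 * device structure and recovery helper are all hypothetical:
 *
 *	static irqreturn_t my_dma_err_handler(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		// Corrective action runs with the interrupt line still
 *		// masked thanks to IRQF_ONESHOT.
 *		my_dev_recover_dma(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	// A NULL hard handler plus IRQF_ONESHOT makes the threaded handler
 *	// above run with the line masked; it is unmasked only afterwards.
 *	ret = request_threaded_irq(my_dma_err_irq, NULL, my_dma_err_handler,
 *				   IRQF_ONESHOT, "my-dma-err", dev);
 */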

void __init init_ioasic_irqs(int base)
{
	int i;

	/* Mask interrupts. */
	ioasic_write(IO_REG_SIMR, 0);
	fast_iob();

	/* Ordinary device interrupts use the level handler. */
	for (i = base; i < base + IO_INR_DMA; i++)
		irq_set_chip_and_handler(i, &ioasic_irq_type,
					 handle_level_irq);
	/*
	 * DMA interrupts use the edge handler if informational and the
	 * fasteoi handler if error (see IO_IRQ_DMA_INFO above).
	 */
	for (; i < base + IO_IRQ_LINES; i++)
		irq_set_chip_and_handler(i, &ioasic_dma_irq_type,
					 (1 << (i - base)) & IO_IRQ_DMA_INFO ?
					 handle_edge_irq : handle_fasteoi_irq);

	ioasic_irq_base = base;
}