/*
 * Copyright (C) 2016 Marvell
 *
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#define pr_fmt(fmt) "GIC-ODMI: " fmt

#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/slab.h>

#include "irq-msi-lib.h"

#include <dt-bindings/interrupt-controller/arm-gic.h>

#define GICP_ODMIN_SET			0x40
#define GICP_ODMI_INT_NUM_SHIFT		12
#define GICP_ODMIN_GM_EP_R0		0x110
#define GICP_ODMIN_GM_EP_R1		0x114
#define GICP_ODMIN_GM_EA_R0		0x108
#define GICP_ODMIN_GM_EA_R1		0x118

/*
 * We don't support the group events, so we simply have 8 interrupts
 * per frame.
 */
#define NODMIS_SHIFT		3
#define NODMIS_PER_FRAME	(1 << NODMIS_SHIFT)
#define NODMIS_MASK		(NODMIS_PER_FRAME - 1)

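/*
 * Per-frame state: the frame's MMIO window (resource and mapped base)
 * and the first GIC interrupt backing its ODMIs, as given by the
 * "marvell,spi-base" DT property.
 */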
struct odmi_data {
	struct resource res;
	void __iomem *base;
	unsigned int spi_base;
};

static struct odmi_data *odmis;
static unsigned long *odmis_bm;
static unsigned int odmis_count;

/* Protects odmis_bm */
static DEFINE_SPINLOCK(odmis_bm_lock);

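/*
 * Compose the MSI message for an ODMI: the doorbell address is the
 * frame's ODMIN_SET register, and the payload encodes the per-frame
 * interrupt number.
 */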
static void odmi_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct odmi_data *odmi;
	phys_addr_t addr;
	unsigned int odmin;

	if (WARN_ON(d->hwirq >= odmis_count * NODMIS_PER_FRAME))
		return;

	odmi = &odmis[d->hwirq >> NODMIS_SHIFT];
	odmin = d->hwirq & NODMIS_MASK;

	addr = odmi->res.start + GICP_ODMIN_SET;

	msg->address_hi = upper_32_bits(addr);
	msg->address_lo = lower_32_bits(addr);
	msg->data = odmin << GICP_ODMI_INT_NUM_SHIFT;
}

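/*
 * Mask/unmask/EOI and affinity are delegated to the parent (GIC) chip;
 * only the MSI message composition is specific to the ODMI frames.
 */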
static struct irq_chip odmi_irq_chip = {
	.name			= "ODMI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_compose_msi_msg	= odmi_compose_msi_msg,
};

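/*
 * Allocate one ODMI: grab a free bit in the global bitmap, derive the
 * owning frame and per-frame index from it, and allocate the matching
 * SPI from the parent GIC domain.  The "marvell,spi-base" value is an
 * absolute GIC interrupt ID, so 32 is subtracted to obtain the SPI
 * number expected by the GIC binding.  The interrupt is message based
 * and therefore configured as edge-rising.
 */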
static int odmi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *args)
{
	struct odmi_data *odmi = NULL;
	struct irq_fwspec fwspec;
	struct irq_data *d;
	unsigned int hwirq, odmin;
	int ret;

	spin_lock(&odmis_bm_lock);
	hwirq = find_first_zero_bit(odmis_bm, NODMIS_PER_FRAME * odmis_count);
	if (hwirq >= NODMIS_PER_FRAME * odmis_count) {
		spin_unlock(&odmis_bm_lock);
		return -ENOSPC;
	}

	__set_bit(hwirq, odmis_bm);
	spin_unlock(&odmis_bm_lock);

	odmi = &odmis[hwirq >> NODMIS_SHIFT];
	odmin = hwirq & NODMIS_MASK;

	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 3;
	fwspec.param[0] = GIC_SPI;
	fwspec.param[1] = odmi->spi_base - 32 + odmin;
	fwspec.param[2] = IRQ_TYPE_EDGE_RISING;

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (ret) {
		pr_err("Cannot allocate parent IRQ\n");
		spin_lock(&odmis_bm_lock);
		__clear_bit(hwirq, odmis_bm);
		spin_unlock(&odmis_bm_lock);
		return ret;
	}

	/* Configure the interrupt line to be edge */
	d = irq_domain_get_irq_data(domain->parent, virq);
	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);

	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
				      &odmi_irq_chip, NULL);

	return 0;
}

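/*
 * Tear down an ODMI: release the parent SPI and return the hwirq to
 * the global bitmap.
 */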
static void odmi_irq_domain_free(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);

	if (d->hwirq >= odmis_count * NODMIS_PER_FRAME) {
		pr_err("Failed to teardown msi. Invalid hwirq %lu\n", d->hwirq);
		return;
	}

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);

	/* Actually free the MSI */
	spin_lock(&odmis_bm_lock);
	__clear_bit(d->hwirq, odmis_bm);
	spin_unlock(&odmis_bm_lock);
}

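/*
 * .select lets msi-lib pick this domain as the parent when per-device
 * MSI domains are created for devices referencing it.
 */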
static const struct irq_domain_ops odmi_domain_ops = {
	.select	= msi_lib_irq_domain_select,
	.alloc	= odmi_irq_domain_alloc,
	.free	= odmi_irq_domain_free,
};

#define ODMI_MSI_FLAGS_REQUIRED  (MSI_FLAG_USE_DEF_DOM_OPS |	\
				  MSI_FLAG_USE_DEF_CHIP_OPS)

#define ODMI_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK)

static const struct msi_parent_ops odmi_msi_parent_ops = {
	.supported_flags	= ODMI_MSI_FLAGS_SUPPORTED,
	.required_flags		= ODMI_MSI_FLAGS_REQUIRED,
	.bus_select_token	= DOMAIN_BUS_GENERIC_MSI,
	.bus_select_mask	= MATCH_PLATFORM_MSI,
	.prefix			= "ODMI-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

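/*
 * Probe: read the number of ODMI frames, map each frame's registers
 * and record its SPI base, then create a hierarchical MSI-parent
 * domain on top of the GIC domain found through the "parent" node.
 */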
static int __init mvebu_odmi_init(struct device_node *node,
				  struct device_node *parent)
{
	struct irq_domain *parent_domain, *inner_domain;
	int ret, i;

	if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
		return -EINVAL;

	odmis = kcalloc(odmis_count, sizeof(struct odmi_data), GFP_KERNEL);
	if (!odmis)
		return -ENOMEM;

	odmis_bm = bitmap_zalloc(odmis_count * NODMIS_PER_FRAME, GFP_KERNEL);
	if (!odmis_bm) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	for (i = 0; i < odmis_count; i++) {
		struct odmi_data *odmi = &odmis[i];

		ret = of_address_to_resource(node, i, &odmi->res);
		if (ret)
			goto err_unmap;

		odmi->base = of_io_request_and_map(node, i, "odmi");
		if (IS_ERR(odmi->base)) {
			ret = PTR_ERR(odmi->base);
			goto err_unmap;
		}

		if (of_property_read_u32_index(node, "marvell,spi-base",
					       i, &odmi->spi_base)) {
			ret = -EINVAL;
			goto err_unmap;
		}
	}

	parent_domain = irq_find_host(parent);

	inner_domain = irq_domain_create_hierarchy(parent_domain, 0,
						   odmis_count * NODMIS_PER_FRAME,
						   of_node_to_fwnode(node),
						   &odmi_domain_ops, NULL);
	if (!inner_domain) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_GENERIC_MSI);
	inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
	inner_domain->msi_parent_ops = &odmi_msi_parent_ops;

	return 0;

err_unmap:
	for (i = 0; i < odmis_count; i++) {
		struct odmi_data *odmi = &odmis[i];

		if (odmi->base && !IS_ERR(odmi->base))
			iounmap(odmis[i].base);
	}
	bitmap_free(odmis_bm);
err_alloc:
	kfree(odmis);
	return ret;
}

IRQCHIP_DECLARE(mvebu_odmi, "marvell,odmi-controller", mvebu_odmi_init);