// SPDX-License-Identifier: GPL-2.0-only
/*
 * Freescale SCFG MSI(-X) support
 *
 * Copyright (C) 2016 Freescale Semiconductor.
 *
 * Author: Minghuan Lian <Minghuan.Lian@nxp.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/spinlock.h>

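/*
 * Hardware model as used by this driver: a peripheral signals an MSI by
 * writing its message data to the MSIIR doorbell register.  The data encodes
 * an interrupt bit select (IBS) and a shared interrupt register select (SRS);
 * the write latches a pending bit (selected by IBS) in status register
 * MSIR[SRS], which raises the GIC SPI wired to that MSIR.  The chained
 * handler below reads the MSIR and reconstructs the hwirq from the pending
 * bit and the SRS.
 */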
#define MSI_IRQS_PER_MSIR	32
#define MSI_MSIR_OFFSET		4

#define MSI_LS1043V1_1_IRQS_PER_MSIR	8
#define MSI_LS1043V1_1_MSIR_OFFSET	0x10

struct ls_scfg_msi_cfg {
        u32 ibs_shift; /* Shift of interrupt bit select */
        u32 msir_irqs; /* Number of IRQs per MSIR */
        u32 msir_base; /* Offset of the MSIR registers */
};

struct ls_scfg_msir {
        struct ls_scfg_msi *msi_data;
        unsigned int index;
        unsigned int gic_irq;
        unsigned int bit_start;
        unsigned int bit_end;
        unsigned int srs; /* Shared interrupt register select */
        void __iomem *reg;
};

struct ls_scfg_msi {
        spinlock_t lock;
        struct platform_device *pdev;
        struct irq_domain *parent;
        void __iomem *regs;
        phys_addr_t msiir_addr;
        struct ls_scfg_msi_cfg *cfg;
        u32 msir_num;
        struct ls_scfg_msir *msir;
        u32 irqs_num;
        unsigned long *used;
};

#define MPIC_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
                                 MSI_FLAG_USE_DEF_CHIP_OPS)
#define MPIC_MSI_FLAGS_SUPPORTED (MSI_FLAG_PCI_MSIX | \
                                  MSI_GENERIC_FLAGS_MASK)

static const struct msi_parent_ops ls_scfg_msi_parent_ops = {
        .required_flags         = MPIC_MSI_FLAGS_REQUIRED,
        .supported_flags        = MPIC_MSI_FLAGS_SUPPORTED,
        .bus_select_token       = DOMAIN_BUS_NEXUS,
        .bus_select_mask        = MATCH_PCI_MSI,
        .prefix                 = "MSI-",
        .init_dev_msi_info      = msi_lib_init_dev_msi_info,
};

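/*
 * By default each MSIR (and its GIC input) is bound to one CPU and the SRS
 * field of the MSI data selects that CPU, which makes MSI affinity work.
 * Booting with "lsmsi=no-affinity" disables this: the MSIR index is used as
 * SRS instead and the affinity of an MSI can no longer be changed.
 */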
static int msi_affinity_flag = 1;

static int __init early_parse_ls_scfg_msi(char *p)
{
        if (p && strncmp(p, "no-affinity", 11) == 0)
                msi_affinity_flag = 0;
        else
                msi_affinity_flag = 1;

        return 0;
}
early_param("lsmsi", early_parse_ls_scfg_msi);

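/*
 * The MSI message data is simply the hwirq, i.e. IBS << ibs_shift | SRS.
 * In affinity mode the SRS bits of an allocated hwirq are zero and the
 * number of the CPU in the effective affinity mask is ORed in, so the
 * doorbell write lands in the MSIR owned by that CPU.
 */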
static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
        struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);

        msg->data = data->hwirq;

        if (msi_affinity_flag) {
                const struct cpumask *mask;

                mask = irq_data_get_effective_affinity_mask(data);
                msg->data |= cpumask_first(mask);
        }

        msi_msg_set_addr(irq_data_get_msi_desc(data), msg,
                         msi_data->msiir_addr);
}

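/*
 * Changing the affinity only works in affinity mode: the interrupt is
 * retargeted by encoding the new CPU (and thus its MSIR) in the message
 * data, so the target CPU must own one of the available MSIRs.
 */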
static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
                                    const struct cpumask *mask, bool force)
{
        struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
        u32 cpu;

        if (!msi_affinity_flag)
                return -EINVAL;

        if (!force)
                cpu = cpumask_any_and(mask, cpu_online_mask);
        else
                cpu = cpumask_first(mask);

        if (cpu >= msi_data->msir_num)
                return -EINVAL;

        if (msi_data->msir[cpu].gic_irq <= 0) {
                pr_warn("cannot bind the irq to cpu%d\n", cpu);
                return -EINVAL;
        }

        irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));

        return IRQ_SET_MASK_OK;
}

static struct irq_chip ls_scfg_msi_parent_chip = {
        .name                   = "SCFG",
        .irq_compose_msi_msg    = ls_scfg_msi_compose_msg,
        .irq_set_affinity       = ls_scfg_msi_set_affinity,
};

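/*
 * Allocate a free hwirq from the "used" bitmap and let the IOMMU layer map
 * the MSIIR doorbell for the device before the message is composed.
 */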
static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
                                        unsigned int virq,
                                        unsigned int nr_irqs,
                                        void *args)
{
        msi_alloc_info_t *info = args;
        struct ls_scfg_msi *msi_data = domain->host_data;
        int pos, err = 0;

        WARN_ON(nr_irqs != 1);

        spin_lock(&msi_data->lock);
        pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
        if (pos < msi_data->irqs_num)
                __set_bit(pos, msi_data->used);
        else
                err = -ENOSPC;
        spin_unlock(&msi_data->lock);

        if (err)
                return err;

        err = iommu_dma_prepare_msi(info->desc, msi_data->msiir_addr);
        if (err) {
                /* Give the hwirq back so it is not leaked on failure */
                spin_lock(&msi_data->lock);
                __clear_bit(pos, msi_data->used);
                spin_unlock(&msi_data->lock);
                return err;
        }

        irq_domain_set_info(domain, virq, pos,
                            &ls_scfg_msi_parent_chip, msi_data,
                            handle_simple_irq, NULL, NULL);

        return 0;
}

static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
                                        unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
        struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d);
        int pos;

        pos = d->hwirq;
        if (pos < 0 || pos >= msi_data->irqs_num) {
                pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
                return;
        }

        spin_lock(&msi_data->lock);
        __clear_bit(pos, msi_data->used);
        spin_unlock(&msi_data->lock);
}

static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
        .select = msi_lib_irq_domain_select,
        .alloc  = ls_scfg_msi_domain_irq_alloc,
        .free   = ls_scfg_msi_domain_irq_free,
};

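/*
 * Chained handler for the per-MSIR GIC interrupt.  The MSIR bits are laid
 * out MSB first (IBS 0 corresponds to bit_end, the highest bit of the MSIR
 * window), hence the "bit_end - pos" when reconstructing the hwirq for each
 * pending bit.
 */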
static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
{
        struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
        struct ls_scfg_msi *msi_data = msir->msi_data;
        unsigned long val;
        int pos, size, hwirq;

        chained_irq_enter(irq_desc_get_chip(desc), desc);

        val = ioread32be(msir->reg);

        pos = msir->bit_start;
        size = msir->bit_end + 1;

        for_each_set_bit_from(pos, &val, size) {
                hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
                        msir->srs;
                generic_handle_domain_irq(msi_data->parent, hwirq);
        }

        chained_irq_exit(irq_desc_get_chip(desc), desc);
}

static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
{
        struct irq_domain_info info = {
                .fwnode         = of_fwnode_handle(msi_data->pdev->dev.of_node),
                .ops            = &ls_scfg_msi_domain_ops,
                .host_data      = msi_data,
                .size           = msi_data->irqs_num,
        };

        msi_data->parent = msi_create_parent_irq_domain(&info, &ls_scfg_msi_parent_ops);
        if (!msi_data->parent) {
                dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n");
                return -ENOMEM;
        }

        return 0;
}

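/*
 * Wire up one MSIR: request its GIC input and install the chained handler.
 * On LS1043A v1.1 an MSIR is only 8 bits wide and its position within the
 * 32-bit read depends on the MSIR index, hence the bit_start/bit_end window;
 * on the other SoCs an MSIR uses all 32 bits.  Finally the hwirqs that can
 * be delivered through this MSIR are released from the "used" bitmap (in
 * affinity mode all usable hwirqs carry SRS 0, so only MSIR 0 releases any).
 */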
static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
{
        struct ls_scfg_msir *msir;
        int virq, i, hwirq;

        virq = platform_get_irq(msi_data->pdev, index);
        if (virq <= 0)
                return -ENODEV;

        msir = &msi_data->msir[index];
        msir->index = index;
        msir->msi_data = msi_data;
        msir->gic_irq = virq;
        msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;

        if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
                msir->bit_start = 32 - ((msir->index + 1) *
                                        MSI_LS1043V1_1_IRQS_PER_MSIR);
                msir->bit_end = msir->bit_start +
                                MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
        } else {
                msir->bit_start = 0;
                msir->bit_end = msi_data->cfg->msir_irqs - 1;
        }

        irq_set_chained_handler_and_data(msir->gic_irq,
                                         ls_scfg_msi_irq_handler,
                                         msir);

        if (msi_affinity_flag) {
                /* Associate the MSIR interrupt with this CPU */
                irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
                msir->srs = 0; /* This value is determined by the CPU */
        } else {
                msir->srs = index;
        }

        /* Release the hwirqs corresponding to this MSIR */
        if (!msi_affinity_flag || msir->index == 0) {
                for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
                        hwirq = i << msi_data->cfg->ibs_shift | msir->index;
                        bitmap_clear(msi_data->used, hwirq, 1);
                }
        }

        return 0;
}

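/*
 * Undo ls_scfg_msi_setup_hwirq(): detach the chained handler and mark the
 * hwirqs of this MSIR as reserved again.
 */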
static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
{
        struct ls_scfg_msi *msi_data = msir->msi_data;
        int i, hwirq;

        if (msir->gic_irq > 0)
                irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);

        for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
                hwirq = i << msi_data->cfg->ibs_shift | msir->index;
                bitmap_set(msi_data->used, hwirq, 1);
        }

        return 0;
}

static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
        .ibs_shift = 3,
        .msir_irqs = MSI_IRQS_PER_MSIR,
        .msir_base = MSI_MSIR_OFFSET,
};

static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
        .ibs_shift = 2,
        .msir_irqs = MSI_IRQS_PER_MSIR,
        .msir_base = MSI_MSIR_OFFSET,
};

static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
        .ibs_shift = 2,
        .msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
        .msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
};

static const struct of_device_id ls_scfg_msi_id[] = {
        /* The following two misspelled compatibles are obsolete */
        { .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg },
        { .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg },

        { .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg },
        { .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
        { .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
        { .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
        { .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
        {},
};
MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);

static int ls_scfg_msi_probe(struct platform_device *pdev)
{
        struct ls_scfg_msi *msi_data;
        struct resource *res;
        int i, ret;

        msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
        if (!msi_data)
                return -ENOMEM;

        msi_data->cfg = (struct ls_scfg_msi_cfg *)device_get_match_data(&pdev->dev);
        if (!msi_data->cfg)
                return -ENODEV;

        msi_data->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
        if (IS_ERR(msi_data->regs)) {
                dev_err(&pdev->dev, "failed to initialize 'regs'\n");
                return PTR_ERR(msi_data->regs);
        }
        msi_data->msiir_addr = res->start;

        msi_data->pdev = pdev;
        spin_lock_init(&msi_data->lock);

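        /*
         * hwirq = IBS << ibs_shift | SRS, with MSI_IRQS_PER_MSIR possible
         * IBS values and 2^ibs_shift possible SRS values, so the hwirq
         * space holds MSI_IRQS_PER_MSIR << ibs_shift entries.
         */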
        msi_data->irqs_num = MSI_IRQS_PER_MSIR *
                             (1 << msi_data->cfg->ibs_shift);
        msi_data->used = devm_bitmap_zalloc(&pdev->dev, msi_data->irqs_num, GFP_KERNEL);
        if (!msi_data->used)
                return -ENOMEM;
        /*
         * Reserve all the hwirqs.
         * The available hwirqs will be released in ls_scfg_msi_setup_hwirq().
         */
        bitmap_set(msi_data->used, 0, msi_data->irqs_num);

        msi_data->msir_num = of_irq_count(pdev->dev.of_node);

        if (msi_affinity_flag) {
                u32 cpu_num;

                cpu_num = num_possible_cpus();
                if (msi_data->msir_num >= cpu_num)
                        msi_data->msir_num = cpu_num;
                else
                        msi_affinity_flag = 0;
        }

        msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
                                      sizeof(*msi_data->msir),
                                      GFP_KERNEL);
        if (!msi_data->msir)
                return -ENOMEM;

        for (i = 0; i < msi_data->msir_num; i++)
                ls_scfg_msi_setup_hwirq(msi_data, i);

        ret = ls_scfg_msi_domains_init(msi_data);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, msi_data);

        return 0;
}

static void ls_scfg_msi_remove(struct platform_device *pdev)
{
        struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
        int i;

        for (i = 0; i < msi_data->msir_num; i++)
                ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);

        irq_domain_remove(msi_data->parent);

        platform_set_drvdata(pdev, NULL);
}

static struct platform_driver ls_scfg_msi_driver = {
        .driver = {
                .name = "ls-scfg-msi",
                .of_match_table = ls_scfg_msi_id,
        },
        .probe = ls_scfg_msi_probe,
        .remove = ls_scfg_msi_remove,
};

module_platform_driver(ls_scfg_msi_driver);

MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>");
MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");