// SPDX-License-Identifier: GPL-2.0-only
/*
 * Freescale MU used as MSI controller
 *
 * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
 * Copyright 2022 NXP
 *	Frank Li <Frank.Li@nxp.com>
 *	Peng Fan <peng.fan@nxp.com>
 *
 * Based on drivers/mailbox/imx-mailbox.c
 */

#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/spinlock.h>

#include "irq-msi-lib.h"

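/* The MU exposes four receive registers; each one backs a single MSI vector. */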
#define IMX_MU_CHANS            4

enum imx_mu_xcr {
	IMX_MU_GIER,
	IMX_MU_GCR,
	IMX_MU_TCR,
	IMX_MU_RCR,
	IMX_MU_xCR_MAX,
};

enum imx_mu_xsr {
	IMX_MU_SR,
	IMX_MU_GSR,
	IMX_MU_TSR,
	IMX_MU_RSR,
	IMX_MU_xSR_MAX
};

enum imx_mu_type {
	IMX_MU_V2 = BIT(1),
};

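/*
 * On MU V2 the per-channel enable/status bits sit at bits 0..3; on older
 * MU versions they sit at bits 24..27 with the channel order reversed.
 */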
/* Receive Interrupt Enable */
#define IMX_MU_xCR_RIEn(data, x) ((data->cfg->type) & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
#define IMX_MU_xSR_RFn(data, x) ((data->cfg->type) & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))

struct imx_mu_dcfg {
	enum imx_mu_type type;
	u32     xTR;            /* Transmit Register0 */
	u32     xRR;            /* Receive Register0 */
	u32     xSR[IMX_MU_xSR_MAX];         /* Status Registers */
	u32     xCR[IMX_MU_xCR_MAX];         /* Control Registers */
};

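/*
 * Per-instance state: the A-side register base, the physical address of the
 * B-side transmit registers used as the MSI doorbell, and a bitmap of the
 * channels handed out as MSI vectors.
 */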
struct imx_mu_msi {
	raw_spinlock_t			lock;
	struct irq_domain		*msi_domain;
	void __iomem			*regs;
	phys_addr_t			msiir_addr;
	const struct imx_mu_dcfg	*cfg;
	unsigned long			used;
	struct clk			*clk;
};

static void imx_mu_write(struct imx_mu_msi *msi_data, u32 val, u32 offs)
{
	iowrite32(val, msi_data->regs + offs);
}

static u32 imx_mu_read(struct imx_mu_msi *msi_data, u32 offs)
{
	return ioread32(msi_data->regs + offs);
}

static u32 imx_mu_xcr_rmw(struct imx_mu_msi *msi_data, enum imx_mu_xcr type, u32 set, u32 clr)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&msi_data->lock, flags);
	val = imx_mu_read(msi_data, msi_data->cfg->xCR[type]);
	val &= ~clr;
	val |= set;
	imx_mu_write(msi_data, val, msi_data->cfg->xCR[type]);
	raw_spin_unlock_irqrestore(&msi_data->lock, flags);

	return val;
}

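/*
 * irq_chip callbacks for the per-channel parent interrupts: mask/unmask
 * toggle the channel's receive-interrupt-enable bit, and ack reads the
 * receive register, which clears the pending receive-full flag.
 */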
static void imx_mu_msi_parent_mask_irq(struct irq_data *data)
{
	struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);

	imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(msi_data, data->hwirq));
}

static void imx_mu_msi_parent_unmask_irq(struct irq_data *data)
{
	struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);

	imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, IMX_MU_xCR_RIEn(msi_data, data->hwirq), 0);
}

static void imx_mu_msi_parent_ack_irq(struct irq_data *data)
{
	struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);

	imx_mu_read(msi_data, msi_data->cfg->xRR + data->hwirq * 4);
}

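/*
 * The MSI doorbell is the channel's transmit register on the B side:
 * a device write there lands in the matching A-side receive register
 * and raises the channel interrupt.
 */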
static void imx_mu_msi_parent_compose_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);
	u64 addr = msi_data->msiir_addr + 4 * data->hwirq;

	msg->address_hi = upper_32_bits(addr);
	msg->address_lo = lower_32_bits(addr);
	msg->data = data->hwirq;
}

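/* All channels share one MU interrupt line, so per-vector affinity is not supported. */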
static int imx_mu_msi_parent_set_affinity(struct irq_data *irq_data,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip imx_mu_msi_parent_chip = {
	.name		= "MU",
	.irq_mask	= imx_mu_msi_parent_mask_irq,
	.irq_unmask	= imx_mu_msi_parent_unmask_irq,
	.irq_ack	= imx_mu_msi_parent_ack_irq,
	.irq_compose_msi_msg	= imx_mu_msi_parent_compose_msg,
	.irq_set_affinity = imx_mu_msi_parent_set_affinity,
};

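/* Hand out a free MU channel as the hwirq; only IMX_MU_CHANS vectors exist. */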
static int imx_mu_msi_domain_irq_alloc(struct irq_domain *domain,
					unsigned int virq,
					unsigned int nr_irqs,
					void *args)
{
	struct imx_mu_msi *msi_data = domain->host_data;
	unsigned long flags;
	int pos, err = 0;

	WARN_ON(nr_irqs != 1);

	raw_spin_lock_irqsave(&msi_data->lock, flags);
	pos = find_first_zero_bit(&msi_data->used, IMX_MU_CHANS);
	if (pos < IMX_MU_CHANS)
		__set_bit(pos, &msi_data->used);
	else
		err = -ENOSPC;
	raw_spin_unlock_irqrestore(&msi_data->lock, flags);

	if (err)
		return err;

	irq_domain_set_info(domain, virq, pos,
			    &imx_mu_msi_parent_chip, msi_data,
			    handle_edge_irq, NULL, NULL);
	return 0;
}

static void imx_mu_msi_domain_irq_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&msi_data->lock, flags);
	__clear_bit(d->hwirq, &msi_data->used);
	raw_spin_unlock_irqrestore(&msi_data->lock, flags);
}

static const struct irq_domain_ops imx_mu_msi_domain_ops = {
	.select	= msi_lib_irq_domain_select,
	.alloc	= imx_mu_msi_domain_irq_alloc,
	.free	= imx_mu_msi_domain_irq_free,
};

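/*
 * Chained handler for the A-side MU interrupt: read the receive status
 * register and demultiplex every pending channel into the MSI domain.
 */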
static void imx_mu_msi_irq_handler(struct irq_desc *desc)
{
	struct imx_mu_msi *msi_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 status;
	int i;

	status = imx_mu_read(msi_data, msi_data->cfg->xSR[IMX_MU_RSR]);

	chained_irq_enter(chip, desc);
	for (i = 0; i < IMX_MU_CHANS; i++) {
		if (status & IMX_MU_xSR_RFn(msi_data, i))
			generic_handle_domain_irq(msi_data->msi_domain, i);
	}
	chained_irq_exit(chip, desc);
}

#define IMX_MU_MSI_FLAGS_REQUIRED	(MSI_FLAG_USE_DEF_DOM_OPS |	\
					 MSI_FLAG_USE_DEF_CHIP_OPS |	\
					 MSI_FLAG_PARENT_PM_DEV)

#define IMX_MU_MSI_FLAGS_SUPPORTED	(MSI_GENERIC_FLAGS_MASK)

static const struct msi_parent_ops imx_mu_msi_parent_ops = {
	.supported_flags	= IMX_MU_MSI_FLAGS_SUPPORTED,
	.required_flags		= IMX_MU_MSI_FLAGS_REQUIRED,
	.bus_select_token       = DOMAIN_BUS_NEXUS,
	.bus_select_mask	= MATCH_PLATFORM_MSI,
	.prefix			= "MU-MSI-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

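/*
 * Create the linear parent domain and mark it as an MSI parent so that
 * per-device MSI domains are built on top of it by the MSI library.
 */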
static int imx_mu_msi_domains_init(struct imx_mu_msi *msi_data, struct device *dev)
{
	struct fwnode_handle *fwnodes = dev_fwnode(dev);
	struct irq_domain *parent;

	/* Initialize MSI domain parent */
	parent = irq_domain_create_linear(fwnodes, IMX_MU_CHANS,
					  &imx_mu_msi_domain_ops, msi_data);
	if (!parent) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
	parent->dev = parent->pm_dev = dev;
	parent->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
	parent->msi_parent_ops = &imx_mu_msi_parent_ops;
	return 0;
}

/* Register offsets for the different MU IP versions */
static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
	.type	= 0,
	.xTR    = 0x0,
	.xRR    = 0x10,
	.xSR    = {
			[IMX_MU_SR]  = 0x20,
			[IMX_MU_GSR] = 0x20,
			[IMX_MU_TSR] = 0x20,
			[IMX_MU_RSR] = 0x20,
		  },
	.xCR    = {
			[IMX_MU_GIER] = 0x24,
			[IMX_MU_GCR]  = 0x24,
			[IMX_MU_TCR]  = 0x24,
			[IMX_MU_RCR]  = 0x24,
		  },
};

static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
	.type	= 0,
	.xTR    = 0x20,
	.xRR    = 0x40,
	.xSR    = {
			[IMX_MU_SR]  = 0x60,
			[IMX_MU_GSR] = 0x60,
			[IMX_MU_TSR] = 0x60,
			[IMX_MU_RSR] = 0x60,
		  },
	.xCR    = {
			[IMX_MU_GIER] = 0x64,
			[IMX_MU_GCR]  = 0x64,
			[IMX_MU_TCR]  = 0x64,
			[IMX_MU_RCR]  = 0x64,
		  },
};

static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
	.type   = IMX_MU_V2,
	.xTR    = 0x200,
	.xRR    = 0x280,
	.xSR    = {
			[IMX_MU_SR]  = 0xC,
			[IMX_MU_GSR] = 0x118,
			[IMX_MU_TSR] = 0x124,
			[IMX_MU_RSR] = 0x12C,
		  },
	.xCR    = {
			[IMX_MU_GIER] = 0x110,
			[IMX_MU_GCR]  = 0x114,
			[IMX_MU_TCR]  = 0x120,
			[IMX_MU_RCR]  = 0x128
		  },
};

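/*
 * Probe path: map the A-side registers, record the physical address of the
 * B-side transmit registers as the MSI doorbell, attach both power domains,
 * set up the MSI parent domain and install the chained handler.
 */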
static int __init imx_mu_of_init(struct device_node *dn,
				 struct device_node *parent,
				 const struct imx_mu_dcfg *cfg)
{
	struct platform_device *pdev = of_find_device_by_node(dn);
	struct device_link *pd_link_a;
	struct device_link *pd_link_b;
	struct imx_mu_msi *msi_data;
	struct resource *res;
	struct device *pd_a;
	struct device *pd_b;
	struct device *dev;
	int ret;
	int irq;

	dev = &pdev->dev;

	msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
	if (!msi_data)
		return -ENOMEM;

	msi_data->cfg = cfg;

	msi_data->regs = devm_platform_ioremap_resource_byname(pdev, "processor-a-side");
	if (IS_ERR(msi_data->regs)) {
		dev_err(&pdev->dev, "failed to initialize 'regs'\n");
		return PTR_ERR(msi_data->regs);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "processor-b-side");
	if (!res)
		return -EIO;

	msi_data->msiir_addr = res->start + msi_data->cfg->xTR;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	platform_set_drvdata(pdev, msi_data);

	msi_data->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(msi_data->clk))
		return PTR_ERR(msi_data->clk);

	pd_a = dev_pm_domain_attach_by_name(dev, "processor-a-side");
	if (IS_ERR(pd_a))
		return PTR_ERR(pd_a);

	pd_b = dev_pm_domain_attach_by_name(dev, "processor-b-side");
	if (IS_ERR(pd_b))
		return PTR_ERR(pd_b);

	pd_link_a = device_link_add(dev, pd_a,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);

	if (!pd_link_a) {
		dev_err(dev, "Failed to add device_link to mu a.\n");
		goto err_pd_a;
	}

	pd_link_b = device_link_add(dev, pd_b,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);

	if (!pd_link_b) {
		dev_err(dev, "Failed to add device_link to mu b.\n");
		goto err_pd_b;
	}

	ret = imx_mu_msi_domains_init(msi_data, dev);
	if (ret)
		goto err_dm_init;

	pm_runtime_enable(dev);

	irq_set_chained_handler_and_data(irq,
					 imx_mu_msi_irq_handler,
					 msi_data);

	return 0;

err_dm_init:
	device_link_remove(dev, pd_b);
err_pd_b:
	device_link_remove(dev, pd_a);
err_pd_a:
	return -EINVAL;
}

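/* Runtime PM only gates the MU clock. */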
static int __maybe_unused imx_mu_runtime_suspend(struct device *dev)
{
	struct imx_mu_msi *priv = dev_get_drvdata(dev);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int __maybe_unused imx_mu_runtime_resume(struct device *dev)
{
	struct imx_mu_msi *priv = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		dev_err(dev, "failed to enable clock\n");

	return ret;
}

static const struct dev_pm_ops imx_mu_pm_ops = {
	SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
			   imx_mu_runtime_resume, NULL)
};

static int __init imx_mu_imx7ulp_of_init(struct device_node *dn,
					 struct device_node *parent)
{
	return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx7ulp);
}

static int __init imx_mu_imx6sx_of_init(struct device_node *dn,
					struct device_node *parent)
{
	return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx6sx);
}

static int __init imx_mu_imx8ulp_of_init(struct device_node *dn,
					 struct device_node *parent)
{
	return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx8ulp);
}

IRQCHIP_PLATFORM_DRIVER_BEGIN(imx_mu_msi)
IRQCHIP_MATCH("fsl,imx7ulp-mu-msi", imx_mu_imx7ulp_of_init)
IRQCHIP_MATCH("fsl,imx6sx-mu-msi", imx_mu_imx6sx_of_init)
IRQCHIP_MATCH("fsl,imx8ulp-mu-msi", imx_mu_imx8ulp_of_init)
IRQCHIP_PLATFORM_DRIVER_END(imx_mu_msi, .pm = &imx_mu_pm_ops)

MODULE_AUTHOR("Frank Li <Frank.Li@nxp.com>");
MODULE_DESCRIPTION("Freescale MU MSI controller driver");
MODULE_LICENSE("GPL");