1 // SPDX-License-Identifier: GPL-2.0 2 // Copyright 2017 NXP 3 4 /* INTMUX Block Diagram 5 * 6 * ________________ 7 * interrupt source # 0 +---->| | 8 * | | | 9 * interrupt source # 1 +++-->| | 10 * ... | | | channel # 0 |--------->interrupt out # 0 11 * ... | | | | 12 * ... | | | | 13 * interrupt source # X-1 +++-->|________________| 14 * | | | 15 * | | | 16 * | | | ________________ 17 * +---->| | 18 * | | | | | 19 * | +-->| | 20 * | | | | channel # 1 |--------->interrupt out # 1 21 * | | +>| | 22 * | | | | | 23 * | | | |________________| 24 * | | | 25 * | | | 26 * | | | ... 27 * | | | ... 28 * | | | 29 * | | | ________________ 30 * +---->| | 31 * | | | | 32 * +-->| | 33 * | | channel # N |--------->interrupt out # N 34 * +>| | 35 * | | 36 * |________________| 37 * 38 * 39 * N: Interrupt Channel Instance Number (N=7) 40 * X: Interrupt Source Number for each channel (X=32) 41 * 42 * The INTMUX interrupt multiplexer has 8 channels, each channel receives 32 43 * interrupt sources and generates 1 interrupt output. 44 * 45 */ 46 47 #include <linux/clk.h> 48 #include <linux/interrupt.h> 49 #include <linux/irq.h> 50 #include <linux/irqchip/chained_irq.h> 51 #include <linux/irqdomain.h> 52 #include <linux/kernel.h> 53 #include <linux/mod_devicetable.h> 54 #include <linux/of_irq.h> 55 #include <linux/platform_device.h> 56 #include <linux/spinlock.h> 57 #include <linux/pm_runtime.h> 58 59 #define CHANIER(n) (0x10 + (0x40 * n)) 60 #define CHANIPR(n) (0x20 + (0x40 * n)) 61 62 #define CHAN_MAX_NUM 0x8 63 64 struct intmux_irqchip_data { 65 u32 saved_reg; 66 int chanidx; 67 int irq; 68 struct irq_domain *domain; 69 }; 70 71 struct intmux_data { 72 raw_spinlock_t lock; 73 void __iomem *regs; 74 struct clk *ipg_clk; 75 int channum; 76 struct intmux_irqchip_data irqchip_data[] __counted_by(channum); 77 }; 78 79 static void imx_intmux_irq_mask(struct irq_data *d) 80 { 81 struct intmux_irqchip_data *irqchip_data = d->chip_data; 82 int idx = irqchip_data->chanidx; 83 struct intmux_data *data = container_of(irqchip_data, struct intmux_data, 84 irqchip_data[idx]); 85 unsigned long flags; 86 void __iomem *reg; 87 u32 val; 88 89 raw_spin_lock_irqsave(&data->lock, flags); 90 reg = data->regs + CHANIER(idx); 91 val = readl_relaxed(reg); 92 /* disable the interrupt source of this channel */ 93 val &= ~BIT(d->hwirq); 94 writel_relaxed(val, reg); 95 raw_spin_unlock_irqrestore(&data->lock, flags); 96 } 97 98 static void imx_intmux_irq_unmask(struct irq_data *d) 99 { 100 struct intmux_irqchip_data *irqchip_data = d->chip_data; 101 int idx = irqchip_data->chanidx; 102 struct intmux_data *data = container_of(irqchip_data, struct intmux_data, 103 irqchip_data[idx]); 104 unsigned long flags; 105 void __iomem *reg; 106 u32 val; 107 108 raw_spin_lock_irqsave(&data->lock, flags); 109 reg = data->regs + CHANIER(idx); 110 val = readl_relaxed(reg); 111 /* enable the interrupt source of this channel */ 112 val |= BIT(d->hwirq); 113 writel_relaxed(val, reg); 114 raw_spin_unlock_irqrestore(&data->lock, flags); 115 } 116 117 static struct irq_chip imx_intmux_irq_chip __ro_after_init = { 118 .name = "intmux", 119 .irq_mask = imx_intmux_irq_mask, 120 .irq_unmask = imx_intmux_irq_unmask, 121 }; 122 123 static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq, 124 irq_hw_number_t hwirq) 125 { 126 struct intmux_irqchip_data *data = h->host_data; 127 128 irq_set_chip_data(irq, data); 129 irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq); 130 131 return 0; 132 } 133 134 static int imx_intmux_irq_xlate(struct irq_domain *d, struct device_node *node, 135 const u32 *intspec, unsigned int intsize, 136 unsigned long *out_hwirq, unsigned int *out_type) 137 { 138 struct intmux_irqchip_data *irqchip_data = d->host_data; 139 int idx = irqchip_data->chanidx; 140 struct intmux_data *data = container_of(irqchip_data, struct intmux_data, 141 irqchip_data[idx]); 142 143 /* 144 * two cells needed in interrupt specifier: 145 * the 1st cell: hw interrupt number 146 * the 2nd cell: channel index 147 */ 148 if (WARN_ON(intsize != 2)) 149 return -EINVAL; 150 151 if (WARN_ON(intspec[1] >= data->channum)) 152 return -EINVAL; 153 154 *out_hwirq = intspec[0]; 155 *out_type = IRQ_TYPE_LEVEL_HIGH; 156 157 return 0; 158 } 159 160 static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec, 161 enum irq_domain_bus_token bus_token) 162 { 163 struct intmux_irqchip_data *irqchip_data = d->host_data; 164 165 /* Not for us */ 166 if (fwspec->fwnode != d->fwnode) 167 return false; 168 169 /* Handle pure domain searches */ 170 if (!fwspec->param_count) 171 return d->bus_token == bus_token; 172 173 return irqchip_data->chanidx == fwspec->param[1]; 174 } 175 176 static const struct irq_domain_ops imx_intmux_domain_ops = { 177 .map = imx_intmux_irq_map, 178 .xlate = imx_intmux_irq_xlate, 179 .select = imx_intmux_irq_select, 180 }; 181 182 static void imx_intmux_irq_handler(struct irq_desc *desc) 183 { 184 struct intmux_irqchip_data *irqchip_data = irq_desc_get_handler_data(desc); 185 int idx = irqchip_data->chanidx; 186 struct intmux_data *data = container_of(irqchip_data, struct intmux_data, 187 irqchip_data[idx]); 188 unsigned long irqstat; 189 int pos; 190 191 chained_irq_enter(irq_desc_get_chip(desc), desc); 192 193 /* read the interrupt source pending status of this channel */ 194 irqstat = readl_relaxed(data->regs + CHANIPR(idx)); 195 196 for_each_set_bit(pos, &irqstat, 32) 197 generic_handle_domain_irq(irqchip_data->domain, pos); 198 199 chained_irq_exit(irq_desc_get_chip(desc), desc); 200 } 201 202 static int imx_intmux_probe(struct platform_device *pdev) 203 { 204 struct device_node *np = pdev->dev.of_node; 205 struct irq_domain *domain; 206 struct intmux_data *data; 207 int channum; 208 int i, ret; 209 210 channum = platform_irq_count(pdev); 211 if (channum == -EPROBE_DEFER) { 212 return -EPROBE_DEFER; 213 } else if (channum > CHAN_MAX_NUM) { 214 dev_err(&pdev->dev, "supports up to %d multiplex channels\n", 215 CHAN_MAX_NUM); 216 return -EINVAL; 217 } 218 219 data = devm_kzalloc(&pdev->dev, struct_size(data, irqchip_data, channum), GFP_KERNEL); 220 if (!data) 221 return -ENOMEM; 222 223 data->regs = devm_platform_ioremap_resource(pdev, 0); 224 if (IS_ERR(data->regs)) { 225 dev_err(&pdev->dev, "failed to initialize reg\n"); 226 return PTR_ERR(data->regs); 227 } 228 229 data->ipg_clk = devm_clk_get(&pdev->dev, "ipg"); 230 if (IS_ERR(data->ipg_clk)) 231 return dev_err_probe(&pdev->dev, PTR_ERR(data->ipg_clk), 232 "failed to get ipg clk\n"); 233 234 data->channum = channum; 235 raw_spin_lock_init(&data->lock); 236 237 pm_runtime_get_noresume(&pdev->dev); 238 pm_runtime_set_active(&pdev->dev); 239 pm_runtime_enable(&pdev->dev); 240 241 ret = clk_prepare_enable(data->ipg_clk); 242 if (ret) { 243 dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret); 244 return ret; 245 } 246 247 for (i = 0; i < channum; i++) { 248 data->irqchip_data[i].chanidx = i; 249 250 data->irqchip_data[i].irq = irq_of_parse_and_map(np, i); 251 if (data->irqchip_data[i].irq <= 0) { 252 ret = -EINVAL; 253 dev_err(&pdev->dev, "failed to get irq\n"); 254 goto out; 255 } 256 257 domain = irq_domain_add_linear(np, 32, &imx_intmux_domain_ops, 258 &data->irqchip_data[i]); 259 if (!domain) { 260 ret = -ENOMEM; 261 dev_err(&pdev->dev, "failed to create IRQ domain\n"); 262 goto out; 263 } 264 data->irqchip_data[i].domain = domain; 265 irq_domain_set_pm_device(domain, &pdev->dev); 266 267 /* disable all interrupt sources of this channel firstly */ 268 writel_relaxed(0, data->regs + CHANIER(i)); 269 270 irq_set_chained_handler_and_data(data->irqchip_data[i].irq, 271 imx_intmux_irq_handler, 272 &data->irqchip_data[i]); 273 } 274 275 platform_set_drvdata(pdev, data); 276 277 /* 278 * Let pm_runtime_put() disable clock. 279 * If CONFIG_PM is not enabled, the clock will stay powered. 280 */ 281 pm_runtime_put(&pdev->dev); 282 283 return 0; 284 out: 285 clk_disable_unprepare(data->ipg_clk); 286 return ret; 287 } 288 289 static void imx_intmux_remove(struct platform_device *pdev) 290 { 291 struct intmux_data *data = platform_get_drvdata(pdev); 292 int i; 293 294 for (i = 0; i < data->channum; i++) { 295 /* disable all interrupt sources of this channel */ 296 writel_relaxed(0, data->regs + CHANIER(i)); 297 298 irq_set_chained_handler_and_data(data->irqchip_data[i].irq, 299 NULL, NULL); 300 301 irq_domain_remove(data->irqchip_data[i].domain); 302 } 303 304 pm_runtime_disable(&pdev->dev); 305 } 306 307 #ifdef CONFIG_PM 308 static int imx_intmux_runtime_suspend(struct device *dev) 309 { 310 struct intmux_data *data = dev_get_drvdata(dev); 311 struct intmux_irqchip_data *irqchip_data; 312 int i; 313 314 for (i = 0; i < data->channum; i++) { 315 irqchip_data = &data->irqchip_data[i]; 316 irqchip_data->saved_reg = readl_relaxed(data->regs + CHANIER(i)); 317 } 318 319 clk_disable_unprepare(data->ipg_clk); 320 321 return 0; 322 } 323 324 static int imx_intmux_runtime_resume(struct device *dev) 325 { 326 struct intmux_data *data = dev_get_drvdata(dev); 327 struct intmux_irqchip_data *irqchip_data; 328 int ret, i; 329 330 ret = clk_prepare_enable(data->ipg_clk); 331 if (ret) { 332 dev_err(dev, "failed to enable ipg clk: %d\n", ret); 333 return ret; 334 } 335 336 for (i = 0; i < data->channum; i++) { 337 irqchip_data = &data->irqchip_data[i]; 338 writel_relaxed(irqchip_data->saved_reg, data->regs + CHANIER(i)); 339 } 340 341 return 0; 342 } 343 #endif 344 345 static const struct dev_pm_ops imx_intmux_pm_ops = { 346 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 347 pm_runtime_force_resume) 348 SET_RUNTIME_PM_OPS(imx_intmux_runtime_suspend, 349 imx_intmux_runtime_resume, NULL) 350 }; 351 352 static const struct of_device_id imx_intmux_id[] = { 353 { .compatible = "fsl,imx-intmux", }, 354 { /* sentinel */ }, 355 }; 356 357 static struct platform_driver imx_intmux_driver = { 358 .driver = { 359 .name = "imx-intmux", 360 .of_match_table = imx_intmux_id, 361 .pm = &imx_intmux_pm_ops, 362 }, 363 .probe = imx_intmux_probe, 364 .remove = imx_intmux_remove, 365 }; 366 builtin_platform_driver(imx_intmux_driver); 367