/*
 * Atmel AT91 AIC5 (Advanced Interrupt Controller) driver
 *
 * Copyright (C) 2004 SAN People
 * Copyright (C) 2004 ATMEL
 * Copyright (C) Rick Bronson
 * Copyright (C) 2014 Free Electrons
 *
 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/io.h>

#include <asm/exception.h>
#include <asm/mach/irq.h>

#include "irq-atmel-aic-common.h"
#include "irqchip.h"

/* Number of irq lines managed by AIC */
#define NR_AIC5_IRQS	128

#define AT91_AIC5_SSR		0x0
#define AT91_AIC5_INTSEL_MSK	(0x7f << 0)

#define AT91_AIC5_SMR		0x4

#define AT91_AIC5_SVR		0x8
#define AT91_AIC5_IVR		0x10
#define AT91_AIC5_FVR		0x14
#define AT91_AIC5_ISR		0x18

#define AT91_AIC5_IPR0		0x20
#define AT91_AIC5_IPR1		0x24
#define AT91_AIC5_IPR2		0x28
#define AT91_AIC5_IPR3		0x2c
#define AT91_AIC5_IMR		0x30
#define AT91_AIC5_CISR		0x34

#define AT91_AIC5_IECR		0x40
#define AT91_AIC5_IDCR		0x44
#define AT91_AIC5_ICCR		0x48
#define AT91_AIC5_ISCR		0x4c
#define AT91_AIC5_EOICR		0x38
#define AT91_AIC5_SPU		0x3c
#define AT91_AIC5_DCR		0x6c

#define AT91_AIC5_FFER		0x50
#define AT91_AIC5_FFDR		0x54
#define AT91_AIC5_FFSR		0x58

static struct irq_domain *aic5_domain;

static asmlinkage void __exception_irq_entry
aic5_handle(struct pt_regs *regs)
{
	struct irq_domain_chip_generic *dgc = aic5_domain->gc;
	struct irq_chip_generic *gc = dgc->gc[0];
	u32 irqnr;
	u32 irqstat;

	irqnr = irq_reg_readl(gc, AT91_AIC5_IVR);
	irqstat = irq_reg_readl(gc, AT91_AIC5_ISR);

	if (!irqstat)
		irq_reg_writel(gc, 0, AT91_AIC5_EOICR);
	else
		handle_domain_irq(aic5_domain, irqnr, regs);
}

static void aic5_mask(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_domain_chip_generic *dgc = domain->gc;
	struct irq_chip_generic *gc = dgc->gc[0];

	/* Disable interrupt on AIC5 */
	irq_gc_lock(gc);
	irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
	irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
	gc->mask_cache &= ~d->mask;
	irq_gc_unlock(gc);
}

static void aic5_unmask(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_domain_chip_generic *dgc = domain->gc;
	struct irq_chip_generic *gc = dgc->gc[0];

	/* Enable interrupt on AIC5 */
	irq_gc_lock(gc);
	irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
	irq_reg_writel(gc, 1, AT91_AIC5_IECR);
	gc->mask_cache |= d->mask;
	irq_gc_unlock(gc);
}

static int aic5_retrigger(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_domain_chip_generic *dgc = domain->gc;
	struct irq_chip_generic *gc = dgc->gc[0];

	/* Retrigger interrupt on AIC5 */
	irq_gc_lock(gc);
	irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
	irq_reg_writel(gc, 1, AT91_AIC5_ISCR);
	irq_gc_unlock(gc);

	return 0;
}

static int aic5_set_type(struct irq_data *d, unsigned type)
{
	struct irq_domain *domain = d->domain;
	struct irq_domain_chip_generic *dgc = domain->gc;
	struct irq_chip_generic *gc = dgc->gc[0];
	unsigned int smr;
	int ret;

	irq_gc_lock(gc);
	irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
	smr = irq_reg_readl(gc, AT91_AIC5_SMR);
	ret = aic_common_set_type(d, type, &smr);
	if (!ret)
		irq_reg_writel(gc, smr, AT91_AIC5_SMR);
	irq_gc_unlock(gc);

	return ret;
}

#ifdef CONFIG_PM
static void aic5_suspend(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_domain_chip_generic *dgc = domain->gc;
	struct irq_chip_generic *bgc = dgc->gc[0];
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	int i;
	u32 mask;

	irq_gc_lock(bgc);
	for (i = 0; i < dgc->irqs_per_chip; i++) {
		mask = 1 << i;
		if ((mask & gc->mask_cache) == (mask & gc->wake_active))
			continue;

		irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
		if (mask & gc->wake_active)
			irq_reg_writel(bgc, 1, AT91_AIC5_IECR);
		else
			irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
	}
	irq_gc_unlock(bgc);
}

static void aic5_resume(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_domain_chip_generic *dgc = domain->gc;
	struct irq_chip_generic *bgc = dgc->gc[0];
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	int i;
	u32 mask;

	irq_gc_lock(bgc);
	for (i = 0; i < dgc->irqs_per_chip; i++) {
		mask = 1 << i;
		if ((mask & gc->mask_cache) == (mask & gc->wake_active))
			continue;

		irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
		if (mask & gc->mask_cache)
			irq_reg_writel(bgc, 1, AT91_AIC5_IECR);
		else
			irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
	}
	irq_gc_unlock(bgc);
}

static void aic5_pm_shutdown(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_domain_chip_generic *dgc = domain->gc;
	struct irq_chip_generic *bgc = dgc->gc[0];
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	int i;

	irq_gc_lock(bgc);
	for (i = 0; i < dgc->irqs_per_chip; i++) {
		irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
		irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
		irq_reg_writel(bgc, 1, AT91_AIC5_ICCR);
	}
	irq_gc_unlock(bgc);
}
#else
#define aic5_suspend		NULL
#define aic5_resume		NULL
#define aic5_pm_shutdown	NULL
#endif /* CONFIG_PM */

static void __init aic5_hw_init(struct irq_domain *domain)
{
	struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
	int i;

	/*
	 * Perform 8 End Of Interrupt commands to make sure AIC
	 * will not lock out nIRQ
	 */
	for (i = 0; i < 8; i++)
		irq_reg_writel(gc, 0, AT91_AIC5_EOICR);

	/*
	 * Spurious Interrupt ID in Spurious Vector Register.
	 * When there is no current interrupt, the IRQ Vector Register
	 * reads the value stored in AIC_SPU
	 */
	irq_reg_writel(gc, 0xffffffff, AT91_AIC5_SPU);

	/* No debugging in AIC: Debug (Protect) Control Register */
	irq_reg_writel(gc, 0, AT91_AIC5_DCR);

	/* Disable and clear all interrupts initially */
	for (i = 0; i < domain->revmap_size; i++) {
		irq_reg_writel(gc, i, AT91_AIC5_SSR);
		irq_reg_writel(gc, i, AT91_AIC5_SVR);
		irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
		irq_reg_writel(gc, 1, AT91_AIC5_ICCR);
	}
}

static int aic5_irq_domain_xlate(struct irq_domain *d,
				 struct device_node *ctrlr,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq,
				 unsigned int *out_type)
{
	struct irq_domain_chip_generic *dgc = d->gc;
	struct irq_chip_generic *gc;
	unsigned smr;
	int ret;

	if (!dgc)
		return -EINVAL;

	ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
					  out_hwirq, out_type);
	if (ret)
		return ret;

	gc = dgc->gc[0];

	irq_gc_lock(gc);
	irq_reg_writel(gc, *out_hwirq, AT91_AIC5_SSR);
	smr = irq_reg_readl(gc, AT91_AIC5_SMR);
	ret = aic_common_set_priority(intspec[2], &smr);
	if (!ret)
		irq_reg_writel(gc, intspec[2] | smr, AT91_AIC5_SMR);
	irq_gc_unlock(gc);

	return ret;
}

static const struct irq_domain_ops aic5_irq_ops = {
	.map	= irq_map_generic_chip,
	.xlate	= aic5_irq_domain_xlate,
};

static void __init sama5d3_aic_irq_fixup(struct device_node *root)
{
	aic_common_rtc_irq_fixup(root);
}

static const struct of_device_id __initdata aic5_irq_fixups[] = {
	{ .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
	{ .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
	{ /* sentinel */ },
};

static int __init aic5_of_init(struct device_node *node,
			       struct device_node *parent,
			       int nirqs)
{
	struct irq_chip_generic *gc;
	struct irq_domain *domain;
	int nchips;
	int i;

	if (nirqs > NR_AIC5_IRQS)
		return -EINVAL;

	if (aic5_domain)
		return -EEXIST;

	domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5",
				    nirqs);
	if (IS_ERR(domain))
		return PTR_ERR(domain);

	aic_common_irq_fixup(aic5_irq_fixups);

	aic5_domain = domain;
	nchips = aic5_domain->revmap_size / 32;
	for (i = 0; i < nchips; i++) {
		gc = irq_get_domain_generic_chip(domain, i * 32);

		gc->chip_types[0].regs.eoi = AT91_AIC5_EOICR;
		gc->chip_types[0].chip.irq_mask = aic5_mask;
		gc->chip_types[0].chip.irq_unmask = aic5_unmask;
		gc->chip_types[0].chip.irq_retrigger = aic5_retrigger;
		gc->chip_types[0].chip.irq_set_type = aic5_set_type;
		gc->chip_types[0].chip.irq_suspend = aic5_suspend;
		gc->chip_types[0].chip.irq_resume = aic5_resume;
		gc->chip_types[0].chip.irq_pm_shutdown = aic5_pm_shutdown;
	}

	aic5_hw_init(domain);
	set_handle_irq(aic5_handle);

	return 0;
}

#define NR_SAMA5D3_IRQS		48

static int __init sama5d3_aic5_of_init(struct device_node *node,
				       struct device_node *parent)
{
	return aic5_of_init(node, parent, NR_SAMA5D3_IRQS);
}
IRQCHIP_DECLARE(sama5d3_aic5, "atmel,sama5d3-aic", sama5d3_aic5_of_init);

#define NR_SAMA5D4_IRQS		68

static int __init sama5d4_aic5_of_init(struct device_node *node,
				       struct device_node *parent)
{
	return aic5_of_init(node, parent, NR_SAMA5D4_IRQS);
}
IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init);
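
/*
 * Illustrative sketch only, not part of the driver: a device tree node of
 * the kind matched by the IRQCHIP_DECLARE() entries above, shown here
 * because aic5_irq_domain_xlate() expects a three-cell interrupt specifier
 * (hwirq number, trigger type, AIC priority 0-7). The register address,
 * size and external-irqs value below are assumptions for a sama5d3-like
 * layout and must be checked against the SoC's dtsi.
 *
 *	aic: interrupt-controller@fffff000 {
 *		compatible = "atmel,sama5d3-aic";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		reg = <0xfffff000 0x200>;
 *		atmel,external-irqs = <31>;
 *	};
 *
 * A peripheral then references it with e.g.
 * interrupts = <21 IRQ_TYPE_LEVEL_HIGH 0>, where the last cell is the
 * priority that the xlate hook programs into AT91_AIC5_SMR.
 */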