// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
 * Loongson Local IO Interrupt Controller support
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irqchip/chained_irq.h>

#ifdef CONFIG_MIPS
#include <loongson.h>
#else
#include <asm/loongson.h>
#endif

#define LIOINTC_CHIP_IRQ        32
#define LIOINTC_NUM_PARENT      4
#define LIOINTC_NUM_CORES       4

#define LIOINTC_INTC_CHIP_START 0x20

#define LIOINTC_REG_INTC_STATUS         (LIOINTC_INTC_CHIP_START + 0x20)
#define LIOINTC_REG_INTC_EN_STATUS      (LIOINTC_INTC_CHIP_START + 0x04)
#define LIOINTC_REG_INTC_ENABLE         (LIOINTC_INTC_CHIP_START + 0x08)
#define LIOINTC_REG_INTC_DISABLE        (LIOINTC_INTC_CHIP_START + 0x0c)
#define LIOINTC_REG_INTC_POL            (LIOINTC_INTC_CHIP_START + 0x10)
#define LIOINTC_REG_INTC_EDGE           (LIOINTC_INTC_CHIP_START + 0x14)

#define LIOINTC_SHIFT_INTx      4

#define LIOINTC_ERRATA_IRQ      10

#if defined(CONFIG_MIPS)
#define liointc_core_id get_ebase_cpunum()
#else
#define liointc_core_id get_csr_cpuid()
#endif

struct liointc_handler_data {
        struct liointc_priv     *priv;
        u32                     parent_int_map;
};

struct liointc_priv {
        struct irq_chip_generic         *gc;
        struct liointc_handler_data     handler[LIOINTC_NUM_PARENT];
        void __iomem                    *core_isr[LIOINTC_NUM_CORES];
        u8                              map_cache[LIOINTC_CHIP_IRQ];
        u32                             int_pol;
        u32                             int_edge;
        bool                            has_lpc_irq_errata;
};

struct fwnode_handle *liointc_handle;

static void liointc_chained_handle_irq(struct irq_desc *desc)
{
        struct liointc_handler_data *handler = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct irq_chip_generic *gc = handler->priv->gc;
        int core = liointc_core_id % LIOINTC_NUM_CORES;
        u32 pending;

        chained_irq_enter(chip, desc);

        pending = readl(handler->priv->core_isr[core]);

        if (!pending) {
                /* Always blame LPC IRQ if we have that bug */
                if (handler->priv->has_lpc_irq_errata &&
                    (handler->parent_int_map & gc->mask_cache &
                     BIT(LIOINTC_ERRATA_IRQ)))
                        pending = BIT(LIOINTC_ERRATA_IRQ);
                else
                        spurious_interrupt();
        }

        while (pending) {
                int bit = __ffs(pending);

                generic_handle_domain_irq(gc->domain, bit);
                pending &= ~BIT(bit);
        }

        chained_irq_exit(chip, desc);
}

static void liointc_set_bit(struct irq_chip_generic *gc,
                            unsigned int offset,
                            u32 mask, bool set)
{
        if (set)
                writel(readl(gc->reg_base + offset) | mask,
                       gc->reg_base + offset);
        else
                writel(readl(gc->reg_base + offset) & ~mask,
                       gc->reg_base + offset);
}

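/*
 * Trigger encoding programmed by liointc_set_type() below, one bit per IRQ
 * in the EDGE and POL registers (summary derived from the switch cases):
 *   EDGE=0, POL=1  level triggered, active high
 *   EDGE=0, POL=0  level triggered, active low
 *   EDGE=1, POL=1  edge triggered, rising
 *   EDGE=1, POL=0  edge triggered, falling
 */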
static int liointc_set_type(struct irq_data *data, unsigned int type)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
        u32 mask = data->mask;
        unsigned long flags;

        irq_gc_lock_irqsave(gc, flags);
        switch (type) {
        case IRQ_TYPE_LEVEL_HIGH:
                liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
                liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
                break;
        case IRQ_TYPE_LEVEL_LOW:
                liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
                liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
                break;
        case IRQ_TYPE_EDGE_RISING:
                liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
                liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
                break;
        case IRQ_TYPE_EDGE_FALLING:
                liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
                liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
                break;
        default:
                irq_gc_unlock_irqrestore(gc, flags);
                return -EINVAL;
        }
        irq_gc_unlock_irqrestore(gc, flags);

        irqd_set_trigger_type(data, type);
        return 0;
}

static void liointc_suspend(struct irq_chip_generic *gc)
{
        struct liointc_priv *priv = gc->private;

        priv->int_pol = readl(gc->reg_base + LIOINTC_REG_INTC_POL);
        priv->int_edge = readl(gc->reg_base + LIOINTC_REG_INTC_EDGE);
}

static void liointc_resume(struct irq_chip_generic *gc)
{
        struct liointc_priv *priv = gc->private;
        unsigned long flags;
        int i;

        irq_gc_lock_irqsave(gc, flags);
        /* Disable all at first */
        writel(0xffffffff, gc->reg_base + LIOINTC_REG_INTC_DISABLE);
        /* Restore map cache */
        for (i = 0; i < LIOINTC_CHIP_IRQ; i++)
                writeb(priv->map_cache[i], gc->reg_base + i);
        writel(priv->int_pol, gc->reg_base + LIOINTC_REG_INTC_POL);
        writel(priv->int_edge, gc->reg_base + LIOINTC_REG_INTC_EDGE);
        /* Restore mask cache */
        writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE);
        irq_gc_unlock_irqrestore(gc, flags);
}

static int parent_irq[LIOINTC_NUM_PARENT];
static u32 parent_int_map[LIOINTC_NUM_PARENT];
static const char *const parent_names[] = {"int0", "int1", "int2", "int3"};
static const char *const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"};

static int liointc_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
                                const u32 *intspec, unsigned int intsize,
                                unsigned long *out_hwirq, unsigned int *out_type)
{
        if (WARN_ON(intsize < 1))
                return -EINVAL;
        *out_hwirq = intspec[0] - GSI_MIN_CPU_IRQ;

        if (intsize > 1)
                *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
        else
                *out_type = IRQ_TYPE_NONE;

        return 0;
}

static const struct irq_domain_ops acpi_irq_gc_ops = {
        .map    = irq_map_generic_chip,
        .unmap  = irq_unmap_generic_chip,
        .xlate  = liointc_domain_xlate,
};

static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
                        struct fwnode_handle *domain_handle, struct device_node *node)
{
        int i, err;
        void __iomem *base;
        struct irq_chip_type *ct;
        struct irq_chip_generic *gc;
        struct irq_domain *domain;
        struct liointc_priv *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        base = ioremap(addr, size);
        if (!base)
                goto out_free_priv;

        for (i = 0; i < LIOINTC_NUM_CORES; i++)
                priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;

        for (i = 0; i < LIOINTC_NUM_PARENT; i++)
                priv->handler[i].parent_int_map = parent_int_map[i];

        if (revision > 1) {
                for (i = 0; i < LIOINTC_NUM_CORES; i++) {
                        int index = of_property_match_string(node,
                                        "reg-names", core_reg_names[i]);

                        if (index < 0)
                                continue;

                        priv->core_isr[i] = of_iomap(node, index);
                }

                if (!priv->core_isr[0])
                        goto out_iounmap;
        }

        /* Setup IRQ domain */
        if (!acpi_disabled)
                domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
                                                  &acpi_irq_gc_ops, priv);
        else
                domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
                                                  &irq_generic_chip_ops, priv);
        if (!domain) {
                pr_err("loongson-liointc: cannot add IRQ domain\n");
                goto out_iounmap;
        }

        err = irq_alloc_domain_generic_chips(domain, LIOINTC_CHIP_IRQ, 1,
                                             (node ? node->full_name : "LIOINTC"),
                                             handle_level_irq, 0, IRQ_NOPROBE, 0);
        if (err) {
                pr_err("loongson-liointc: unable to register IRQ domain\n");
                goto out_free_domain;
        }

        /* Disable all IRQs */
        writel(0xffffffff, base + LIOINTC_REG_INTC_DISABLE);
        /* Set to level triggered */
        writel(0x0, base + LIOINTC_REG_INTC_EDGE);

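        /*
         * Route-byte layout as constructed below: the high nibble selects
         * the parent INTx pin the IRQ is forwarded to (BIT(i) shifted by
         * LIOINTC_SHIFT_INTx) and the low nibble selects the target core
         * (here always the boot CPU).
         */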
244 pr_err("loongson-liointc: cannot add IRQ domain\n"); 245 goto out_iounmap; 246 } 247 248 err = irq_alloc_domain_generic_chips(domain, LIOINTC_CHIP_IRQ, 1, 249 (node ? node->full_name : "LIOINTC"), 250 handle_level_irq, 0, IRQ_NOPROBE, 0); 251 if (err) { 252 pr_err("loongson-liointc: unable to register IRQ domain\n"); 253 goto out_free_domain; 254 } 255 256 257 /* Disable all IRQs */ 258 writel(0xffffffff, base + LIOINTC_REG_INTC_DISABLE); 259 /* Set to level triggered */ 260 writel(0x0, base + LIOINTC_REG_INTC_EDGE); 261 262 /* Generate parent INT part of map cache */ 263 for (i = 0; i < LIOINTC_NUM_PARENT; i++) { 264 u32 pending = priv->handler[i].parent_int_map; 265 266 while (pending) { 267 int bit = __ffs(pending); 268 269 priv->map_cache[bit] = BIT(i) << LIOINTC_SHIFT_INTx; 270 pending &= ~BIT(bit); 271 } 272 } 273 274 for (i = 0; i < LIOINTC_CHIP_IRQ; i++) { 275 /* Generate core part of map cache */ 276 priv->map_cache[i] |= BIT(loongson_sysconf.boot_cpu_id); 277 writeb(priv->map_cache[i], base + i); 278 } 279 280 gc = irq_get_domain_generic_chip(domain, 0); 281 gc->private = priv; 282 gc->reg_base = base; 283 gc->domain = domain; 284 gc->suspend = liointc_suspend; 285 gc->resume = liointc_resume; 286 287 ct = gc->chip_types; 288 ct->regs.enable = LIOINTC_REG_INTC_ENABLE; 289 ct->regs.disable = LIOINTC_REG_INTC_DISABLE; 290 ct->chip.irq_unmask = irq_gc_unmask_enable_reg; 291 ct->chip.irq_mask = irq_gc_mask_disable_reg; 292 ct->chip.irq_mask_ack = irq_gc_mask_disable_reg; 293 ct->chip.irq_set_type = liointc_set_type; 294 295 gc->mask_cache = 0; 296 priv->gc = gc; 297 298 for (i = 0; i < LIOINTC_NUM_PARENT; i++) { 299 if (parent_irq[i] <= 0) 300 continue; 301 302 priv->handler[i].priv = priv; 303 irq_set_chained_handler_and_data(parent_irq[i], 304 liointc_chained_handle_irq, &priv->handler[i]); 305 } 306 307 liointc_handle = domain_handle; 308 return 0; 309 310 out_free_domain: 311 irq_domain_remove(domain); 312 out_iounmap: 313 iounmap(base); 314 out_free_priv: 315 kfree(priv); 316 317 return -EINVAL; 318 } 319 320 #ifdef CONFIG_OF 321 322 static int __init liointc_of_init(struct device_node *node, 323 struct device_node *parent) 324 { 325 bool have_parent = FALSE; 326 int sz, i, index, revision, err = 0; 327 struct resource res; 328 329 if (!of_device_is_compatible(node, "loongson,liointc-2.0")) { 330 index = 0; 331 revision = 1; 332 } else { 333 index = of_property_match_string(node, "reg-names", "main"); 334 revision = 2; 335 } 336 337 if (of_address_to_resource(node, index, &res)) 338 return -EINVAL; 339 340 for (i = 0; i < LIOINTC_NUM_PARENT; i++) { 341 parent_irq[i] = of_irq_get_byname(node, parent_names[i]); 342 if (parent_irq[i] > 0) 343 have_parent = TRUE; 344 } 345 if (!have_parent) 346 return -ENODEV; 347 348 sz = of_property_read_variable_u32_array(node, 349 "loongson,parent_int_map", 350 &parent_int_map[0], 351 LIOINTC_NUM_PARENT, 352 LIOINTC_NUM_PARENT); 353 if (sz < 4) { 354 pr_err("loongson-liointc: No parent_int_map\n"); 355 return -ENODEV; 356 } 357 358 err = liointc_init(res.start, resource_size(&res), 359 revision, of_node_to_fwnode(node), node); 360 if (err < 0) 361 return err; 362 363 return 0; 364 } 365 366 IRQCHIP_DECLARE(loongson_liointc_1_0, "loongson,liointc-1.0", liointc_of_init); 367 IRQCHIP_DECLARE(loongson_liointc_1_0a, "loongson,liointc-1.0a", liointc_of_init); 368 IRQCHIP_DECLARE(loongson_liointc_2_0, "loongson,liointc-2.0", liointc_of_init); 369 370 #endif 371 372 #ifdef CONFIG_ACPI 373 static int __init htintc_parse_madt(union 

#ifdef CONFIG_ACPI
static int __init htintc_parse_madt(union acpi_subtable_headers *header,
                                    const unsigned long end)
{
        struct acpi_madt_ht_pic *htintc_entry = (struct acpi_madt_ht_pic *)header;
        struct irq_domain *parent = irq_find_matching_fwnode(liointc_handle, DOMAIN_BUS_ANY);

        return htvec_acpi_init(parent, htintc_entry);
}

static int __init acpi_cascade_irqdomain_init(void)
{
        int r;

        r = acpi_table_parse_madt(ACPI_MADT_TYPE_HT_PIC, htintc_parse_madt, 0);
        if (r < 0)
                return r;

        return 0;
}

int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic *acpi_liointc)
{
        int ret;
        struct fwnode_handle *domain_handle;

        parent_int_map[0] = acpi_liointc->cascade_map[0];
        parent_int_map[1] = acpi_liointc->cascade_map[1];

        parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]);
        parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]);

        domain_handle = irq_domain_alloc_fwnode(&acpi_liointc->address);
        if (!domain_handle) {
                pr_err("Unable to allocate domain handle\n");
                return -ENOMEM;
        }

        ret = liointc_init(acpi_liointc->address, acpi_liointc->size,
                           1, domain_handle, NULL);
        if (ret == 0)
                ret = acpi_cascade_irqdomain_init();
        else
                irq_domain_free_fwnode(domain_handle);

        return ret;
}
#endif