// SPDX-License-Identifier: GPL-2.0-only
//
// Author: Steve Chen <schen@mvista.com>
// Copyright (C) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
// Copyright (C) 2019, Texas Instruments
//
// TI Common Platform Interrupt Controller (cp_intc) driver

#include <linux/export.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/irq-davinci-cp-intc.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/exception.h>

#define DAVINCI_CP_INTC_CTRL			0x04
#define DAVINCI_CP_INTC_HOST_CTRL		0x0c
#define DAVINCI_CP_INTC_GLOBAL_ENABLE		0x10
#define DAVINCI_CP_INTC_SYS_STAT_IDX_CLR	0x24
#define DAVINCI_CP_INTC_SYS_ENABLE_IDX_SET	0x28
#define DAVINCI_CP_INTC_SYS_ENABLE_IDX_CLR	0x2c
#define DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET	0x34
#define DAVINCI_CP_INTC_HOST_ENABLE_IDX_CLR	0x38
#define DAVINCI_CP_INTC_PRIO_IDX		0x80
#define DAVINCI_CP_INTC_SYS_STAT_CLR(n)		(0x0280 + (n << 2))
#define DAVINCI_CP_INTC_SYS_ENABLE_CLR(n)	(0x0380 + (n << 2))
#define DAVINCI_CP_INTC_CHAN_MAP(n)		(0x0400 + (n << 2))
#define DAVINCI_CP_INTC_SYS_POLARITY(n)		(0x0d00 + (n << 2))
#define DAVINCI_CP_INTC_SYS_TYPE(n)		(0x0d80 + (n << 2))
#define DAVINCI_CP_INTC_HOST_ENABLE(n)		(0x1500 + (n << 2))
#define DAVINCI_CP_INTC_PRI_INDX_MASK		GENMASK(9, 0)
#define DAVINCI_CP_INTC_GPIR_NONE		BIT(31)

static void __iomem *davinci_cp_intc_base;
static struct irq_domain *davinci_cp_intc_irq_domain;

static inline unsigned int davinci_cp_intc_read(unsigned int offset)
{
	return readl_relaxed(davinci_cp_intc_base + offset);
}

static inline void davinci_cp_intc_write(unsigned long value,
					 unsigned int offset)
{
	writel_relaxed(value, davinci_cp_intc_base + offset);
}

static void davinci_cp_intc_ack_irq(struct irq_data *d)
{
	davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_STAT_IDX_CLR);
}

static void davinci_cp_intc_mask_irq(struct irq_data *d)
{
	/* XXX don't know why we need to disable nIRQ here... */
	davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_CLR);
	davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_ENABLE_IDX_CLR);
	davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET);
}

static void davinci_cp_intc_unmask_irq(struct irq_data *d)
{
	davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_ENABLE_IDX_SET);
}

static int davinci_cp_intc_set_irq_type(struct irq_data *d,
					unsigned int flow_type)
{
	unsigned int reg, mask, polarity, type;

	reg = BIT_WORD(d->hwirq);
	mask = BIT_MASK(d->hwirq);
	polarity = davinci_cp_intc_read(DAVINCI_CP_INTC_SYS_POLARITY(reg));
	type = davinci_cp_intc_read(DAVINCI_CP_INTC_SYS_TYPE(reg));

	switch (flow_type) {
	case IRQ_TYPE_EDGE_RISING:
		polarity |= mask;
		type |= mask;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		polarity &= ~mask;
		type |= mask;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		polarity |= mask;
		type &= ~mask;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		polarity &= ~mask;
		type &= ~mask;
		break;
	default:
		return -EINVAL;
	}

	davinci_cp_intc_write(polarity, DAVINCI_CP_INTC_SYS_POLARITY(reg));
	davinci_cp_intc_write(type, DAVINCI_CP_INTC_SYS_TYPE(reg));

	return 0;
}

static struct irq_chip davinci_cp_intc_irq_chip = {
	.name		= "cp_intc",
	.irq_ack	= davinci_cp_intc_ack_irq,
	.irq_mask	= davinci_cp_intc_mask_irq,
	.irq_unmask	= davinci_cp_intc_unmask_irq,
	.irq_set_type	= davinci_cp_intc_set_irq_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE,
};

static void __exception_irq_entry davinci_cp_intc_handle_irq(struct pt_regs *regs)
{
	int gpir, irqnr, none;

	/*
	 * The interrupt number is in first ten bits. The NONE field set to 1
	 * indicates a spurious irq.
	 */

	gpir = davinci_cp_intc_read(DAVINCI_CP_INTC_PRIO_IDX);
	irqnr = gpir & DAVINCI_CP_INTC_PRI_INDX_MASK;
	none = gpir & DAVINCI_CP_INTC_GPIR_NONE;

	if (unlikely(none)) {
		pr_err_once("%s: spurious irq!\n", __func__);
		return;
	}

	generic_handle_domain_irq(davinci_cp_intc_irq_domain, irqnr);
}

static int davinci_cp_intc_host_map(struct irq_domain *h, unsigned int virq,
				    irq_hw_number_t hw)
{
	pr_debug("cp_intc_host_map(%d, 0x%lx)\n", virq, hw);

	irq_set_chip(virq, &davinci_cp_intc_irq_chip);
	irq_set_probe(virq);
	irq_set_handler(virq, handle_edge_irq);

	return 0;
}

static const struct irq_domain_ops davinci_cp_intc_irq_domain_ops = {
	.map = davinci_cp_intc_host_map,
	.xlate = irq_domain_xlate_onetwocell,
};

static int __init
davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
			struct device_node *node)
{
	unsigned int num_regs = BITS_TO_LONGS(config->num_irqs);
	int offset, irq_base;
	void __iomem *req;

	req = request_mem_region(config->reg.start,
				 resource_size(&config->reg),
				 "davinci-cp-intc");
	if (!req) {
		pr_err("%s: register range busy\n", __func__);
		return -EBUSY;
	}

	davinci_cp_intc_base = ioremap(config->reg.start,
				       resource_size(&config->reg));
	if (!davinci_cp_intc_base) {
		pr_err("%s: unable to ioremap register range\n", __func__);
		return -EINVAL;
	}

	davinci_cp_intc_write(0, DAVINCI_CP_INTC_GLOBAL_ENABLE);

	/* Disable all host interrupts */
	davinci_cp_intc_write(0, DAVINCI_CP_INTC_HOST_ENABLE(0));

	/* Disable system interrupts */
	for (offset = 0; offset < num_regs; offset++)
		davinci_cp_intc_write(~0,
			DAVINCI_CP_INTC_SYS_ENABLE_CLR(offset));

	/* Set to normal mode, no nesting, no priority hold */
	davinci_cp_intc_write(0, DAVINCI_CP_INTC_CTRL);
	davinci_cp_intc_write(0, DAVINCI_CP_INTC_HOST_CTRL);

	/* Clear system interrupt status */
	for (offset = 0; offset < num_regs; offset++)
		davinci_cp_intc_write(~0,
			DAVINCI_CP_INTC_SYS_STAT_CLR(offset));

	/* Enable nIRQ (what about nFIQ?) */
	davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET);

	/* Default all priorities to channel 7. */
	num_regs = (config->num_irqs + 3) >> 2;	/* 4 channels per register */
	for (offset = 0; offset < num_regs; offset++)
		davinci_cp_intc_write(0x07070707,
				      DAVINCI_CP_INTC_CHAN_MAP(offset));

	irq_base = irq_alloc_descs(-1, 0, config->num_irqs, 0);
	if (irq_base < 0) {
		pr_err("%s: unable to allocate interrupt descriptors: %d\n",
		       __func__, irq_base);
		return irq_base;
	}

	davinci_cp_intc_irq_domain = irq_domain_add_legacy(
					node, config->num_irqs, irq_base, 0,
					&davinci_cp_intc_irq_domain_ops, NULL);

	if (!davinci_cp_intc_irq_domain) {
		pr_err("%s: unable to create an interrupt domain\n", __func__);
		return -EINVAL;
	}

	set_handle_irq(davinci_cp_intc_handle_irq);

	/* Enable global interrupt */
	davinci_cp_intc_write(1, DAVINCI_CP_INTC_GLOBAL_ENABLE);

	return 0;
}

int __init davinci_cp_intc_init(const struct davinci_cp_intc_config *config)
{
	return davinci_cp_intc_do_init(config, NULL);
}

static int __init davinci_cp_intc_of_init(struct device_node *node,
					  struct device_node *parent)
{
	struct davinci_cp_intc_config config = { };
	int ret;

	ret = of_address_to_resource(node, 0, &config.reg);
	if (ret) {
		pr_err("%s: unable to get the register range from device-tree\n",
		       __func__);
		return ret;
	}

	ret = of_property_read_u32(node, "ti,intc-size", &config.num_irqs);
	if (ret) {
		pr_err("%s: unable to read the 'ti,intc-size' property\n",
		       __func__);
		return ret;
	}

	return davinci_cp_intc_do_init(&config, node);
}
IRQCHIP_DECLARE(cp_intc, "ti,cp-intc", davinci_cp_intc_of_init);
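
/*
 * Illustrative usage sketch (not part of the driver): a hedged example of
 * how a legacy, non-DT DaVinci board file might call the exported
 * davinci_cp_intc_init() entry point above. The struct
 * davinci_cp_intc_config fields (.reg, .num_irqs) are assumed from their
 * use in davinci_cp_intc_do_init(); the base address, size and interrupt
 * count below are hypothetical placeholders, not values for a real SoC.
 *
 *	#include <linux/ioport.h>
 *	#include <linux/printk.h>
 *	#include <linux/sizes.h>
 *	#include <linux/irqchip/irq-davinci-cp-intc.h>
 *
 *	static const struct davinci_cp_intc_config board_cp_intc_config = {
 *		// Register window of the interrupt controller (placeholder).
 *		.reg		= DEFINE_RES_MEM(0x01c48000, SZ_8K),
 *		// Number of system interrupt inputs on this SoC (placeholder).
 *		.num_irqs	= 101,
 *	};
 *
 *	void __init board_init_irq(void)
 *	{
 *		// Maps the registers, masks and clears all system interrupts,
 *		// registers the legacy irq_domain and enables the host IRQ.
 *		if (davinci_cp_intc_init(&board_cp_intc_config))
 *			pr_err("cp_intc: interrupt controller init failed\n");
 *	}
 */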