/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
#include <asm/setup.h>

static DEFINE_RAW_SPINLOCK(mcip_lock);

#ifdef CONFIG_SMP

static char smp_cpuinfo_buf[128];

/*
 * Set the mask to halt GFRC if any online core in the SMP cluster is halted.
 * Only works for ARC HS v3.0+; it has no effect on earlier versions.
 */
static void mcip_update_gfrc_halt_mask(int cpu)
{
	struct bcr_generic gfrc;
	unsigned long flags;
	u32 gfrc_halt_mask;

	READ_BCR(ARC_REG_GFRC_BUILD, gfrc);

	/*
	 * The CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added
	 * in GFRC version 0x3.
	 */
	if (gfrc.ver < 0x3)
		return;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	__mcip_cmd(CMD_GFRC_READ_CORE, 0);
	gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
	gfrc_halt_mask |= BIT(cpu);
	__mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_setup_per_cpu(int cpu)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	smp_ipi_irq_setup(cpu, IPI_IRQ);
	smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);

	/* Update the GFRC halt mask as a new CPU has come online */
	if (mp.gfrc)
		mcip_update_gfrc_halt_mask(cpu);
}

static void mcip_ipi_send(int cpu)
{
	unsigned long flags;
	int ipi_was_pending;

	/* ARConnect can only send an IPI to other cores */
	if (unlikely(cpu == raw_smp_processor_id())) {
		arc_softirq_trigger(SOFTIRQ_IRQ);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * If the receiver already has a pending interrupt, elide sending this
	 * one. Linux cross-core calling works well with concurrent IPIs
	 * coalesced into one; see arch/arc/kernel/smp.c: ipi_send_msg_one().
	 */
	__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
	ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
	if (!ipi_was_pending)
		__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_ipi_clear(int irq)
{
	unsigned int cpu, c;
	unsigned long flags;

	if (unlikely(irq == SOFTIRQ_IRQ)) {
		arc_softirq_clear(irq);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Who sent the IPI */
	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

	cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */
*/ 111 112 /* 113 * In rare case, multiple concurrent IPIs sent to same target can 114 * possibly be coalesced by MCIP into 1 asserted IRQ, so @cpus can be 115 * "vectored" (multiple bits sets) as opposed to typical single bit 116 */ 117 do { 118 c = __ffs(cpu); /* 0,1,2,3 */ 119 __mcip_cmd(CMD_INTRPT_GENERATE_ACK, c); 120 cpu &= ~(1U << c); 121 } while (cpu); 122 123 raw_spin_unlock_irqrestore(&mcip_lock, flags); 124 } 125 126 static void mcip_probe_n_setup(void) 127 { 128 struct mcip_bcr mp; 129 130 READ_BCR(ARC_REG_MCIP_BCR, mp); 131 132 sprintf(smp_cpuinfo_buf, 133 "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n", 134 mp.ver, mp.num_cores, 135 IS_AVAIL1(mp.ipi, "IPI "), 136 IS_AVAIL1(mp.idu, "IDU "), 137 IS_AVAIL1(mp.dbg, "DEBUG "), 138 IS_AVAIL1(mp.gfrc, "GFRC")); 139 140 cpuinfo_arc700[0].extn.gfrc = mp.gfrc; 141 142 if (mp.dbg) { 143 __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf); 144 __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf); 145 } 146 } 147 148 struct plat_smp_ops plat_smp_ops = { 149 .info = smp_cpuinfo_buf, 150 .init_early_smp = mcip_probe_n_setup, 151 .init_per_cpu = mcip_setup_per_cpu, 152 .ipi_send = mcip_ipi_send, 153 .ipi_clear = mcip_ipi_clear, 154 }; 155 156 #endif 157 158 /*************************************************************************** 159 * ARCv2 Interrupt Distribution Unit (IDU) 160 * 161 * Connects external "COMMON" IRQs to core intc, providing: 162 * -dynamic routing (IRQ affinity) 163 * -load balancing (Round Robin interrupt distribution) 164 * -1:N distribution 165 * 166 * It physically resides in the MCIP hw block 167 */ 168 169 #include <linux/irqchip.h> 170 #include <linux/of.h> 171 #include <linux/of_irq.h> 172 173 /* 174 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core) 175 */ 176 static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask) 177 { 178 __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask); 179 } 180 181 static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl, 182 unsigned int distr) 183 { 184 union { 185 unsigned int word; 186 struct { 187 unsigned int distr:2, pad:2, lvl:1, pad2:27; 188 }; 189 } data; 190 191 data.distr = distr; 192 data.lvl = lvl; 193 __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word); 194 } 195 196 static void idu_irq_mask_raw(irq_hw_number_t hwirq) 197 { 198 unsigned long flags; 199 200 raw_spin_lock_irqsave(&mcip_lock, flags); 201 __mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1); 202 raw_spin_unlock_irqrestore(&mcip_lock, flags); 203 } 204 205 static void idu_irq_mask(struct irq_data *data) 206 { 207 idu_irq_mask_raw(data->hwirq); 208 } 209 210 static void idu_irq_unmask(struct irq_data *data) 211 { 212 unsigned long flags; 213 214 raw_spin_lock_irqsave(&mcip_lock, flags); 215 __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0); 216 raw_spin_unlock_irqrestore(&mcip_lock, flags); 217 } 218 219 static int 220 idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, 221 bool force) 222 { 223 unsigned long flags; 224 cpumask_t online; 225 unsigned int destination_bits; 226 unsigned int distribution_mode; 227 228 /* errout if no online cpu per @cpumask */ 229 if (!cpumask_and(&online, cpumask, cpu_online_mask)) 230 return -EINVAL; 231 232 raw_spin_lock_irqsave(&mcip_lock, flags); 233 234 destination_bits = cpumask_bits(&online)[0]; 235 idu_set_dest(data->hwirq, destination_bits); 236 237 if (ffs(destination_bits) == fls(destination_bits)) 238 distribution_mode = IDU_M_DISTRI_DEST; 239 else 240 distribution_mode = IDU_M_DISTRI_RR; 241 242 idu_set_mode(data->hwirq, 
	if (ffs(destination_bits) == fls(destination_bits))
		distribution_mode = IDU_M_DISTRI_DEST;
	else
		distribution_mode = IDU_M_DISTRI_RR;

	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return IRQ_SET_MASK_OK;
}

static void idu_irq_enable(struct irq_data *data)
{
	/*
	 * By default send all common interrupts to all available online CPUs.
	 * The affinity of common interrupts in the IDU must be set manually
	 * since in some cases the kernel will not call irq_set_affinity() by
	 * itself:
	 *   1. When the kernel is not configured with SMP support.
	 *   2. When the kernel is configured with SMP support but the upper
	 *      interrupt controller does not support setting the affinity
	 *      and so cannot propagate it to the IDU.
	 */
	idu_irq_set_affinity(data, cpu_online_mask, false);
	idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
	.name			= "MCIP IDU Intc",
	.irq_mask		= idu_irq_mask,
	.irq_unmask		= idu_irq_unmask,
	.irq_enable		= idu_irq_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity	= idu_irq_set_affinity,
#endif

};

static void idu_cascade_isr(struct irq_desc *desc)
{
	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
	struct irq_chip *core_chip = irq_desc_get_chip(desc);
	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
	irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;

	chained_irq_enter(core_chip, desc);
	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
	chained_irq_exit(core_chip, desc);
}

static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

	return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
	.xlate	= irq_domain_xlate_onecell,
	.map	= idu_irq_map,
};

/*
 * [16, 23]:   Statically assigned, always private-per-core (Timers, WDT, IPI)
 * [24, 23+C]: If C > 0 then "C" common IRQs
 * [24+C, N]:  Not statically assigned, private-per-core
 */

static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *domain;
	int nr_irqs;
	int i, virq;
	struct mcip_bcr mp;
	struct mcip_idu_bcr idu_bcr;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	if (!mp.idu)
		panic("IDU not detected, but DeviceTree using it");

	READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
	nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);

	pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */

	for (i = 0; i < nr_irqs; i++) {
		/* Mask all common interrupts by default */
		idu_irq_mask_raw(i);

		/*
		 * Map parent uplink IRQs (towards core intc) 24, 25, ...
		 * This mapping has already been done, but we need it here to
		 * get the parent virq so the IDU handler can be installed as
		 * the first-level ISR.
		 */
		virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
		BUG_ON(!virq);
		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);
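
/*
 * Illustrative sketch only (not consumed by this file): a minimal
 * device-tree fragment this driver could bind to. Node names and the
 * device address are hypothetical; the compatible string matches the
 * IRQCHIP_DECLARE above, and the single interrupt cell is the "common"
 * IRQ number, which the onecell xlate maps so that common IRQ n sits on
 * core IRQ FIRST_EXT_IRQ + n (i.e. 24 + n).
 *
 *	idu_intc: idu-interrupt-controller {
 *		compatible = "snps,archs-idu-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *	};
 *
 *	some_device: serial@f0000000 {
 *		interrupt-parent = <&idu_intc>;
 *		interrupts = <0>;
 *	};
 */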