/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2014 Cavium, Inc.
 */

#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/of.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>

static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);

struct octeon_irq_ciu_domain_data {
	int num_sum;	/* number of sum registers (2 or 3). */
};

static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];

struct octeon_ciu_chip_data {
	union {
		struct {		/* only used for ciu3 */
			u64 ciu3_addr;
			unsigned int intsn;
		};
		struct {		/* only used for ciu/ciu2 */
			u8 line;
			u8 bit;
			u8 gpio_line;
		};
	};
	int current_cpu;	/* Next CPU expected to take this irq */
};

struct octeon_core_chip_data {
	struct mutex core_irq_mutex;
	bool current_en;
	bool desired_en;
	u8 bit;
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];

static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				      struct irq_chip *chip,
				      irq_flow_handler_t handler)
{
	struct octeon_ciu_chip_data *cd;

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	irq_set_chip_and_handler(irq, chip, handler);

	cd->line = line;
	cd->bit = bit;
	cd->gpio_line = gpio_line;

	irq_set_chip_data(irq, cd);
	octeon_irq_ciu_to_irq[line][bit] = irq;
	return 0;
}

static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	irq_set_chip_data(irq, NULL);
	kfree(cd);
}

static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					int irq, int line, int bit)
{
	return irq_domain_associate(domain, irq, line << 6 | bit);
}

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}

static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}
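
/*
 * Enabling/disabling a core interrupt line only records the desired
 * state (desired_en) below; the actual CP0 Status update (0x100 << bit
 * is the IM bit for that line) is deferred to the irq_bus_sync_unlock
 * hook, which broadcasts it to every core with on_each_cpu().
 */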

static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);
}

static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	cd->desired_en = false;
}

static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	cd->desired_en = true;
}

static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;
		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}

static int next_cpu_for_irq(struct irq_data *data)
{
#ifdef CONFIG_SMP
	int cpu;
	int weight = cpumask_weight(data->affinity);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (weight > 1) {
		cpu = cd->current_cpu;
		for (;;) {
			cpu = cpumask_next(cpu, data->affinity);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(data->affinity);
	} else {
		cpu = smp_processor_id();
	}
	cd->current_cpu = cpu;
	return cpu;
#else
	return smp_processor_id();
#endif
}
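
/*
 * The CIU enable paths below pick their target CPU with
 * next_cpu_for_irq(): an interrupt with a multi-CPU affinity mask is
 * rotated round-robin among the online CPUs in that mask, advancing
 * from the CPU that last took it (cd->current_cpu) and wrapping when
 * cpumask_next() runs past nr_cpu_ids.
 */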

static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
	} else {
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * disabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * disabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * disabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd->line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;

		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Enable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
}
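
/*
 * Note: the *_W1S/*_W1C register aliases used above are what make the
 * "v2" and sum2 paths lockless: writing a 1 to a bit position in the
 * W1S alias sets just that enable bit, writing a 1 to the W1C alias
 * clears it, and all other bits are unaffected, so no
 * read-modify-write (and therefore no spinlock) is needed.
 */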

/*
 * Disable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
}

static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
}

static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
{
	int cpu;
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
	}
}

/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;

		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;

		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;

		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;

		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;

		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}
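
/*
 * Only edge-triggered sources latch in the SUM registers, so the W1C
 * acks above are wired up only in the *_edge irq_chip variants defined
 * below; level-triggered sources deassert on their own and need no ack.
 */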

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;

			clear_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			clear_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}

/*
 * Enable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;

			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}

static void octeon_irq_gpio_setup(struct irq_data *data)
{
	union cvmx_gpio_bit_cfgx cfg;
	struct octeon_ciu_chip_data *cd;
	u32 t = irqd_get_trigger_type(data);

	cd = irq_data_get_irq_chip_data(data);

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 ns glitch filter */
	cfg.s.fil_cnt = 7;
	cfg.s.fil_sel = 3;

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
}

static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}

static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}

static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	return IRQ_SET_MASK_OK;
}

static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all_v2(data);
}

static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all(data);
}

static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->gpio_line);

	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}
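
/*
 * octeon_irq_gpio_setup() translates the generic trigger type into
 * GPIO_BIT_CFG fields: int_type selects edge (1) vs. level (0)
 * detection, and rx_xor inverts the pin so that falling-edge/low-level
 * triggers become rising-edge/high-level internally.
 */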

static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (irqd_get_trigger_type(data) & IRQ_TYPE_EDGE_BOTH)
		handle_edge_irq(irq, desc);
	else
		handle_level_irq(irq, desc);
}

#ifdef CONFIG_SMP

static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;

	if (!cpumask_test_cpu(cpu, data->affinity))
		return;

	if (cpumask_weight(data->affinity) > 1) {
		/*
		 * It has multi CPU affinity, just remove this CPU
		 * from the affinity set.
		 */
		cpumask_copy(&new_affinity, data->affinity);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	irq_set_affinity_locked(data, &new_affinity, false);
}

static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	unsigned long *pen;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	if (!enable_one)
		return 0;

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		raw_spin_lock_irqsave(lock, flags);

		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			__set_bit(cd->bit, pen);
		} else {
			__clear_bit(cd->bit, pen);
		}
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();

		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);

		raw_spin_unlock_irqrestore(lock, flags);
	}
	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
	} else {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
			}
		}
	}
	return 0;
}

static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
					    const struct cpumask *dest,
					    bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		int index = octeon_coreid_for_cpu(cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
		}
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_sum2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_ack = octeon_irq_ciu_ack_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
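
/*
 * To summarize the irq_chip variants above: the plain CIU chips take a
 * per-CPU spinlock and rewrite the full enable mask, the _v2 chips use
 * the lockless W1S/W1C aliases, and the _sum2 chips drive the separate
 * SUM2/EN2 (IP4) register file.  Each comes in a level flavour and an
 * edge flavour that additionally provides .irq_ack.
 */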

/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_disable_local,
	.irq_eoi = octeon_irq_ciu_enable_local,

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio,
	.irq_disable = octeon_irq_ciu_disable_gpio,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	raw_spin_lock_irqsave(lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	__set_bit(coreid, pen);
	/*
	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
	 * the irq.
	 */
	wmb();
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);
}

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	int cpu = octeon_cpu_for_coreid(coreid);

	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}

static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_wd = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable_local,
};

static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 0)
		switch (bit) {
		case 48 ... 49:	/* GMX DRP */
		case 50:	/* IPD_DRP */
		case 52 ... 55:	/* Timers */
		case 58:	/* MPI */
			edge = true;
			break;
		default:
			break;
		}
	else /* line == 1 */
		switch (bit) {
		case 47:	/* PTP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;
};

static int octeon_irq_gpio_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int type;
	unsigned int pin;
	unsigned int trigger;

	if (d->of_node != node)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	pin = intspec[0];
	if (pin >= 16)
		return -EINVAL;

	trigger = intspec[1];

	switch (trigger) {
	case 1:
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case 2:
		type = IRQ_TYPE_EDGE_FALLING;
		break;
	case 4:
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 8:
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	default:
		pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
		       node->name,
		       trigger);
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	}
	*out_type = type;
	*out_hwirq = pin;

	return 0;
}

static int octeon_irq_ciu_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int ciu, bit;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	ciu = intspec[0];
	bit = intspec[1];

	if (ciu >= dd->num_sum || bit > 63)
		return -EINVAL;

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_ciu_chip_edge;
static struct irq_chip *octeon_irq_gpio_chip;
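
/*
 * Device-tree interrupt specifiers for the CIU are (ciu, bit) pairs;
 * the xlat hooks above pack them into a single hwirq number as
 * (ciu << 6) | bit, matching what octeon_irq_force_ciu_mapping()
 * associates for the hardwired mappings.
 */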

static bool octeon_irq_virq_in_range(unsigned int virq)
{
	/* We cannot let it overflow the mapping array. */
	if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0])))
		return true;

	WARN_ONCE(true, "virq out of range %u.\n", virq);
	return false;
}

static int octeon_irq_ciu_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	int rv;
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	/* Don't map irq if it is reserved for GPIO. */
	if (line == 0 && bit >= 16 && bit < 32)
		return 0;

	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (line == 2) {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2,
				handle_level_irq);
	} else {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip,
				handle_level_irq);
	}
	return rv;
}

static int octeon_irq_gpio_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
	unsigned int line, bit;
	int r;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	line = (hw + gpiod->base_hwirq) >> 6;
	bit = (hw + gpiod->base_hwirq) & 63;
	if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
	    octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
				       octeon_irq_gpio_chip,
				       octeon_irq_handle_trigger);
	return r;
}

static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
	.map = octeon_irq_ciu_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu_xlat,
};

static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
	.map = octeon_irq_gpio_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_gpio_xlat,
};

static void octeon_irq_ip2_ciu(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip3_ciu(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}
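
/*
 * All three dispatchers take the highest pending enabled bit
 * (fls64() - 1), so within a summary register the higher-numbered CIU
 * sources are effectively serviced first.
 */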

static void octeon_irq_ip4_ciu(void)
{
	int coreid = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
	u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));

	ciu_sum &= ciu_en;
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[2][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static bool octeon_irq_use_ip4;

static void octeon_irq_local_enable_ip4(void *arg)
{
	set_c0_status(STATUSF_IP4);
}

static void octeon_irq_ip4_mask(void)
{
	clear_c0_status(STATUSF_IP4);
	spurious_interrupt();
}

static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

void (*octeon_irq_setup_secondary)(void);

void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{
	octeon_irq_ip4 = h;
	octeon_irq_use_ip4 = true;
	on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
}

static void octeon_irq_percpu_enable(void)
{
	irq_cpu_online();
}

static void octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();

	__this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
	__this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
	wmb();
	raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
	/*
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}
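
/*
 * Note on the EN0/EN1 indexing used throughout: each core has two CIU
 * outputs, wired to MIPS IP2 and IP3, so the per-output enable
 * registers are indexed as (coreid * 2) for the IP2 copy and
 * (coreid * 2 + 1) for the IP3 copy; octeon_irq_init_ciu_percpu()
 * above clears all four combinations.
 */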

static void octeon_irq_init_ciu2_percpu(void)
{
	u64 regx, ipx;
	int coreid = cvmx_get_core_num();
	u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);

	/*
	 * Disable All CIU2 Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 *
	 * There are 9 registers and 3 IPX levels with strides 0x1000
	 * and 0x200 respectively. Use loops to clear them.
	 */
	for (regx = 0; regx <= 0x8000; regx += 0x1000) {
		for (ipx = 0; ipx <= 0x400; ipx += 0x200)
			cvmx_write_csr(base + regx + ipx, 0);
	}

	cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
}

static void octeon_irq_setup_secondary_ciu(void)
{
	octeon_irq_init_ciu_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}

static void octeon_irq_setup_secondary_ciu2(void)
{
	octeon_irq_init_ciu2_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}

static int __init octeon_irq_init_ciu(
	struct device_node *ciu_node, struct device_node *parent)
{
	unsigned int i, r;
	struct irq_chip *chip;
	struct irq_chip *chip_edge;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;
	struct irq_domain *ciu_domain = NULL;
	struct octeon_irq_ciu_domain_data *dd;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	octeon_irq_ip2 = octeon_irq_ip2_ciu;
	octeon_irq_ip3 = octeon_irq_ip3_ciu;
	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
	    && !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		octeon_irq_ip4 = octeon_irq_ip4_ciu;
		dd->num_sum = 3;
		octeon_irq_use_ip4 = true;
	} else {
		octeon_irq_ip4 = octeon_irq_ip4_mask;
		dd->num_sum = 2;
		octeon_irq_use_ip4 = false;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
		chip = &octeon_irq_chip_ciu_v2;
		chip_edge = &octeon_irq_chip_ciu_v2_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
	} else {
		chip = &octeon_irq_chip_ciu;
		chip_edge = &octeon_irq_chip_ciu_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
	}
	octeon_irq_ciu_chip = chip;
	octeon_irq_ciu_chip_edge = chip_edge;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu_ops, dd);
	irq_set_default_host(ciu_domain);

	/* CIU_0 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
		if (r)
			goto err;
	}

	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;
	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
		if (r)
			goto err;
	}
	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
	if (r)
		goto err;

	/* CIU_1 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_set_ciu_mapping(
			i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
			handle_level_irq);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
	if (r)
		goto err;

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);

	return 0;
err:
	return r;
}

static int __init octeon_irq_init_gpio(
	struct device_node *gpio_node, struct device_node *parent)
{
	struct octeon_irq_gpio_domain_data *gpiod;
	u32 interrupt_cells;
	unsigned int base_hwirq;
	int r;

	r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
	if (r)
		return r;

	if (interrupt_cells == 1) {
		u32 v;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = v;
	} else if (interrupt_cells == 2) {
		u32 v0, v1;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = (v0 << 6) | v1;
	} else {
		pr_warn("Bad \"#interrupt-cells\" property: %u\n",
			interrupt_cells);
		return -EINVAL;
	}

	gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
	if (gpiod) {
		/* gpio domain host_data is the base hwirq number. */
		gpiod->base_hwirq = base_hwirq;
		irq_domain_add_linear(
			gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
	} else {
		pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
		return -ENOMEM;
	}

	return 0;
}
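
/*
 * For the GPIO domain, base_hwirq records where the 16 GPIO bits sit
 * in the parent CIU, packed the same way as a CIU hwirq
 * ((line << 6) | bit); octeon_irq_gpio_map() adds the pin number to it
 * to find the underlying CIU line and bit.
 */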

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_ack(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
			octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
		cvmx_write_csr(en_addr, mask);
	}
}
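
/*
 * CIU2 keeps one bank of enable registers per source line at a stride
 * of 0x1000, so the enable/disable addresses above are formed as the
 * per-core WRKQ register plus 0x1000 * cd->line, again with W1S/W1C
 * aliases for lockless updates.
 */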

static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
	cvmx_write_csr(en_addr, mask);
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
					const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		u64 en_addr;

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		} else {
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		}
		cvmx_write_csr(en_addr, mask);
	}

	return 0;
}
#endif

static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu2_enable(data);
}

static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu2_disable_all(data);
}

static struct irq_chip octeon_irq_chip_ciu2 = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_edge = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_ack = octeon_irq_ciu2_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_mbox = {
	.name = "CIU2-M",
	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,

	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu2_wd = {
	.name = "CIU2-W",
	.irq_enable = octeon_irq_ciu2_wd_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable_local,
};

static struct irq_chip octeon_irq_chip_ciu2_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu2_enable_gpio,
	.irq_disable = octeon_irq_ciu2_disable_gpio,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static int octeon_irq_ciu2_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int ciu, bit;

	ciu = intspec[0];
	bit = intspec[1];

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 3) /* MIO */
		switch (bit) {
		case 2:		/* IPD_DRP */
		case 8 ... 11:	/* Timers */
		case 48:	/* PTP */
			edge = true;
			break;
		default:
			break;
		}
	else if (line == 6) /* PKT */
		switch (bit) {
		case 52 ... 53:	/* ILK_DRP */
		case 8 ... 12:	/* GMX_DRP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

static int octeon_irq_ciu2_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	int rv;
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	/*
	 * Don't map irq if it is reserved for GPIO.
	 * (Line 7 are the GPIO lines.)
	 */
	if (line == 7)
		return 0;

	if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (octeon_irq_ciu2_is_edge(line, bit))
		rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
						&octeon_irq_chip_ciu2_edge,
						handle_edge_irq);
	else
		rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
						&octeon_irq_chip_ciu2,
						handle_level_irq);

	return rv;
}

static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
	.map = octeon_irq_ciu2_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu2_xlat,
};

static void octeon_irq_ciu2(void)
{
	int line;
	int bit;
	int irq;
	u64 src_reg, src, sum;
	const unsigned long core_id = cvmx_get_core_num();

	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;
	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
	src = cvmx_read_csr(src_reg);

	if (unlikely(!src))
		goto spurious;

	bit = fls64(src) - 1;
	irq = octeon_irq_ciu_to_irq[line][bit];
	if (unlikely(!irq))
		goto spurious;

	do_IRQ(irq);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum where accessing the ACK
	 * registers can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
	return;
}
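
/*
 * CIU2 dispatch is two-level: the low byte of the per-core SUM
 * register says which source line is pending, and the corresponding
 * SRC register (0x1000 * line away from the WRKQ base) supplies the
 * per-bit status within that line.
 */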

static void octeon_irq_ciu2_mbox(void)
{
	int line;

	const unsigned long core_id = cvmx_get_core_num();
	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;

	do_IRQ(OCTEON_IRQ_MBOX0 + line);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum where accessing the ACK
	 * registers can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
	return;
}

static int __init octeon_irq_init_ciu2(
	struct device_node *ciu_node, struct device_node *parent)
{
	unsigned int i, r;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu2_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;

	octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
	octeon_irq_ip2 = octeon_irq_ciu2;
	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
	irq_set_default_host(ciu_domain);

	/* CIU2 */
	for (i = 0; i < 64; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 32; i++) {
		r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
			&octeon_irq_chip_ciu2_wd, handle_level_irq);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
		if (r)
			goto err;
	}

	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
	return 0;
err:
	return r;
}

struct octeon_irq_cib_host_data {
	raw_spinlock_t lock;
	u64 raw_reg;
	u64 en_reg;
	int max_bits;
};

struct octeon_irq_cib_chip_data {
	struct octeon_irq_cib_host_data *host_data;
	int bit;
};
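
/*
 * The CIB (bus interrupt block) is a cascaded controller: one parent
 * CIU interrupt fans out into up to max_bits sub-interrupts, which
 * octeon_irq_cib_handler() below demultiplexes in software from the
 * RAW and EN registers.
 */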
struct octeon_irq_cib_host_data {
	raw_spinlock_t lock;
	u64 raw_reg;
	u64 en_reg;
	int max_bits;
};

struct octeon_irq_cib_chip_data {
	struct octeon_irq_cib_host_data *host_data;
	int bit;
};

static void octeon_irq_cib_enable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en |= 1ull << cd->bit;
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}

static void octeon_irq_cib_disable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en &= ~(1ull << cd->bit);
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}

static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	return IRQ_SET_MASK_OK;
}

static struct irq_chip octeon_irq_chip_cib = {
	.name = "CIB",
	.irq_enable = octeon_irq_cib_enable,
	.irq_disable = octeon_irq_cib_disable,
	.irq_mask = octeon_irq_cib_disable,
	.irq_unmask = octeon_irq_cib_enable,
	.irq_set_type = octeon_irq_cib_set_type,
};

static int octeon_irq_cib_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int type = 0;

	if (intsize == 2)
		type = intspec[1];

	switch (type) {
	case 0: /* unofficial value, but we might as well let it work. */
	case 4: /* official value for level triggering. */
		*out_type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 1: /* official value for edge triggering. */
		*out_type = IRQ_TYPE_EDGE_RISING;
		break;
	default: /* Nothing else is acceptable. */
		return -EINVAL;
	}

	*out_hwirq = intspec[0];

	return 0;
}

static int octeon_irq_cib_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_cib_host_data *host_data = d->host_data;
	struct octeon_irq_cib_chip_data *cd;

	if (hw >= host_data->max_bits) {
		pr_err("ERROR: %s mapping %u is too big!\n",
		       d->of_node->name, (unsigned)hw);
		return -EINVAL;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	cd->host_data = host_data;
	cd->bit = hw;

	irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
				 handle_simple_irq);
	irq_set_chip_data(virq, cd);
	return 0;
}

static struct irq_domain_ops octeon_irq_domain_cib_ops = {
	.map = octeon_irq_cib_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_cib_xlat,
};

/* Chain to real handler. */
static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
{
	u64 en;
	u64 raw;
	u64 bits;
	int i;
	int irq;
	struct irq_domain *cib_domain = data;
	struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;

	en = cvmx_read_csr(host_data->en_reg);
	raw = cvmx_read_csr(host_data->raw_reg);

	bits = en & raw;

	for (i = 0; i < host_data->max_bits; i++) {
		if ((bits & 1ull << i) == 0)
			continue;
		irq = irq_find_mapping(cib_domain, i);
		if (!irq) {
			unsigned long flags;

			pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
			       i, host_data->raw_reg);
			raw_spin_lock_irqsave(&host_data->lock, flags);
			en = cvmx_read_csr(host_data->en_reg);
			en &= ~(1ull << i);
			cvmx_write_csr(host_data->en_reg, en);
			cvmx_write_csr(host_data->raw_reg, 1ull << i);
			raw_spin_unlock_irqrestore(&host_data->lock, flags);
		} else {
			struct irq_desc *desc = irq_to_desc(irq);
			struct irq_data *irq_data = irq_desc_get_irq_data(desc);
			/* If edge, acknowledge the bit we will be sending. */
			if (irqd_get_trigger_type(irq_data) &
			    IRQ_TYPE_EDGE_BOTH)
				cvmx_write_csr(host_data->raw_reg, 1ull << i);
			generic_handle_irq_desc(irq, desc);
		}
	}

	return IRQ_HANDLED;
}
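/*
 * Note on the ack ordering above (the usual edge-handling rationale,
 * stated here for clarity rather than taken from the hardware manual):
 * an edge source is cleared in the RAW register *before* its handler
 * runs, so a new edge arriving while the handler executes re-latches
 * the bit and is picked up on the next pass of the loop.  Level
 * sources are left latched; they deassert only once the handler has
 * cleared the underlying condition.
 */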
static int __init octeon_irq_init_cib(struct device_node *ciu_node,
				      struct device_node *parent)
{
	const __be32 *addr;
	u32 val;
	struct octeon_irq_cib_host_data *host_data;
	int parent_irq;
	int r;
	struct irq_domain *cib_domain;

	parent_irq = irq_of_parse_and_map(ciu_node, 0);
	if (!parent_irq) {
		pr_err("ERROR: Couldn't acquire parent_irq for %s.\n",
		       ciu_node->name);
		return -EINVAL;
	}

	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
	if (!host_data)
		return -ENOMEM;
	raw_spin_lock_init(&host_data->lock);

	addr = of_get_address(ciu_node, 0, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(0) %s.\n", ciu_node->name);
		return -EINVAL;
	}
	host_data->raw_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	addr = of_get_address(ciu_node, 1, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(1) %s.\n", ciu_node->name);
		return -EINVAL;
	}
	host_data->en_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
	if (r) {
		pr_err("ERROR: Couldn't read cavium,max-bits from %s.\n",
		       ciu_node->name);
		return r;
	}
	host_data->max_bits = val;

	cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
					   &octeon_irq_domain_cib_ops,
					   host_data);
	if (!cib_domain) {
		pr_err("ERROR: Couldn't irq_domain_add_linear().\n");
		return -ENOMEM;
	}

	cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
	cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */

	r = request_irq(parent_irq, octeon_irq_cib_handler,
			IRQF_NO_THREAD, "cib", cib_domain);
	if (r) {
		pr_err("request_irq cib failed %d\n", r);
		return r;
	}
	pr_info("CIB interrupt controller probed: %llx %d\n",
		host_data->raw_reg, host_data->max_bits);
	return 0;
}
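/*
 * A rough sketch of the device-tree node consumed above (illustrative;
 * the unit address, register offsets, parent phandle and specifier
 * values are examples, not taken from a real board file).  reg entry 0
 * is the RAW register and entry 1 the EN register; "cavium,max-bits"
 * bounds the bit scan in octeon_irq_cib_handler():
 *
 *	interrupt-controller@107000000e000 {
 *		compatible = "cavium,octeon-7130-cib";
 *		reg = <0x10700 0x0000e000 0x0 0x8>,
 *		      <0x10700 0x0000e100 0x0 0x8>;
 *		cavium,max-bits = <23>;
 *		interrupt-controller;
 *		interrupt-parent = <&ciu>;
 *		interrupts = <1 24>;
 *		#interrupt-cells = <2>;
 *	};
 */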
static struct of_device_id ciu_types[] __initdata = {
	{.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
	{.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
	{.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
	{.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
	{}
};

void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
	of_irq_init(ciu_types);
}

asmlinkage void plat_irq_dispatch(void)
{
	unsigned long cop0_cause;
	unsigned long cop0_status;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (cop0_cause & STATUSF_IP2)
			octeon_irq_ip2();
		else if (cop0_cause & STATUSF_IP3)
			octeon_irq_ip3();
		else if (cop0_cause & STATUSF_IP4)
			octeon_irq_ip4();
		else if (cop0_cause)
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		else
			break;
	}
}

#ifdef CONFIG_HOTPLUG_CPU

void octeon_fixup_irqs(void)
{
	irq_cpu_offline();
}

#endif /* CONFIG_HOTPLUG_CPU */
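/*
 * Worked example for the fls() arithmetic in plat_irq_dispatch()
 * (illustration only): the IM/IP bits occupy bits 8..15 of
 * Cause/Status, so if the highest pending-and-enabled bit is IP7 (the
 * core timer, bit 15), fls(cop0_cause) returns 16 and
 * 16 - 9 + MIPS_CPU_IRQ_BASE selects core interrupt line 7.  IP2..IP4
 * never reach this path because they are dispatched explicitly first.
 */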