// SPDX-License-Identifier: GPL-2.0
/*
 * Loongson Extend I/O Interrupt Controller support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#define pr_fmt(fmt) "eiointc: " fmt

#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/syscore_ops.h>
#include <asm/numa.h>

#include "irq-loongson.h"

/* IOCSR offsets of the eiointc register blocks (accessed via iocsr_read/write) */
#define EIOINTC_REG_NODEMAP	0x14a0
#define EIOINTC_REG_IPMAP	0x14c0
#define EIOINTC_REG_ENABLE	0x1600
#define EIOINTC_REG_BOUNCE	0x1680
#define EIOINTC_REG_ISR		0x1800
#define EIOINTC_REG_ROUTE	0x1c00

/*
 * Virtualization-extension feature/config IOCSRs, probed only when running
 * under KVM (see KVM_FEATURE_VIRT_EXTIOI checks below).
 */
#define EXTIOI_VIRT_FEATURES		0x40000000
#define EXTIOI_HAS_VIRT_EXTENSION	BIT(0)
#define EXTIOI_HAS_ENABLE_OPTION	BIT(1)
#define EXTIOI_HAS_INT_ENCODE		BIT(2)
#define EXTIOI_HAS_CPU_ENCODE		BIT(3)
#define EXTIOI_VIRT_CONFIG		0x40000004
#define EXTIOI_ENABLE			BIT(1)
#define EXTIOI_ENABLE_INT_ENCODE	BIT(2)
#define EXTIOI_ENABLE_CPU_ENCODE	BIT(3)

#define VEC_COUNT		256
#define VEC_COUNT_PER_REG	BITS_PER_LONG
#define VEC_REG_COUNT		(VEC_COUNT / BITS_PER_LONG)
#define VEC_REG_IDX(irq_id)	((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id)	((irq_id) % VEC_COUNT_PER_REG)
#define EIOINTC_ALL_ENABLE	0xffffffff
/* Enable-register value with only the given vector's bit cleared (masked) */
#define EIOINTC_ALL_ENABLE_VEC_MASK(vector)	(EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1f))
/* Address of the 32-bit enable register covering the given vector */
#define EIOINTC_REG_ENABLE_VEC(vector)		(EIOINTC_REG_ENABLE + ((vector >> 5) << 2))
/* Flag bits stored in eiointc_priv::flags */
#define EIOINTC_USE_CPU_ENCODE	BIT(0)
#define EIOINTC_ROUTE_MULT_IP	BIT(1)

#define MAX_EIO_NODES		(NR_CPUS / CORES_PER_EIO_NODE)

/*
 * Routing registers are 32bit, and there is 8-bit route setting for every
 * interrupt vector. So one Route register contains four vectors routing
 * information.
 */
#define EIOINTC_REG_ROUTE_VEC(vector)		(EIOINTC_REG_ROUTE + (vector & ~0x03))
#define EIOINTC_REG_ROUTE_VEC_SHIFT(vector)	((vector & 0x03) << 3)
#define EIOINTC_REG_ROUTE_VEC_MASK(vector)	(0xff << EIOINTC_REG_ROUTE_VEC_SHIFT(vector))

/* Number of eiointc controllers registered so far (index into eiointc_priv[]) */
static int nr_pics;
struct eiointc_priv;

/*
 * Per-parent-IRQ dispatch window: the ISR register index range
 * [start, end) that the handler attached to one CPU interrupt pin
 * must scan (see eiointc_irq_dispatch()).
 */
struct eiointc_ip_route {
	struct eiointc_priv	*priv;
	/* Offset Routed destination IP */
	int			start;
	int			end;
};

struct eiointc_priv {
	u32			node;		/* Home EIO node of this controller */
	u32			vec_count;	/* Number of interrupt vectors (128 or 256) */
	nodemask_t		node_map;	/* EIO nodes served by this controller */
	cpumask_t		cpuspan_map;	/* CPUs belonging to those nodes */
	struct fwnode_handle	*domain_handle;
	struct irq_domain	*eiointc_domain;
	int			flags;		/* EIOINTC_USE_CPU_ENCODE / EIOINTC_ROUTE_MULT_IP */
	irq_hw_number_t		parent_hwirq;	/* First CPU interrupt pin this device cascades to */
	struct eiointc_ip_route	route_info[VEC_REG_COUNT];
};

static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];

/* Turn on the extended I/O interrupt feature in the per-core MISC IOCSR. */
static void eiointc_enable(void)
{
#ifdef CONFIG_MACH_LOONGSON64
	uint64_t misc;

	misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
	misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
	iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
#endif
}

/*
 * Map a logical CPU to its EIO node; virtualized (KVM) nodes group a
 * different number of cores per node than bare metal.
 */
static int cpu_to_eio_node(int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_VIRT_EXTIOI))
		return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
	else
		return cpu_logical_map(cpu) / CORES_PER_VEIO_NODE;
}

#ifdef CONFIG_SMP
/*
 * Program the 8-bit route entry for vector @pos on every online EIO node in
 * @node_map so that it targets @cpu. @mnode is the controller's home node.
 *
 * NOTE(review): data_mask appears to be a 4-bit byte-lane select for
 * csr_any_send() with the target byte's bit cleared — confirm against the
 * csr_any_send() implementation.
 */
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
	int i, node, cpu_node, route_node;
	unsigned char coremap;
	uint32_t pos_off, data, data_byte, data_mask;

	pos_off = pos & ~3;
	data_byte = pos & 3;
	data_mask = ~BIT_MASK(data_byte) & 0xf;

	/* Calculate node and coremap of target irq */
	cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
	coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);

	for_each_online_cpu(i) {
		node = cpu_to_eio_node(i);
		if (!node_isset(node, *node_map))
			continue;

		/* EIO node 0 is in charge of inter-node interrupt dispatch */
		route_node = (node == mnode) ? cpu_node : node;
		data = ((coremap | (route_node << 4)) << (data_byte * 8));
		csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
	}
}

/*
 * Virtualized route programming: with EXTIOI_ENABLE_CPU_ENCODE set, the
 * 8-bit route entry holds the target CPU number directly (no node/coremap
 * encoding), so a plain read-modify-write of the route register suffices.
 */
static void veiointc_set_irq_route(unsigned int vector, unsigned int cpu)
{
	unsigned long reg = EIOINTC_REG_ROUTE_VEC(vector);
	unsigned int data;

	data = iocsr_read32(reg);
	data &= ~EIOINTC_REG_ROUTE_VEC_MASK(vector);
	data |= cpu_logical_map(cpu) << EIOINTC_REG_ROUTE_VEC_SHIFT(vector);
	iocsr_write32(data, reg);
}

/* Serializes all affinity changes (route registers are shared 4-per-word). */
static DEFINE_RAW_SPINLOCK(affinity_lock);

/*
 * irq_chip ->irq_set_affinity: retarget d->hwirq to the first online CPU in
 * (cpuspan_map & affinity). The vector is masked while its route is
 * rewritten, then unmasked, so no interrupt is delivered mid-update.
 */
static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
{
	unsigned int cpu;
	unsigned long flags;
	uint32_t vector, regaddr;
	struct eiointc_priv *priv = d->domain->host_data;

	raw_spin_lock_irqsave(&affinity_lock, flags);

	cpu = cpumask_first_and_and(&priv->cpuspan_map, affinity, cpu_online_mask);
	if (cpu >= nr_cpu_ids) {
		raw_spin_unlock_irqrestore(&affinity_lock, flags);
		return -EINVAL;
	}

	vector = d->hwirq;
	regaddr = EIOINTC_REG_ENABLE_VEC(vector);

	if (priv->flags & EIOINTC_USE_CPU_ENCODE) {
		iocsr_write32(EIOINTC_ALL_ENABLE_VEC_MASK(vector), regaddr);
		veiointc_set_irq_route(vector, cpu);
		iocsr_write32(EIOINTC_ALL_ENABLE, regaddr);
	} else {
		/* Mask target vector */
		csr_any_send(regaddr, EIOINTC_ALL_ENABLE_VEC_MASK(vector),
			     0x0, priv->node * CORES_PER_EIO_NODE);

		/* Set route for target vector */
		eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);

		/* Unmask target vector */
		csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
			     0x0, priv->node * CORES_PER_EIO_NODE);
	}

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	raw_spin_unlock_irqrestore(&affinity_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

/* Find the registered controller whose node_map covers @node, or -1. */
static int eiointc_index(int node)
{
	int i;

	for (i = 0; i < nr_pics; i++) {
		if (node_isset(node, eiointc_priv[i]->node_map))
			return i;
	}

	return -1;
}

/*
 * Per-CPU bring-up (cpuhotplug CPUHP_AP_IRQ_EIOINTC_STARTING callback, also
 * called at init and on syscore resume with cpu == 0): enable the CPU
 * interrupt pin(s), and — on the first core of each EIO node — program the
 * nodemap, IP map, default routing and enable registers for the whole node.
 */
static int eiointc_router_init(unsigned int cpu)
{
	int i, bit, cores, index, node;
	unsigned int data;
	int hwirq, mask;

	node = cpu_to_eio_node(cpu);
	index = eiointc_index(node);

	if (index < 0) {
		pr_err("Error: invalid nodemap!\n");
		return -EINVAL;
	}

	/* Enable cpu interrupt pin from eiointc */
	hwirq = eiointc_priv[index]->parent_hwirq;
	mask = BIT(hwirq);
	if (eiointc_priv[index]->flags & EIOINTC_ROUTE_MULT_IP)
		mask |= BIT(hwirq + 1) | BIT(hwirq + 2) | BIT(hwirq + 3);
	set_csr_ecfg(mask);

	if (!(eiointc_priv[index]->flags & EIOINTC_USE_CPU_ENCODE))
		cores = CORES_PER_EIO_NODE;
	else
		cores = CORES_PER_VEIO_NODE;

	/* Node-wide registers are only programmed by the node's first core */
	if ((cpu_logical_map(cpu) % cores) == 0) {
		eiointc_enable();

		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
			data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
			iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 32 / 4; i++) {
			/*
			 * Route to interrupt pin, relative offset used here
			 * Offset 0 means routing to IP0 and so on
			 *
			 * If flags is set with EIOINTC_ROUTE_MULT_IP,
			 * every 64 vector routes to different consecutive
			 * IPs, otherwise all vector routes to the same IP
			 */
			if (eiointc_priv[index]->flags & EIOINTC_ROUTE_MULT_IP) {
				/* The first 64 vectors route to hwirq */
				bit = BIT(hwirq++ - INT_HWI0);
				data = bit | (bit << 8);

				/* The second 64 vectors route to hwirq + 1 */
				bit = BIT(hwirq++ - INT_HWI0);
				data |= (bit << 16) | (bit << 24);

				/*
				 * Route to hwirq + 2/hwirq + 3 separately
				 * in next loop
				 */
			} else {
				bit = BIT(hwirq - INT_HWI0);
				data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			}
			iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) {
			/* Route to Node-0 Core-0 */
			if (eiointc_priv[index]->flags & EIOINTC_USE_CPU_ENCODE)
				bit = cpu_logical_map(0);
			else if (index == 0)
				bit = BIT(cpu_logical_map(0));
			else
				bit = (eiointc_priv[index]->node << 4) | 1;

			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
		}

		/* Enable all vectors and clear any stale bounce state */
		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
			data = 0xffffffff;
			iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
			iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
		}
	}

	return 0;
}

/*
 * ISR accessors: index @i selects one machine-word-sized chunk of the
 * interrupt status registers (32-bit or 64-bit per BITS_PER_LONG).
 */
#if VEC_COUNT_PER_REG == 32
static inline unsigned long read_isr(int i)
{
	return iocsr_read32(EIOINTC_REG_ISR + (i << 2));
}

static inline void write_isr(int i, unsigned long val)
{
	iocsr_write32(val, EIOINTC_REG_ISR + (i << 2));
}
#else
static inline unsigned long read_isr(int i)
{
	return iocsr_read64(EIOINTC_REG_ISR + (i << 3));
}

static inline void write_isr(int i, unsigned long val)
{
	iocsr_write64(val, EIOINTC_REG_ISR + (i << 3));
}
#endif

/*
 * Chained handler on the parent CPU interrupt pin: scan the ISR words in
 * [info->start, info->end), ack each pending vector by writing it back to
 * the ISR (write-1-to-clear — confirm against hardware manual), then invoke
 * the mapped Linux handler for every set bit.
 */
static void eiointc_irq_dispatch(struct irq_desc *desc)
{
	struct eiointc_ip_route *info = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	bool handled = false;
	int i;

	chained_irq_enter(chip, desc);

	/*
	 * If EIOINTC_ROUTE_MULT_IP is set, every 64 interrupt vectors in
	 * eiointc interrupt controller routes to different cpu interrupt pins
	 *
	 * Every cpu interrupt pin has its own irq handler, it is ok to
	 * read ISR for these 64 interrupt vectors rather than all vectors
	 */
	for (i = info->start; i < info->end; i++) {
		pending = read_isr(i);

		/* Skip handling if pending bitmap is zero */
		if (!pending)
			continue;

		/* Clear the IRQs */
		write_isr(i, pending);
		while (pending) {
			int bit = __ffs(pending);
			int irq = bit + VEC_COUNT_PER_REG * i;

			generic_handle_domain_irq(info->priv->eiointc_domain, irq);
			pending &= ~BIT(bit);
			handled = true;
		}
	}

	if (!handled)
		spurious_interrupt();

	chained_irq_exit(chip, desc);
}

/*
 * Ack/mask/unmask are no-ops: acking happens in the dispatch loop via the
 * ISR write, and per-vector masking is only done transiently during
 * affinity changes.
 */
static void eiointc_ack_irq(struct irq_data *d)
{
}

static void eiointc_mask_irq(struct irq_data *d)
{
}

static void eiointc_unmask_irq(struct irq_data *d)
{
}

static struct irq_chip eiointc_irq_chip = {
	.name			= "EIOINTC",
	.irq_ack		= eiointc_ack_irq,
	.irq_mask		= eiointc_mask_irq,
	.irq_unmask		= eiointc_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= eiointc_set_irq_affinity,
#endif
};

/* irq_domain ->alloc: bind nr_irqs consecutive hwirqs as edge-handled IRQs. */
static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int ret;
	unsigned int i, type;
	unsigned long hwirq = 0;
	struct eiointc_priv *priv = domain->host_data;

	ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
				    priv, handle_edge_irq, NULL, NULL);
	}

	return 0;
}

/* irq_domain ->free: undo eiointc_domain_alloc() for nr_irqs descriptors. */
static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops eiointc_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= eiointc_domain_alloc,
	.free		= eiointc_domain_free,
};

/* Record @parent as the vector-group parent domain for @node (ACPI path). */
static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node) {
			vec_group[i].parent = parent;
			return;
		}
	}
}

/* Look up the parent domain previously recorded for @node, or NULL. */
static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node)
			return vec_group[i].parent;
	}
	return NULL;
}

/* Nothing to save: all routing state is reprogrammed on resume. */
static int eiointc_suspend(void *data)
{
	return 0;
}

/* Rebuild the boot CPU's routing/enable state after system resume. */
static void eiointc_resume(void *data)
{
	eiointc_router_init(0);
}

static const struct syscore_ops eiointc_syscore_ops = {
	.suspend = eiointc_suspend,
	.resume = eiointc_resume,
};

static struct syscore eiointc_syscore = {
	.ops = &eiointc_syscore_ops,
};

/*
 * MADT BIO_PIC entry callback: initialize the child PCH-PIC under the
 * eiointc domain of the node encoded in bits 47:44 of its MMIO address.
 */
static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
				     const unsigned long end)
{
	struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
	unsigned int node = (pchpic_entry->address >> 44) & 0xf;
	struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);

	if (parent)
		return pch_pic_acpi_init(parent, pchpic_entry);

	return 0;
}

/*
 * MADT MSI_PIC entry callback: initialize the child PCH-MSI controller
 * under the most recently registered eiointc's node domain.
 */
static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
				     const unsigned long end)
{
	struct irq_domain *parent;
	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
	int node;

	if (cpu_has_flatmode)
		node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
	else
		node = eiointc_priv[nr_pics - 1]->node;

	parent = acpi_get_vec_parent(node, msi_group);

	if (parent)
		return pch_msi_acpi_init(parent, pchmsi_entry);

	return 0;
}

/*
 * Probe the cascaded child controllers (PCH-PIC, and PCH-MSI unless the
 * AVEC interrupt controller is present and handles MSIs instead).
 */
static int __init acpi_cascade_irqdomain_init(void)
{
	int r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, pch_pic_parse_madt, 0);
	if (r < 0)
		return r;

	if (cpu_has_avecint)
		return 0;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
	if (r < 0)
		return r;

	return 0;
}

/*
 * Common init path for ACPI and DT probing: build the node/CPU span maps,
 * create the linear IRQ domain, enable CPU-encoded routing when the KVM
 * virt extension offers it, hook the chained dispatch handler(s) onto the
 * parent interrupt(s), and program the boot CPU's router state.
 *
 * @node_map selects the EIO nodes served by this controller; 0 means "all
 * possible nodes".
 */
static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
			       u64 node_map)
{
	int i, val;

	node_map = node_map ? node_map : -1ULL;
	for_each_possible_cpu(i) {
		if (node_map & (1ULL << (cpu_to_eio_node(i)))) {
			node_set(cpu_to_eio_node(i), priv->node_map);
			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map,
				   cpumask_of(i));
		}
	}

	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle,
							priv->vec_count,
							&eiointc_domain_ops,
							priv);
	if (!priv->eiointc_domain) {
		pr_err("loongson-extioi: cannot add IRQ domain\n");
		return -ENOMEM;
	}

	if (kvm_para_has_feature(KVM_FEATURE_VIRT_EXTIOI)) {
		val = iocsr_read32(EXTIOI_VIRT_FEATURES);
		/*
		 * With EXTIOI_ENABLE_CPU_ENCODE set
		 * interrupts can route to 256 vCPUs.
		 */
		if (val & EXTIOI_HAS_CPU_ENCODE) {
			val = iocsr_read32(EXTIOI_VIRT_CONFIG);
			val |= EXTIOI_ENABLE_CPU_ENCODE;
			iocsr_write32(val, EXTIOI_VIRT_CONFIG);
			priv->flags = EIOINTC_USE_CPU_ENCODE;
		}
	}

	eiointc_priv[nr_pics++] = priv;
	/*
	 * Only the first eiointc device on VM supports routing to
	 * different CPU interrupt pins. The later eiointc devices use
	 * generic method if there are multiple eiointc devices in future
	 */
	if (cpu_has_hypervisor && (nr_pics == 1)) {
		priv->flags |= EIOINTC_ROUTE_MULT_IP;
		priv->parent_hwirq = INT_HWI0;
	}

	if (priv->flags & EIOINTC_ROUTE_MULT_IP) {
		/* One chained handler per 64-vector group / CPU interrupt pin */
		for (i = 0; i < priv->vec_count / VEC_COUNT_PER_REG; i++) {
			priv->route_info[i].start = priv->parent_hwirq - INT_HWI0 + i;
			priv->route_info[i].end = priv->route_info[i].start + 1;
			priv->route_info[i].priv = priv;
			parent_irq = get_percpu_irq(priv->parent_hwirq + i);
			irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch,
							 &priv->route_info[i]);
		}
	} else {
		/* Single parent pin: the handler scans every ISR word */
		priv->route_info[0].start = 0;
		priv->route_info[0].end = priv->vec_count / VEC_COUNT_PER_REG;
		priv->route_info[0].priv = priv;
		irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch,
						 &priv->route_info[0]);
	}
	eiointc_router_init(0);

	/* Syscore + hotplug hooks are registered once, for the first device */
	if (nr_pics == 1) {
		register_syscore(&eiointc_syscore);
		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_EIOINTC_STARTING,
					  "irqchip/loongarch/eiointc:starting",
					  eiointc_router_init, NULL);
	}

	return 0;
}

/*
 * ACPI probe entry (called from the MADT EIO_PIC parsing code): allocate
 * the per-device state, map the cascade interrupt, run the common init,
 * then wire up vector-group parents and probe cascaded child controllers.
 * Returns 0 on success, -ENOMEM on any failure.
 */
int __init eiointc_acpi_init(struct irq_domain *parent,
			     struct acpi_madt_eio_pic *acpi_eiointc)
{
	int parent_irq, ret;
	struct eiointc_priv *priv;
	int node;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
							       acpi_eiointc->node);
	if (!priv->domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		goto out_free_priv;
	}

	priv->vec_count = VEC_COUNT;
	priv->node = acpi_eiointc->node;
	priv->parent_hwirq = acpi_eiointc->cascade;
	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);

	ret = eiointc_init(priv, parent_irq, acpi_eiointc->node_map);
	if (ret < 0)
		goto out_free_handle;

	if (cpu_has_flatmode)
		node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
	else
		node = acpi_eiointc->node;
	acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
	acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);

	ret = acpi_cascade_irqdomain_init();
	if (ret < 0)
		goto out_free_handle;

	return ret;

out_free_handle:
	irq_domain_free_fwnode(priv->domain_handle);
	priv->domain_handle = NULL;
out_free_priv:
	kfree(priv);

	/* All failure paths are reported as -ENOMEM */
	return -ENOMEM;
}

/*
 * Devicetree probe entry for LS2K-series SoCs. The LS2K0500 variant only
 * implements 128 vectors; everything else uses the full VEC_COUNT.
 */
static int __init eiointc_of_init(struct device_node *of_node,
				  struct device_node *parent)
{
	struct eiointc_priv *priv;
	struct irq_data *irq_data;
	int parent_irq, ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	parent_irq = irq_of_parse_and_map(of_node, 0);
	if (parent_irq <= 0) {
		ret = -ENODEV;
		goto out_free_priv;
	}

	ret = irq_set_handler_data(parent_irq, priv);
	if (ret < 0)
		goto out_free_priv;

	irq_data = irq_get_irq_data(parent_irq);
	if (!irq_data) {
		ret = -ENODEV;
		goto out_free_priv;
	}

	/*
	 * In particular, the number of devices supported by the LS2K0500
	 * extended I/O interrupt vector is 128.
	 */
	if (of_device_is_compatible(of_node, "loongson,ls2k0500-eiointc"))
		priv->vec_count = 128;
	else
		priv->vec_count = VEC_COUNT;
	priv->parent_hwirq = irqd_to_hwirq(irq_data);
	priv->node = 0;
	priv->domain_handle = of_fwnode_handle(of_node);

	ret = eiointc_init(priv, parent_irq, 0);
	if (ret < 0)
		goto out_free_priv;

	return 0;

out_free_priv:
	kfree(priv);
	return ret;
}

IRQCHIP_DECLARE(loongson_ls2k0500_eiointc, "loongson,ls2k0500-eiointc", eiointc_of_init);
IRQCHIP_DECLARE(loongson_ls2k2000_eiointc, "loongson,ls2k2000-eiointc", eiointc_of_init);