/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq: irq number
 * @chip: pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq: irq number
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq: Interrupt number
 * @data: Pointer to interrupt specific data
 *
 * Set the per-irq handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base: Interrupt number base
 * @irq_offset: Interrupt number offset
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq: Interrupt number
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}
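/*
 * Illustrative usage sketch: how a hypothetical platform driver might wire
 * up a freshly allocated linux irq with the setters above. The chip
 * "my_chip", the irq number "my_irq" and the cookie "my_dev" are
 * assumptions for the example, not symbols from this file:
 *
 *	irq_set_chip(my_irq, &my_chip);
 *	irq_set_irq_type(my_irq, IRQ_TYPE_LEVEL_HIGH);
 *	irq_set_handler_data(my_irq, my_dev);
 */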
/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq: Interrupt number
 * @data: Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	irq_domain_activate_irq(&desc->irq_data);
	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_domain_deactivate_irq(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}
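/*
 * Note on usage: drivers do not call irq_enable()/irq_disable() directly;
 * they nest disable_irq()/enable_irq() (kernel/irq/manage.c), which use
 * desc->depth so that only the outermost transition reaches the chip.
 * A minimal sketch of the nesting:
 *
 *	disable_irq(irq);	depth 0 -> 1, line gets disabled
 *	disable_irq(irq);	depth 1 -> 2, no chip access
 *	enable_irq(irq);	depth 2 -> 1, no chip access
 *	enable_irq(irq);	depth 1 -> 0, line gets enabled again
 */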
/**
 * irq_disable - Mark interrupt disabled
 * @desc: irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
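/*
 * Illustrative usage sketch for handle_nested_irq(): the threaded handler
 * of a hypothetical i2c gpio expander demultiplexing its child interrupts.
 * "struct my_chip" and my_chip_read_pending() are assumptions for the
 * example.
 *
 *	static irqreturn_t my_parent_thread_fn(int irq, void *dev_id)
 *	{
 *		struct my_chip *chip = dev_id;
 *		unsigned long pending = my_chip_read_pending(chip);
 *		unsigned int bit;
 *
 *		for_each_set_bit(bit, &pending, chip->nr_child_irqs)
 *			handle_nested_irq(irq_find_mapping(chip->domain, bit));
 *		return IRQ_HANDLED;
 *	}
 */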
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc: the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
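/*
 * Illustrative usage sketch: a hypothetical demultiplexer installing
 * handle_simple_irq() for its child interrupts, typically from its
 * irq_domain map callback. dummy_irq_chip is the generic no-op chip
 * from kernel/irq/dummychip.c.
 *
 *	irq_set_chip_and_handler_name(virq, &dummy_irq_chip,
 *				      handle_simple_irq, "demux");
 */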
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @desc: the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(desc);

	/*
	 * If it is disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc: the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi() call
 * when the interrupt has been serviced. This enables support for
 * modern forms of interrupt handlers, which handle the flow details
 * in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(desc);

	/*
	 * If it is disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
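/*
 * Illustrative sketch: the shape of a hypothetical chip suited to
 * handle_fasteoi_irq(). Only ->irq_eoi is needed for the normal flow;
 * ->irq_mask/->irq_unmask still back the disabled and oneshot paths
 * above. my_mask(), my_unmask() and my_eoi() are assumptions for the
 * example.
 *
 *	static struct irq_chip my_eoi_chip = {
 *		.name		= "my-eoi",
 *		.irq_mask	= my_mask,
 *		.irq_unmask	= my_unmask,
 *		.irq_eoi	= my_eoi,
 *	};
 *
 *	irq_set_chip_and_handler(virq, &my_eoi_chip, handle_fasteoi_irq);
 */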
/**
 * handle_edge_irq - edge type IRQ handler
 * @desc: the interrupt description structure for this irq
 *
 * The interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts that arrived while the
 * handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it is disabled or no action is available, then mask it
	 * and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc: the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it is disabled or no action is available, then mask it
	 * and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif
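/*
 * Illustrative sketch: a hypothetical chip for handle_edge_irq(). The
 * edge flow above calls ->irq_ack unconditionally and relies on
 * ->irq_mask/->irq_unmask for the pending/replay logic, so all three
 * should be implemented. my_ack(), my_mask() and my_unmask() are
 * assumptions for the example.
 *
 *	static struct irq_chip my_edge_chip = {
 *		.name		= "my-edge",
 *		.irq_ack	= my_ack,
 *		.irq_mask	= my_mask,
 *		.irq_unmask	= my_unmask,
 *	};
 *
 *	irq_set_chip_and_handler(virq, &my_edge_chip, handle_edge_irq);
 */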
/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
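/*
 * Illustrative usage sketch for the per-CPU flow: a hypothetical timer
 * driver registering with a percpu dev_id. "struct my_event", "my_evt"
 * and my_timer_handler() are assumptions for the example.
 *
 *	static struct my_event __percpu *my_evt;
 *
 *	my_evt = alloc_percpu(struct my_event);
 *	err = request_percpu_irq(irq, my_timer_handler, "my-timer", my_evt);
 *
 * Each CPU then enables its own copy of the interrupt locally:
 *
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */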
void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, 1, NULL);
	desc->irq_common_data.handler_data = data;

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
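/*
 * Illustrative usage sketch for the chained setup above: a hypothetical
 * interrupt multiplexer demultiplexing in hard irq context. "struct
 * my_mux", my_mux_pending() and the "domain" field are assumptions for
 * the example.
 *
 *	static void my_demux_handler(struct irq_desc *desc)
 *	{
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		struct my_mux *mux = irq_desc_get_handler_data(desc);
 *		unsigned long pending = my_mux_pending(mux);
 *		unsigned int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		for_each_set_bit(bit, &pending, mux->nr_irqs)
 *			generic_handle_irq(irq_find_mapping(mux->domain, bit));
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, my_demux_handler, mux);
 */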
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);

/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask
 * if NULL)
 * @data: Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask
 * if NULL)
 * @data: Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @dest: The affinity mask to set
 * @force: Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
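/*
 * Illustrative sketch: how a hypothetical child chip in a hierarchical
 * domain delegates to its parent with the helpers above, as stacked
 * irqchip drivers commonly do.
 *
 *	static struct irq_chip my_child_chip = {
 *		.name			= "my-child",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */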
/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data: Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @vcpu_info: The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @on: Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;
	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
#endif

/**
 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
 * @data: Pointer to interrupt specific data
 * @msg: Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For
 * non-hierarchical domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}