/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the irq handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base:	Interrupt number base
 * @irq_offset:	Interrupt number offset
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}
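/*
 * Example (illustrative sketch only, not part of this file): a driver for
 * a hypothetical "foo" controller might define a minimal irq_chip before
 * handing it to irq_set_chip(). The foo_* names are assumptions for
 * illustration; the struct irq_chip fields and the setters used below are
 * real APIs from this file and <linux/irq.h>:
 *
 *	static struct irq_chip foo_irq_chip = {
 *		.name		= "foo",
 *		.irq_mask	= foo_irq_mask,
 *		.irq_unmask	= foo_irq_unmask,
 *		.irq_ack	= foo_irq_ack,
 *	};
 *
 *	irq_set_chip(irq, &foo_irq_chip);
 *	irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	irq_set_handler_data(irq, foo);
 */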
/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	irq_domain_activate_irq(&desc->irq_data);
	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_domain_deactivate_irq(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
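/*
 * Example (illustrative sketch only): handle_nested_irq() is typically
 * called from the threaded handler of a slow-bus (e.g. I2C) interrupt
 * controller to dispatch its child interrupts. The foo_* names and the
 * status-register read are assumptions for illustration:
 *
 *	static irqreturn_t foo_demux_thread_fn(int irq, void *data)
 *	{
 *		struct foo_chip *foo = data;
 *		unsigned long pending = foo_read_pending(foo);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, foo->nr_irqs)
 *			handle_nested_irq(foo->irq_base + bit);
 *		return IRQ_HANDLED;
 *	}
 */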
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
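/*
 * Example (illustrative sketch only): a demultiplexing driver usually
 * installs this flow handler for its child interrupts, together with
 * the generic dummy_irq_chip, since no hardware control is needed:
 *
 *	irq_set_chip_and_handler(child_irq, &dummy_irq_chip,
 *				 handle_simple_irq);
 */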
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
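/*
 * Example (illustrative sketch only): with a level type interrupt and a
 * threaded handler, IRQF_ONESHOT keeps the line masked across the thread
 * run; cond_unmask_irq() above then leaves the unmask to the thread-exit
 * path. The foo_* names are assumptions for illustration:
 *
 *	ret = request_threaded_irq(irq, NULL, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 */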
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
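/*
 * Example (illustrative sketch only): a chip served by handle_fasteoi_irq
 * only needs mask/unmask and an eoi callback; no explicit ack is issued by
 * the flow handler. The foo_* callbacks are assumptions for illustration:
 *
 *	static struct irq_chip foo_eoi_chip = {
 *		.name		= "foo-eoi",
 *		.irq_mask	= foo_irq_mask,
 *		.irq_unmask	= foo_irq_unmask,
 *		.irq_eoi	= foo_irq_eoi,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &foo_eoi_chip, handle_fasteoi_irq);
 */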
/**
 * handle_edge_irq - edge type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires reenabling the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available, then mask it
	 * and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
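/*
 * Example (illustrative sketch only): a chip used with handle_edge_irq
 * must provide an irq_ack callback, since the flow handler above acks
 * the latched edge before running the action. The foo_* callbacks are
 * assumptions for illustration:
 *
 *	static struct irq_chip foo_edge_chip = {
 *		.name		= "foo-edge",
 *		.irq_ack	= foo_irq_ack,
 *		.irq_mask	= foo_irq_mask,
 *		.irq_unmask	= foo_irq_unmask,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &foo_edge_chip, handle_edge_irq);
 */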
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available, then just
	 * get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
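/*
 * Example (illustrative sketch only): per-CPU interrupts with per-cpu
 * device ids are requested with request_percpu_irq(), which hands a
 * __percpu pointer to the core; handle_percpu_devid_irq() then resolves
 * the per-CPU slot via raw_cpu_ptr(). The foo_* names are assumptions:
 *
 *	static DEFINE_PER_CPU(struct foo_percpu, foo_pcpu);
 *
 *	ret = request_percpu_irq(irq, foo_percpu_handler, "foo",
 *				 &foo_pcpu);
 */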
void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, 1, NULL);
	desc->irq_data.handler_data = data;

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
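/*
 * Example (illustrative sketch only): a cascaded interrupt controller
 * installs its demux as a chained flow handler on the parent line, and
 * irq_modify_status() adjusts per-irq settings of the children
 * (here: requestable but not autoprobed). The foo_* names are
 * assumptions for illustration:
 *
 *	irq_set_chained_handler_and_data(parent_irq, foo_demux_handler,
 *					 foo);
 *
 *	irq_modify_status(child_irq, IRQ_NOREQUEST, IRQ_NOPROBE);
 */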
/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @dest:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
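/*
 * Example (illustrative sketch only): a child chip in a hierarchical
 * domain commonly delegates the basic operations straight to its parent
 * with the helpers above:
 *
 *	static struct irq_chip foo_child_chip = {
 *		.name			= "foo-child",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */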
/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return -ENOSYS;
}

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @vcpu_info:	The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @on:		Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;
	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
#endif

/**
 * irq_chip_compose_msi_msg - Compose MSI message for an irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non
 * hierarchical we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}
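/*
 * Example (illustrative sketch only): an MSI driver can compose the
 * message through the hierarchy and write it to the device; the PCI MSI
 * layer's pci_write_msi_msg() is one such consumer:
 *
 *	struct msi_msg msg;
 *
 *	if (!irq_chip_compose_msi_msg(irq_get_irq_data(irq), &msg))
 *		pci_write_msi_msg(irq, &msg);
 */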