/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base:	Interrupt number base
 * @irq_offset:	Interrupt number offset
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}
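
/*
 * Example (hypothetical): a platform driver wiring up one of its
 * interrupts with the setters above. "foo_irq_chip" and "foo" are
 * illustrative names only; a real driver supplies its own chip and
 * per-irq data.
 *
 *	static void foo_init_irq(unsigned int irq, struct foo *foo)
 *	{
 *		irq_set_chip(irq, &foo_irq_chip);
 *		irq_set_chip_data(irq, foo);
 *		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	}
 */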

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}
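
/*
 * Note the fallback chains above: a chip that implements only
 * irq_mask()/irq_unmask() still works, because irq_enable() falls back
 * to irq_unmask() and irq_shutdown() falls back to irq_mask(). A
 * minimal sketch, assuming hypothetical "foo_*" callbacks:
 *
 *	static struct irq_chip foo_minimal_chip = {
 *		.name		= "foo",
 *		.irq_mask	= foo_mask,
 *		.irq_unmask	= foo_unmask,
 *	};
 */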

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}
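
/*
 * Example (hypothetical): the threaded handler of a slow-bus (e.g. I2C)
 * interrupt expander demultiplexing its child interrupts. The children
 * must have been set up with irq_set_nested_thread(); "foo_*" names are
 * illustrative.
 *
 *	static irqreturn_t foo_expander_thread(int irq, void *data)
 *	{
 *		struct foo_expander *exp = data;
 *		unsigned long pending = foo_read_pending(exp); // may sleep
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, exp->nr_irqs)
 *			handle_nested_irq(exp->irq_base + bit);
 *		return IRQ_HANDLED;
 *	}
 */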

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
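
/*
 * Example (hypothetical): installing the level flow handler for a chip
 * that can mask and ack in one register write. Providing irq_mask_ack
 * lets mask_ack_irq() do a single hardware access; otherwise the core
 * falls back to irq_mask() followed by irq_ack(). "foo_*" names are
 * illustrative.
 *
 *	static struct irq_chip foo_level_chip = {
 *		.name		= "foo-level",
 *		.irq_mask_ack	= foo_mask_ack,
 *		.irq_mask	= foo_mask,
 *		.irq_unmask	= foo_unmask,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &foo_level_chip, handle_level_irq);
 */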

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
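
/*
 * Example (hypothetical): a GIC-style controller where the hardware
 * tracks the flow itself and only needs an end-of-interrupt write.
 * "foo_*" names are illustrative.
 *
 *	static struct irq_chip foo_eoi_chip = {
 *		.name		= "foo-eoi",
 *		.irq_eoi	= foo_eoi,
 *		.irq_mask	= foo_mask,
 *		.irq_unmask	= foo_unmask,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &foo_eoi_chip, handle_fasteoi_irq);
 */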

/**
 * handle_edge_irq - edge type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out.
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq() above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out.
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif
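
/*
 * Example (hypothetical): wiring up an edge-triggered line. Note that
 * handle_edge_irq() calls chip->irq_ack() unconditionally, so the chip
 * must provide it; mask/unmask are needed for the pending-replay path.
 * "foo_*" names are illustrative.
 *
 *	static struct irq_chip foo_edge_chip = {
 *		.name		= "foo-edge",
 *		.irq_ack	= foo_ack,
 *		.irq_mask	= foo_mask,
 *		.irq_unmask	= foo_unmask,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &foo_edge_chip, handle_edge_irq);
 */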

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
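
/*
 * Example (hypothetical): a cascaded interrupt controller installs its
 * demux function as a chained handler (is_chained = 1 marks the irq
 * noprobe/norequest/nothread and starts it up), typically via the
 * irq_set_chained_handler() wrapper. The child irqs are then made
 * requestable with irq_modify_status(). "foo_*" names are illustrative.
 *
 *	irq_set_chained_handler(parent_irq, foo_demux_handler);
 *
 *	for (i = 0; i < FOO_NR_IRQS; i++) {
 *		irq_set_chip_and_handler(foo->irq_base + i,
 *					 &foo_irq_chip, handle_level_irq);
 *		irq_modify_status(foo->irq_base + i,
 *				  IRQ_NOREQUEST, IRQ_NOPROBE);
 *	}
 */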

/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
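
/*
 * Example (hypothetical): a chip that re-routes its irqs when a CPU
 * comes or goes. With IRQCHIP_ONOFFLINE_ENABLED set, the iterators
 * above only invoke the callbacks for irqs that are not disabled.
 * "foo_*" names are illustrative.
 *
 *	static struct irq_chip foo_chip = {
 *		.name			= "foo",
 *		.irq_mask		= foo_mask,
 *		.irq_unmask		= foo_unmask,
 *		.irq_cpu_online		= foo_cpu_online,
 *		.irq_cpu_offline	= foo_cpu_offline,
 *		.flags			= IRQCHIP_ONOFFLINE_ENABLED,
 *	};
 */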