/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq: irq number
 * @chip: pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq: irq number
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE)
		ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry)
		entry->irq = irq;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);
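
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver for a "foo" interrupt controller could install its chip, chip
 * data and trigger type with the helpers above. foo_irq_chip, foo_priv
 * and the foo_*_irq() callbacks are assumed names.
 *
 *	static struct irq_chip foo_irq_chip = {
 *		.name		= "FOO",
 *		.irq_ack	= foo_ack_irq,
 *		.irq_mask	= foo_mask_irq,
 *		.irq_unmask	= foo_unmask_irq,
 *	};
 *
 *	static int foo_setup_irq(unsigned int irq, struct foo_priv *priv)
 *	{
 *		int ret = irq_set_chip(irq, &foo_irq_chip);
 *
 *		if (ret)
 *			return ret;
 *		irq_set_chip_data(irq, priv);
 *		return irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	}
 */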

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/**
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
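
/*
 * Example (illustrative sketch): handle_nested_irq() is the demux hook
 * for irq chips behind a slow bus (e.g. an I2C GPIO expander), called
 * from the parent interrupt's threaded handler. foo_chip,
 * foo_read_pending() and the irq_base/nr_irqs fields are assumed names.
 *
 *	static irqreturn_t foo_demux_thread_fn(int irq, void *data)
 *	{
 *		struct foo_chip *chip = data;
 *		unsigned long pending = foo_read_pending(chip);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, chip->nr_irqs)
 *			handle_nested_irq(chip->irq_base + bit);
 *
 *		return IRQ_HANDLED;
 *	}
 */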

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
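
/*
 * Example (illustrative sketch): a controller driver selects the flow
 * handler per line; for a level-triggered input it would pair its chip
 * with handle_level_irq. foo_irq_chip is the assumed chip from the
 * earlier sketch.
 *
 *	irq_set_chip_and_handler_name(irq, &foo_irq_chip,
 *				      handle_level_irq, "level");
 */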

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	if (desc->istate & IRQS_ONESHOT)
		cond_unmask_irq(desc);

out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}

/**
 * handle_edge_irq - edge type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * The interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts that arrived while the
 * handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
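
/*
 * Example (illustrative sketch): an edge-triggered line is wired up
 * with handle_edge_irq plus a trigger type, and then requested by a
 * consumer as usual. foo_irq_chip, foo_isr() and foo_dev are assumed
 * names.
 *
 *	irq_set_chip_and_handler_name(irq, &foo_irq_chip,
 *				      handle_edge_irq, "edge");
 *	irq_set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
 *
 *	ret = request_irq(irq, foo_isr, 0, "foo", foo_dev);
 */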

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq() above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
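
/*
 * Example (illustrative sketch): a per-CPU source such as a local timer
 * would use handle_percpu_irq; note the flow handler takes no
 * desc->lock, since the interrupt cannot be taken on another CPU
 * concurrently.
 *
 *	irq_set_chip_and_handler_name(irq, &foo_irq_chip,
 *				      handle_percpu_irq, "percpu");
 */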

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
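
/*
 * Example (illustrative sketch): irq_modify_status() clears and sets
 * IRQ_* settings bits under desc->lock. Marking a line per-CPU and
 * keeping it away from autoprobing could look like:
 *
 *	irq_modify_status(irq, IRQ_NOAUTOEN, IRQ_PER_CPU | IRQ_NOPROBE);
 */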

/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
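
/*
 * Example (illustrative sketch): architecture CPU hotplug code may call
 * these iterators with interrupts disabled while bringing a CPU up or
 * down, so chips with irq_cpu_online()/irq_cpu_offline() callbacks can
 * re-route or re-enable their lines:
 *
 *	local_irq_disable();
 *	irq_cpu_offline();
 *	local_irq_enable();
 */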