/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:  irq number
 * @chip: pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:  irq number
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE)
		ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:  Interrupt number
 * @data: Pointer to interrupt specific data
 *
 * Set the interrupt specific handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:   Interrupt number
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry)
		entry->irq = irq;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:  Interrupt number
 * @data: Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);
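/*
 * Example (illustrative sketch, not part of this file): a platform set-up
 * path could use the accessors above to wire an interrupt to a chip and
 * stash per-chip state. "my_chip", "my_regs" and MY_IRQ are hypothetical
 * names used only for this example.
 *
 *	static struct irq_chip my_chip = {
 *		.name       = "MYCHIP",
 *		.irq_mask   = my_chip_mask,
 *		.irq_unmask = my_chip_unmask,
 *		.irq_ack    = my_chip_ack,
 *	};
 *
 *	static int __init my_irq_init(void)
 *	{
 *		int ret = irq_set_chip(MY_IRQ, &my_chip);
 *
 *		if (ret)
 *			return ret;
 *		irq_set_chip_data(MY_IRQ, my_regs);
 *		return irq_set_irq_type(MY_IRQ, IRQ_TYPE_LEVEL_HIGH);
 *	}
 */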
static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
		return ret;
	}

	irq_enable(desc);
	return 0;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}
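/*
 * Note on the helpers above (editorial sketch): a chip only needs to fill
 * in the callbacks it actually has. irq_enable()/irq_disable() fall back
 * to ->irq_unmask()/->irq_mask(), and a chip without ->irq_disable() is
 * disabled lazily: the line stays unmasked until an interrupt actually
 * arrives while the irq is marked disabled. A hypothetical minimal
 * memory-mapped chip relying on those fallbacks could look like this
 * (the TINY_* register offsets are made up):
 *
 *	static void tiny_mask(struct irq_data *d)
 *	{
 *		void __iomem *base = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->irq), base + TINY_MASK_SET);
 *	}
 *
 *	static void tiny_unmask(struct irq_data *d)
 *	{
 *		void __iomem *base = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->irq), base + TINY_MASK_CLR);
 *	}
 *
 *	static struct irq_chip tiny_chip = {
 *		.name       = "TINY",
 *		.irq_mask   = tiny_mask,
 *		.irq_unmask = tiny_unmask,
 *	};
 */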
/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq:  the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
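/*
 * Example (illustrative sketch, not part of this file): a threaded handler
 * for a slow (e.g. I2C-attached) expander can demultiplex its children with
 * handle_nested_irq(). "exp_read_pending()", "exp->irq_base" and
 * EXP_NR_IRQS are hypothetical; a real driver would also have registered
 * the child irqs with irq_set_nested_thread().
 *
 *	static irqreturn_t exp_irq_thread(int irq, void *data)
 *	{
 *		struct exp *exp = data;
 *		unsigned long pending = exp_read_pending(exp);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, EXP_NR_IRQS)
 *			handle_nested_irq(exp->irq_base + bit);
 *
 *		return IRQ_HANDLED;
 *	}
 */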
/**
 * handle_level_irq - Level type irq handler
 * @irq:  the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so that the interrupt line returns to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

	if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
		unmask_irq(desc);
out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq:  the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}
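/*
 * Example (illustrative sketch, not part of this file): an interrupt
 * controller driver picks the flow handler that matches its hardware.
 * A chip with a per-line EOI register pairs naturally with
 * handle_fasteoi_irq(), a plain level-triggered line with
 * handle_level_irq(). "eoi_chip" here is a hypothetical stand-in.
 *
 *	irq_set_chip_and_handler_name(irq, &eoi_chip,
 *				      handle_fasteoi_irq, "fasteoi");
 */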
/**
 * handle_edge_irq - edge type IRQ handler
 * @irq:  the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts which have arrived while
 * the handler was running. The loop exits once all pending interrupts
 * have been handled.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @irq:  the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq:  the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
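/*
 * Example (illustrative sketch, not part of this file): per-CPU interrupts
 * such as a local timer are requested once with a percpu dev_id and then
 * enabled on each CPU that should receive them. "timer_evt" and
 * timer_handler() are hypothetical.
 *
 *	static DEFINE_PER_CPU(struct clock_event_device, timer_evt);
 *
 *	err = request_percpu_irq(irq, timer_handler, "local_timer",
 *				 &timer_evt);
 *	...
 *	(then, on each CPU, typically from its setup/hotplug path:)
 *	enable_percpu_irq(irq, 0);
 */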
/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:  the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
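/*
 * Example (illustrative sketch, not part of this file): a cascaded GPIO
 * bank installs a chained flow handler for its parent line and adjusts the
 * status of its child interrupts. "bank_demux_handler", "bank_irq_chip"
 * and the irq numbers are hypothetical; irq_set_chained_handler() is the
 * usual wrapper around __irq_set_handler(..., is_chained = 1, ...).
 *
 *	irq_set_chained_handler(parent_irq, bank_demux_handler);
 *
 *	for (i = 0; i < BANK_NR_GPIOS; i++) {
 *		irq_set_chip_and_handler_name(child_irq_base + i,
 *					      &bank_irq_chip,
 *					      handle_edge_irq, "edge");
 *		irq_modify_status(child_irq_base + i,
 *				  IRQ_NOREQUEST, IRQ_NOPROBE);
 *	}
 */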
/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
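/*
 * Example (illustrative sketch, not part of this file): a chip that needs
 * per-CPU routing updates on hotplug fills in the irq_cpu_online/offline
 * callbacks; with IRQCHIP_ONOFFLINE_ENABLED set, the walks above skip
 * lines that are currently disabled. The names below are hypothetical.
 *
 *	static struct irq_chip hotplug_chip = {
 *		.name            = "HOTPLUG",
 *		.irq_mask        = hp_mask,
 *		.irq_unmask      = hp_unmask,
 *		.irq_cpu_online  = hp_cpu_online,
 *		.irq_cpu_offline = hp_cpu_offline,
 *		.flags           = IRQCHIP_ONOFFLINE_ENABLED,
 *	};
 */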