/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry)
		entry->irq = irq;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
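/*
 * Illustrative usage sketch, not part of the original file: how a
 * controller driver might wire the setters above together when it maps
 * a hardware interrupt. "example_chip" and "priv" are hypothetical.
 */
static struct irq_chip example_chip;	/* callbacks filled in elsewhere */

static void __maybe_unused example_map_irq(unsigned int irq, void *priv)
{
	/* Attach the chip, its private data and the trigger type. */
	irq_set_chip(irq, &example_chip);
	irq_set_chip_data(irq, priv);
	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
}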
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}
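/*
 * Illustrative sketch, not part of the original file: a minimal irq_chip
 * whose callbacks the mask/unmask helpers above end up invoking. A real
 * chip would poke controller registers; this one only flips a software
 * latch and assumes irq numbers below BITS_PER_LONG for brevity.
 */
static unsigned long example_mask_latch;	/* hypothetical soft state */

static void example_irq_mask(struct irq_data *d)
{
	set_bit(d->irq, &example_mask_latch);
}

static void example_irq_unmask(struct irq_data *d)
{
	clear_bit(d->irq, &example_mask_latch);
}

static struct irq_chip example_soft_chip __maybe_unused = {
	.name		= "example-soft",
	.irq_mask	= example_irq_mask,
	.irq_unmask	= example_irq_unmask,
};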
/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
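/*
 * Illustrative sketch, not part of the original file: a threaded handler
 * for a hypothetical expander demultiplexing into nested child irqs,
 * which is the intended caller of handle_nested_irq(). The children are
 * assumed to have been marked with irq_set_nested_thread(child, 1).
 */
struct example_expander {
	unsigned int irq_base;		/* first child irq, hypothetical */
	unsigned long pending;		/* latched by the primary handler */
};

static irqreturn_t __maybe_unused example_demux_thread(int irq, void *dev_id)
{
	struct example_expander *exp = dev_id;
	unsigned long pending = exp->pending;
	unsigned int bit;

	for_each_set_bit(bit, &pending, 8)
		handle_nested_irq(exp->irq_base + bit);

	return IRQ_HANDLED;
}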
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so that the interrupt line goes back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	if (desc->istate & IRQS_ONESHOT)
		cond_unmask_irq(desc);

out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}
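/*
 * Illustrative sketch, not part of the original file: installing the
 * level flow handler for a line, the typical way handle_level_irq()
 * is put to use by a controller driver. "example_chip" is the
 * hypothetical chip from the earlier sketch.
 */
static void __maybe_unused example_setup_level_irq(unsigned int irq)
{
	irq_set_chip_and_handler_name(irq, &example_chip, handle_level_irq,
				      "level");
}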
/**
 * handle_edge_irq - edge type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * The interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts that arrived while the
 * handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
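/*
 * Illustrative sketch, not part of the original file: requesting and
 * enabling a per-CPU interrupt of the kind dispatched through
 * handle_percpu_devid_irq() below. The irq number and handler are
 * hypothetical.
 */
static DEFINE_PER_CPU(unsigned int, example_percpu_count);

static irqreturn_t example_percpu_handler(int irq, void *dev_id)
{
	unsigned int *count = dev_id;	/* this CPU's example_percpu_count */

	(*count)++;
	return IRQ_HANDLED;
}

static int __maybe_unused example_percpu_setup(unsigned int irq)
{
	int err;

	err = request_percpu_irq(irq, example_percpu_handler,
				 "example-percpu", &example_percpu_count);
	if (err)
		return err;

	/* Each CPU has to enable its own copy of the interrupt. */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}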
/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to a percpu variable which
 * contains the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
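/*
 * Illustrative sketch, not part of the original file: installing a
 * chained flow handler for a cascaded controller via __irq_set_handler()
 * and adjusting a child line with irq_modify_status(). The handler body
 * and irq numbers are hypothetical.
 */
static void example_cascade_handler(unsigned int irq, struct irq_desc *desc)
{
	/*
	 * A real demux handler would read the controller's status
	 * register and dispatch each child via generic_handle_irq().
	 */
}

static void __maybe_unused example_install_cascade(unsigned int parent_irq,
						   unsigned int child_irq)
{
	/* is_chained=1: the flow handler owns the line; no request_irq(). */
	__irq_set_handler(parent_irq, example_cascade_handler, 1, "cascade");
	/* Demux children usually cannot be autoprobed. */
	irq_modify_status(child_irq, 0, IRQ_NOPROBE);
}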
/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
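/*
 * Illustrative sketch, not part of the original file: irq_cpu_online()
 * and irq_cpu_offline() are meant to be called from the architecture's
 * CPU hotplug path, on the CPU that is coming up or going down. The
 * hook names below are hypothetical.
 */
static void __maybe_unused example_arch_cpu_up(void)
{
	/* Run on the incoming CPU before it starts handling interrupts. */
	irq_cpu_online();
}

static void __maybe_unused example_arch_cpu_down(void)
{
	/* Run on the outgoing CPU after its interrupts were migrated away. */
	irq_cpu_offline();
}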