/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}
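/*
 * Example (an illustrative sketch, not part of this file): a driver
 * for a hypothetical "foo" controller would typically install its
 * chip and configure the trigger with the helpers above. All "foo"
 * names below are assumptions made purely for illustration.
 *
 *	static struct irq_chip foo_irq_chip = {
 *		.name		= "FOO",
 *		.irq_ack	= foo_ack_irq,
 *		.irq_mask	= foo_mask_irq,
 *		.irq_unmask	= foo_unmask_irq,
 *	};
 *
 *	static int foo_setup_irq(unsigned int irq)
 *	{
 *		int ret = irq_set_chip(irq, &foo_irq_chip);
 *
 *		if (ret)
 *			return ret;
 *		return irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	}
 */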
/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}
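/*
 * Example (a minimal sketch under assumptions): chip callbacks such as
 * the ones invoked by irq_enable()/irq_shutdown() above usually fetch
 * their controller state from the pointer installed with
 * irq_set_chip_data(). The "foo" names and the FOO_MASK_SET register
 * offset are hypothetical.
 *
 *	static void foo_mask_irq(struct irq_data *d)
 *	{
 *		struct foo_chip *fc = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), fc->base + FOO_MASK_SET);
 *	}
 */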
/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/**
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}
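/*
 * Example (illustrative sketch only): a threaded handler for an i2c
 * irq expander demultiplexes its children via handle_nested_irq().
 * foo_read_pending(), FOO_NR_IRQS and priv->irq_base are assumptions
 * for this sketch.
 *
 *	static irqreturn_t foo_demux_thread(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *		unsigned long pending = foo_read_pending(priv);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, FOO_NR_IRQS)
 *			handle_nested_irq(priv->irq_base + bit);
 *
 *		return IRQ_HANDLED;
 *	}
 */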
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require masking the interrupt and
 *	unmasking it after the associated handler has acknowledged the
 *	device, so that the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
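/*
 * Example (a one-line sketch, "foo" names hypothetical): a controller
 * driver selects the flow handler matching its hardware semantics,
 * e.g. handle_level_irq() for level triggered lines:
 *
 *	irq_set_chip_and_handler_name(irq, &foo_irq_chip,
 *				      handle_level_irq, "level");
 */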
static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->irq_eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
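/*
 * Example (illustrative sketch under assumptions): a chip whose
 * hardware manages the flow transparently only needs an ->irq_eoi()
 * callback plus the fasteoi flow handler. The "foo" names are made up.
 *
 *	static struct irq_chip foo_eoi_chip = {
 *		.name		= "FOO-EOI",
 *		.irq_mask	= foo_mask_irq,
 *		.irq_unmask	= foo_unmask_irq,
 *		.irq_eoi	= foo_eoi_irq,
 *	};
 *
 *	irq_set_chip_and_handler_name(irq, &foo_eoi_chip,
 *				      handle_fasteoi_irq, "fasteoi");
 */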
/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be re-enabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires re-enabling the interrupt inside
 *	the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Similar to handle_edge_irq() above, but using eoi and without the
 *	mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
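/*
 * Example (a sketch, not from a real driver): per cpu device id
 * interrupts pair with handle_percpu_devid_irq() below. They are
 * requested once with a percpu pointer and must then be enabled from
 * each cpu that should receive them. The "foo" names are assumptions.
 *
 *	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu_data);
 *
 *	err = request_percpu_irq(irq, foo_handler, "foo", &foo_pcpu_data);
 *	if (!err)
 *		enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */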
/**
 *	handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements. Same as
 *	handle_percpu_irq() above but with the following extras:
 *
 *	action->percpu_dev_id is a pointer to percpu variables which
 *	contain the real device id for the cpu on which this handler is
 *	called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
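/*
 * Example (minimal sketch): a chained demultiplexer typically marks
 * its parent line as neither probeable nor requestable:
 *
 *	irq_modify_status(irq, 0, IRQ_NOPROBE | IRQ_NOREQUEST);
 */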
/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
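/*
 * Example (illustrative only, notifier-era style): architectures that
 * use these hooks call them from a cpu hotplug notifier, roughly like
 * the sketch below. foo_cpu_notify() is a hypothetical name.
 *
 *	static int foo_cpu_notify(struct notifier_block *nb,
 *				  unsigned long action, void *hcpu)
 *	{
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			irq_cpu_online();
 *			break;
 *		case CPU_DYING:
 *			irq_cpu_offline();
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 */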