Lines Matching "percpu-devid"
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4 * Copyright (C) 2005-2006 Thomas Gleixner
42 irq_work_sync(&desc->redirect.work); in synchronize_irqwork()
60 while (irqd_irq_inprogress(&desc->irq_data)) in __synchronize_hardirq()
63 /* Ok, that indicated we're done: double-check carefully. */ in __synchronize_hardirq()
64 guard(raw_spinlock_irqsave)(&desc->lock); in __synchronize_hardirq()
65 inprogress = irqd_irq_inprogress(&desc->irq_data); in __synchronize_hardirq()
85 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
98 * This function may be called - with care - from IRQ context.
111 return !atomic_read(&desc->threads_active); in synchronize_hardirq()
127 wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); in __synchronize_irq()
131 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
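A minimal usage sketch (device specifics elided): synchronize_hardirq() only waits for hard IRQ handlers running on other CPUs, while synchronize_irq() also waits for the threaded handler and may therefore sleep.

#include <linux/interrupt.h>

/*
 * Typical teardown ordering: the device must already have been told to
 * stop raising interrupts before this is called.
 */
static void foo_quiesce_irq(unsigned int irq)
{
	/* May sleep; must not be called from IRQ context. */
	synchronize_irq(irq);
}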
159 if (!desc || !irqd_can_balance(&desc->irq_data) || in __irq_can_set_affinity()
160 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) in __irq_can_set_affinity()
166 * irq_can_set_affinity - Check if the affinity of a given irq can be set
176 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space in irq_can_set_affinity_usr()
187 !irqd_affinity_is_managed(&desc->irq_data); in irq_can_set_affinity_usr()
191 * irq_set_thread_affinity - Notify irq threads to adjust affinity
196 * we hold desc->lock and this code can be called from hard interrupt
204 if (action->thread) { in irq_set_thread_affinity()
205 set_bit(IRQTF_AFFINITY, &action->thread_flags); in irq_set_thread_affinity()
206 wake_up_process(action->thread); in irq_set_thread_affinity()
208 if (action->secondary && action->secondary->thread) { in irq_set_thread_affinity()
209 set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags); in irq_set_thread_affinity()
210 wake_up_process(action->secondary->thread); in irq_set_thread_affinity()
224 chip->name, data->irq); in irq_validate_effective_affinity()
240 if (!chip || !chip->irq_set_affinity) in irq_do_set_affinity()
241 return -EINVAL; in irq_do_set_affinity()
284 ret = chip->irq_set_affinity(data, tmp_mask, force); in irq_do_set_affinity()
286 ret = chip->irq_set_affinity(data, mask, force); in irq_do_set_affinity()
288 ret = -EINVAL; in irq_do_set_affinity()
293 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_do_set_affinity()
318 return -EBUSY; in irq_set_affinity_pending()
332 if (ret == -EBUSY && !force) in irq_try_set_affinity()
355 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_set_affinity_deactivated()
362 * irq_affinity_schedule_notify_work - Schedule work to notify about affinity change
367 lockdep_assert_held(&desc->lock); in irq_affinity_schedule_notify_work()
369 kref_get(&desc->affinity_notify->kref); in irq_affinity_schedule_notify_work()
370 if (!schedule_work(&desc->affinity_notify->work)) { in irq_affinity_schedule_notify_work()
372 kref_put(&desc->affinity_notify->kref, desc->affinity_notify->release); in irq_affinity_schedule_notify_work()
383 if (!chip || !chip->irq_set_affinity) in irq_set_affinity_locked()
384 return -EINVAL; in irq_set_affinity_locked()
396 if (desc->affinity_notify) in irq_set_affinity_locked()
405 * irq_update_affinity_desc - Update affinity management for an interrupt
412 * There are certain limitations on when it may be used - attempts to use it
415 * managed/non-managed interrupt accounting. In addition, attempts to use it on
426 return -EOPNOTSUPP; in irq_update_affinity_desc()
433 if (irqd_is_started(&desc->irq_data)) in irq_update_affinity_desc()
434 return -EBUSY; in irq_update_affinity_desc()
437 if (irqd_affinity_is_managed(&desc->irq_data)) in irq_update_affinity_desc()
438 return -EBUSY; in irq_update_affinity_desc()
443 activated = irqd_is_activated(&desc->irq_data); in irq_update_affinity_desc()
445 irq_domain_deactivate_irq(&desc->irq_data); in irq_update_affinity_desc()
447 if (affinity->is_managed) { in irq_update_affinity_desc()
448 irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED); in irq_update_affinity_desc()
449 irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN); in irq_update_affinity_desc()
452 cpumask_copy(desc->irq_common_data.affinity, &affinity->mask); in irq_update_affinity_desc()
456 irq_domain_activate_irq(&desc->irq_data, false); in irq_update_affinity_desc()
459 return -EINVAL; in irq_update_affinity_desc()
468 return -EINVAL; in __irq_set_affinity()
470 guard(raw_spinlock_irqsave)(&desc->lock); in __irq_set_affinity()
475 * irq_set_affinity - Set the irq affinity of a given irq
488 * irq_force_affinity - Force the irq affinity of a given irq
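Illustrative only (CPU and IRQ numbers are made up): a driver pinning its interrupt to one CPU. irq_set_affinity() is the normal path; irq_force_affinity() passes the force flag down to the irqchip for cases where the usual checks must be overridden.

#include <linux/interrupt.h>
#include <linux/cpumask.h>

static int foo_pin_irq(unsigned int irq, unsigned int cpu)
{
	if (!irq_can_set_affinity(irq))
		return -EINVAL;

	/* Route delivery of this interrupt to a single CPU. */
	return irq_set_affinity(irq, cpumask_of(cpu));
}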
506 int ret = -EINVAL; in __irq_apply_affinity_hint()
509 scoped_irqdesc->affinity_hint = m; in __irq_apply_affinity_hint()
522 struct irq_desc *desc = irq_to_desc(notify->irq); in irq_affinity_notify()
528 scoped_guard(raw_spinlock_irqsave, &desc->lock) { in irq_affinity_notify()
529 if (irq_move_pending(&desc->irq_data)) in irq_affinity_notify()
532 cpumask_copy(cpumask, desc->irq_common_data.affinity); in irq_affinity_notify()
535 notify->notify(notify, cpumask); in irq_affinity_notify()
539 kref_put(&notify->kref, notify->release); in irq_affinity_notify()
543 * irq_set_affinity_notifier - control notification of IRQ affinity changes
562 return -EINVAL; in irq_set_affinity_notifier()
566 notify->irq = irq; in irq_set_affinity_notifier()
567 kref_init(&notify->kref); in irq_set_affinity_notifier()
568 INIT_WORK(&notify->work, irq_affinity_notify); in irq_set_affinity_notifier()
571 scoped_guard(raw_spinlock_irq, &desc->lock) { in irq_set_affinity_notifier()
572 old_notify = desc->affinity_notify; in irq_set_affinity_notifier()
573 desc->affinity_notify = notify; in irq_set_affinity_notifier()
577 if (cancel_work_sync(&old_notify->work)) { in irq_set_affinity_notifier()
579 kref_put(&old_notify->kref, old_notify->release); in irq_set_affinity_notifier()
581 kref_put(&old_notify->kref, old_notify->release); in irq_set_affinity_notifier()
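A hedged sketch of the notifier side of the interface registered above; the embedding structure and the foo_rebalance() helper are hypothetical. The core takes a kref per queued notification and drops it through ->release(), and the notifier must be removed (by passing NULL) before free_irq().

#include <linux/interrupt.h>

struct foo_ctx {
	struct irq_affinity_notify notify;
	/* driver state ... */
};

static void foo_affinity_notify(struct irq_affinity_notify *notify,
				const cpumask_t *mask)
{
	struct foo_ctx *ctx = container_of(notify, struct foo_ctx, notify);

	/* Re-balance ctx's per-CPU resources to follow the new affinity. */
	foo_rebalance(ctx, mask);	/* hypothetical helper */
}

static void foo_affinity_release(struct kref *ref)
{
	/* Last reference dropped; nothing dynamic to free in this sketch. */
}

static int foo_register_notifier(struct foo_ctx *ctx, unsigned int irq)
{
	ctx->notify.notify = foo_affinity_notify;
	ctx->notify.release = foo_affinity_release;
	return irq_set_affinity_notifier(irq, &ctx->notify);
}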
609 if (irqd_affinity_is_managed(&desc->irq_data) || in irq_setup_affinity()
610 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { in irq_setup_affinity()
611 if (cpumask_intersects(desc->irq_common_data.affinity, in irq_setup_affinity()
613 set = desc->irq_common_data.affinity; in irq_setup_affinity()
615 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); in irq_setup_affinity()
629 return irq_do_set_affinity(&desc->irq_data, &mask, false); in irq_setup_affinity()
642 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
644 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
649 * example code path is as below: KVM -> IOMMU -> irq_set_vcpu_affinity().
661 if (chip && chip->irq_set_vcpu_affinity) in irq_set_vcpu_affinity()
668 return -ENOSYS; in irq_set_vcpu_affinity()
669 return chip->irq_set_vcpu_affinity(data, vcpu_info); in irq_set_vcpu_affinity()
671 return -EINVAL; in irq_set_vcpu_affinity()
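Illustrative only: the layout of @vcpu_info is owned by the irqchip (the IOMMU driver in the KVM example above), so a caller simply forwards an opaque descriptor.

static int foo_post_to_vcpu(unsigned int irq, void *vcpu_info)
{
	/* Fails with -ENOSYS/-EINVAL if no chip in the hierarchy supports it. */
	return irq_set_vcpu_affinity(irq, vcpu_info);
}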
677 if (!desc->depth++) in __disable_irq()
687 return -EINVAL; in __disable_irq_nosync()
691 * disable_irq_nosync - disable an irq without waiting
708 * disable_irq - disable an irq and wait for completion
730 * disable_hardirq - disables an irq and waits for hardirq completion
744 * This function may be called - with care - from IRQ context.
755 * disable_nmi_nosync - disable an nmi without waiting
771 switch (desc->depth) { in __enable_irq()
778 if (desc->istate & IRQS_SUSPENDED) in __enable_irq()
797 desc->depth--; in __enable_irq()
802 * enable_irq - enable handling of an irq
806 * last disable, processing of interrupts on this IRQ line is re-enabled.
809 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
816 if (WARN(!desc->irq_data.chip, "enable_irq before setup/request_irq: irq %u\n", irq)) in enable_irq()
824 * enable_nmi - enable handling of an nmi
829 * disable, processing of interrupts on this IRQ line is re-enabled.
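A minimal sketch of the disable/enable pairing: disable_irq() waits for running handlers and may sleep, so atomic contexts use disable_irq_nosync() (or disable_hardirq() when only hard IRQ completion matters). Disables nest; the line is re-enabled only when enable_irq() brings the depth back to zero.

#include <linux/interrupt.h>

static void foo_pause_irq(unsigned int irq)
{
	/* Sleeps until any running hard and threaded handlers complete. */
	disable_irq(irq);
}

static void foo_pause_irq_atomic(unsigned int irq)
{
	/* Safe from IRQ context: does not wait for handlers to finish. */
	disable_irq_nosync(irq);
}

static void foo_resume_irq(unsigned int irq)
{
	/* Undoes exactly one prior disable; re-enables at depth zero. */
	enable_irq(irq);
}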
839 int ret = -ENXIO; in set_irq_wake_real()
841 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) in set_irq_wake_real()
844 if (desc->irq_data.chip->irq_set_wake) in set_irq_wake_real()
845 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); in set_irq_wake_real()
851 * irq_set_irq_wake - control irq power management wakeup
857 * non-wakeup mode support.
876 return -EINVAL; in irq_set_irq_wake()
879 * wakeup-capable irqs can be shared between drivers that in irq_set_irq_wake()
883 if (desc->wake_depth++ == 0) { in irq_set_irq_wake()
886 desc->wake_depth = 0; in irq_set_irq_wake()
888 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
891 if (desc->wake_depth == 0) { in irq_set_irq_wake()
893 } else if (--desc->wake_depth == 0) { in irq_set_irq_wake()
896 desc->wake_depth = 1; in irq_set_irq_wake()
898 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
903 return -EINVAL; in irq_set_irq_wake()
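Sketch of the usual wakeup plumbing (device struct and callbacks hypothetical): drivers normally use the enable_irq_wake()/disable_irq_wake() wrappers around irq_set_irq_wake() from their suspend/resume paths.

#include <linux/device.h>
#include <linux/interrupt.h>

struct foo_dev { unsigned int irq; };	/* hypothetical driver state */

static int foo_suspend(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(foo->irq);	/* irq_set_irq_wake(irq, 1) */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(foo->irq);	/* irq_set_irq_wake(irq, 0) */
	return 0;
}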
918 if (!desc->action || irqflags & desc->action->flags & IRQF_SHARED) in can_request_irq()
927 struct irq_chip *chip = desc->irq_data.chip; in __irq_set_trigger()
930 if (!chip || !chip->irq_set_type) { in __irq_set_trigger()
933 * flow-types? in __irq_set_trigger()
937 chip ? (chip->name ? : "unknown") : "unknown"); in __irq_set_trigger()
941 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { in __irq_set_trigger()
942 if (!irqd_irq_masked(&desc->irq_data)) in __irq_set_trigger()
944 if (!irqd_irq_disabled(&desc->irq_data)) in __irq_set_trigger()
950 ret = chip->irq_set_type(&desc->irq_data, flags); in __irq_set_trigger()
955 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); in __irq_set_trigger()
956 irqd_set(&desc->irq_data, flags); in __irq_set_trigger()
960 flags = irqd_get_trigger_type(&desc->irq_data); in __irq_set_trigger()
962 irqd_clear(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
966 irqd_set(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
973 flags, irq_desc_get_irq(desc), chip->irq_set_type); in __irq_set_trigger()
984 scoped_irqdesc->parent_irq = parent_irq; in irq_set_parent()
987 return -EINVAL; in irq_set_parent()
1026 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) in irq_thread_check_affinity()
1036 set_bit(IRQTF_AFFINITY, &action->thread_flags); in irq_thread_check_affinity()
1040 scoped_guard(raw_spinlock_irq, &desc->lock) { in irq_thread_check_affinity()
1043 m = irq_data_get_effective_affinity_mask(&desc->irq_data); in irq_thread_check_affinity()
1064 &action->thread_flags)) { in irq_wait_for_interrupt()
1069 return -1; in irq_wait_for_interrupt()
1073 &action->thread_flags)) { in irq_wait_for_interrupt()
1089 if (!(desc->istate & IRQS_ONESHOT) || in irq_finalize_oneshot()
1090 action->handler == irq_forced_secondary_handler) in irq_finalize_oneshot()
1094 raw_spin_lock_irq(&desc->lock); in irq_finalize_oneshot()
1106 * versus "desc->threads_oneshot |= action->thread_mask;" in in irq_finalize_oneshot()
1110 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { in irq_finalize_oneshot()
1111 raw_spin_unlock_irq(&desc->lock); in irq_finalize_oneshot()
1122 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) in irq_finalize_oneshot()
1125 desc->threads_oneshot &= ~action->thread_mask; in irq_finalize_oneshot()
1127 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && in irq_finalize_oneshot()
1128 irqd_irq_masked(&desc->irq_data)) in irq_finalize_oneshot()
1132 raw_spin_unlock_irq(&desc->lock); in irq_finalize_oneshot()
1138 * preemptible - many of them need to sleep and wait for slow busses to
1143 irqreturn_t ret = action->thread_fn(action->irq, action->dev_id); in irq_thread_fn()
1146 atomic_inc(&desc->threads_handled); in irq_thread_fn()
1174 if (atomic_dec_and_test(&desc->threads_active)) in wake_threads_waitq()
1175 wake_up(&desc->wait_for_threads); in wake_threads_waitq()
1184 if (WARN_ON_ONCE(!(current->flags & PF_EXITING))) in irq_thread_dtor()
1190 tsk->comm, tsk->pid, action->irq); in irq_thread_dtor()
1193 desc = irq_to_desc(action->irq); in irq_thread_dtor()
1196 * desc->threads_active and wake possible waiters. in irq_thread_dtor()
1198 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) in irq_thread_dtor()
1201 /* Prevent a stale desc->threads_oneshot */ in irq_thread_dtor()
1207 struct irqaction *secondary = action->secondary; in irq_wake_secondary()
1212 guard(raw_spinlock_irq)(&desc->lock); in irq_wake_secondary()
1222 set_bit(IRQTF_READY, &action->thread_flags); in irq_thread_set_ready()
1223 wake_up(&desc->wait_for_threads); in irq_thread_set_ready()
1233 if (!action || !action->thread) in wake_up_and_wait_for_irq_thread_ready()
1236 wake_up_process(action->thread); in wake_up_and_wait_for_irq_thread_ready()
1237 wait_event(desc->wait_for_threads, in wake_up_and_wait_for_irq_thread_ready()
1238 test_bit(IRQTF_READY, &action->thread_flags)); in wake_up_and_wait_for_irq_thread_ready()
1248 struct irq_desc *desc = irq_to_desc(action->irq); in irq_thread()
1254 if (action->handler == irq_forced_secondary_handler) in irq_thread()
1260 &action->thread_flags)) in irq_thread()
1289 * irq_wake_thread - wake the irq thread for the action identified by dev_id
1301 guard(raw_spinlock_irqsave)(&desc->lock); in irq_wake_thread()
1303 if (action->dev_id == dev_id) { in irq_wake_thread()
1304 if (action->thread) in irq_wake_thread()
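Hedged example of kicking the threaded handler from another driver path (for instance a hypothetical timeout handler); @dev_id must be the same cookie that was passed to request_threaded_irq().

static void foo_kick_irq_thread(unsigned int irq, void *dev_id)
{
	/* Wakes the irq thread registered for this irq/dev_id pair. */
	irq_wake_thread(irq, dev_id);
}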
1316 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) in irq_setup_forced_threading()
1323 if (new->handler == irq_default_primary_handler) in irq_setup_forced_threading()
1326 new->flags |= IRQF_ONESHOT; in irq_setup_forced_threading()
1333 if (new->handler && new->thread_fn) { in irq_setup_forced_threading()
1335 new->secondary = kzalloc_obj(struct irqaction); in irq_setup_forced_threading()
1336 if (!new->secondary) in irq_setup_forced_threading()
1337 return -ENOMEM; in irq_setup_forced_threading()
1338 new->secondary->handler = irq_forced_secondary_handler; in irq_setup_forced_threading()
1339 new->secondary->thread_fn = new->thread_fn; in irq_setup_forced_threading()
1340 new->secondary->dev_id = new->dev_id; in irq_setup_forced_threading()
1341 new->secondary->irq = new->irq; in irq_setup_forced_threading()
1342 new->secondary->name = new->name; in irq_setup_forced_threading()
1345 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); in irq_setup_forced_threading()
1346 new->thread_fn = new->handler; in irq_setup_forced_threading()
1347 new->handler = irq_default_primary_handler; in irq_setup_forced_threading()
1353 struct irq_data *d = &desc->irq_data; in irq_request_resources()
1354 struct irq_chip *c = d->chip; in irq_request_resources()
1356 return c->irq_request_resources ? c->irq_request_resources(d) : 0; in irq_request_resources()
1361 struct irq_data *d = &desc->irq_data; in irq_release_resources()
1362 struct irq_chip *c = d->chip; in irq_release_resources()
1364 if (c->irq_release_resources) in irq_release_resources()
1365 c->irq_release_resources(d); in irq_release_resources()
1374 if (d->parent_data) in irq_supports_nmi()
1378 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock) in irq_supports_nmi()
1381 return d->chip->flags & IRQCHIP_SUPPORTS_NMI; in irq_supports_nmi()
1387 struct irq_chip *c = d->chip; in irq_nmi_setup()
1389 return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL; in irq_nmi_setup()
1395 struct irq_chip *c = d->chip; in irq_nmi_teardown()
1397 if (c->irq_nmi_teardown) in irq_nmi_teardown()
1398 c->irq_nmi_teardown(d); in irq_nmi_teardown()
1407 t = kthread_create(irq_thread, new, "irq/%d-%s", irq, in setup_irq_thread()
1408 new->name); in setup_irq_thread()
1410 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq, in setup_irq_thread()
1411 new->name); in setup_irq_thread()
1422 new->thread = get_task_struct(t); in setup_irq_thread()
1428 * it from ever being re-affined directly by cpuset or in setup_irq_thread()
1429 * housekeeping. The proper way to do it is to re-affine the whole in setup_irq_thread()
1438 set_bit(IRQTF_AFFINITY, &new->thread_flags); in setup_irq_thread()
1446 if (cpumask_intersects(old->affinity, new->affinity) || in valid_percpu_irqaction()
1447 old->percpu_dev_id == new->percpu_dev_id) in valid_percpu_irqaction()
1450 old = old->next; in valid_percpu_irqaction()
1457 * Internal function to register an irqaction - typically used to
1462 * desc->request_mutex Provides serialization against a concurrent free_irq()
1464 * desc->lock Provides serialization against hard interrupts
1466 * chip_bus_lock and desc->lock are sufficient for all other management and
1467 * interrupt related functions. desc->request_mutex solely serializes
1479 return -EINVAL; in __setup_irq()
1481 if (desc->irq_data.chip == &no_irq_chip) in __setup_irq()
1482 return -ENOSYS; in __setup_irq()
1483 if (!try_module_get(desc->owner)) in __setup_irq()
1484 return -ENODEV; in __setup_irq()
1488 new->irq = irq; in __setup_irq()
1494 if (!(new->flags & IRQF_TRIGGER_MASK)) in __setup_irq()
1495 new->flags |= irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1502 WARN_ON_ONCE(new->flags & IRQF_ONESHOT && !new->thread_fn); in __setup_irq()
1510 if (!new->thread_fn) { in __setup_irq()
1511 ret = -EINVAL; in __setup_irq()
1519 new->handler = irq_nested_primary_handler; in __setup_irq()
1533 if (new->thread_fn && !nested) { in __setup_irq()
1537 if (new->secondary) { in __setup_irq()
1538 ret = setup_irq_thread(new->secondary, irq, true); in __setup_irq()
1553 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) in __setup_irq()
1554 new->flags &= ~IRQF_ONESHOT; in __setup_irq()
1559 * chip bus lock and desc->lock. Also protects against handing out in __setup_irq()
1563 mutex_lock(&desc->request_mutex); in __setup_irq()
1573 if (!desc->action) { in __setup_irq()
1577 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1586 * desc->request_mutex or the optional bus lock. in __setup_irq()
1588 raw_spin_lock_irqsave(&desc->lock, flags); in __setup_irq()
1589 old_ptr = &desc->action; in __setup_irq()
1604 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1605 ret = -EINVAL; in __setup_irq()
1611 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1612 ret = -EINVAL; in __setup_irq()
1620 if (irqd_trigger_type_was_set(&desc->irq_data)) { in __setup_irq()
1621 oldtype = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1623 oldtype = new->flags & IRQF_TRIGGER_MASK; in __setup_irq()
1624 irqd_set_trigger_type(&desc->irq_data, oldtype); in __setup_irq()
1627 if (!((old->flags & new->flags) & IRQF_SHARED) || in __setup_irq()
1628 (oldtype != (new->flags & IRQF_TRIGGER_MASK))) in __setup_irq()
1631 if ((old->flags & IRQF_ONESHOT) && in __setup_irq()
1632 (new->flags & IRQF_COND_ONESHOT)) in __setup_irq()
1633 new->flags |= IRQF_ONESHOT; in __setup_irq()
1634 else if ((old->flags ^ new->flags) & IRQF_ONESHOT) in __setup_irq()
1637 /* All handlers must agree on per-cpuness */ in __setup_irq()
1638 if ((old->flags & IRQF_PERCPU) != in __setup_irq()
1639 (new->flags & IRQF_PERCPU)) in __setup_irq()
1645 * Or all existing action->thread_mask bits, in __setup_irq()
1649 thread_mask |= old->thread_mask; in __setup_irq()
1650 old_ptr = &old->next; in __setup_irq()
1661 if (new->flags & IRQF_ONESHOT) { in __setup_irq()
1667 ret = -EBUSY; in __setup_irq()
1672 * desc->thread_active to indicate that the in __setup_irq()
1676 * line have completed desc->threads_active becomes in __setup_irq()
1681 * interrupt handlers, then desc->threads_active is in __setup_irq()
1688 * all existing action->thread_mask bits. in __setup_irq()
1690 new->thread_mask = 1UL << ffz(thread_mask); in __setup_irq()
1692 } else if (new->handler == irq_default_primary_handler && in __setup_irq()
1693 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { in __setup_irq()
1710 new->name, irq); in __setup_irq()
1711 ret = -EINVAL; in __setup_irq()
1717 if (new->flags & IRQF_TRIGGER_MASK) { in __setup_irq()
1719 new->flags & IRQF_TRIGGER_MASK); in __setup_irq()
1740 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ in __setup_irq()
1742 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); in __setup_irq()
1744 if (new->flags & IRQF_PERCPU) { in __setup_irq()
1745 irqd_set(&desc->irq_data, IRQD_PER_CPU); in __setup_irq()
1747 if (new->flags & IRQF_NO_DEBUG) in __setup_irq()
1754 if (new->flags & IRQF_ONESHOT) in __setup_irq()
1755 desc->istate |= IRQS_ONESHOT; in __setup_irq()
1758 if (new->flags & IRQF_NOBALANCING) { in __setup_irq()
1760 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); in __setup_irq()
1763 if (!(new->flags & IRQF_NO_AUTOEN) && in __setup_irq()
1773 WARN_ON_ONCE(new->flags & IRQF_SHARED); in __setup_irq()
1775 desc->depth = 1; in __setup_irq()
1778 } else if (new->flags & IRQF_TRIGGER_MASK) { in __setup_irq()
1779 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; in __setup_irq()
1780 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1793 desc->irq_count = 0; in __setup_irq()
1794 desc->irqs_unhandled = 0; in __setup_irq()
1800 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { in __setup_irq()
1801 desc->istate &= ~IRQS_SPURIOUS_DISABLED; in __setup_irq()
1805 raw_spin_unlock_irqrestore(&desc->lock, flags); in __setup_irq()
1807 mutex_unlock(&desc->request_mutex); in __setup_irq()
1810 wake_up_and_wait_for_irq_thread_ready(desc, new->secondary); in __setup_irq()
1813 new->dir = NULL; in __setup_irq()
1818 if (!(new->flags & IRQF_PROBE_SHARED)) { in __setup_irq()
1820 irq, new->flags, new->name, old->flags, old->name); in __setup_irq()
1825 ret = -EBUSY; in __setup_irq()
1828 raw_spin_unlock_irqrestore(&desc->lock, flags); in __setup_irq()
1830 if (!desc->action) in __setup_irq()
1834 mutex_unlock(&desc->request_mutex); in __setup_irq()
1837 if (new->thread) { in __setup_irq()
1838 struct task_struct *t = new->thread; in __setup_irq()
1840 new->thread = NULL; in __setup_irq()
1843 if (new->secondary && new->secondary->thread) { in __setup_irq()
1844 struct task_struct *t = new->secondary->thread; in __setup_irq()
1846 new->secondary->thread = NULL; in __setup_irq()
1850 module_put(desc->owner); in __setup_irq()
1855 * Internal function to unregister an irqaction - used to free
1860 unsigned irq = desc->irq_data.irq; in __free_irq()
1866 mutex_lock(&desc->request_mutex); in __free_irq()
1868 raw_spin_lock_irqsave(&desc->lock, flags); in __free_irq()
1874 action_ptr = &desc->action; in __free_irq()
1879 WARN(1, "Trying to free already-free IRQ %d\n", irq); in __free_irq()
1880 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1882 mutex_unlock(&desc->request_mutex); in __free_irq()
1886 if (action->dev_id == dev_id) in __free_irq()
1888 action_ptr = &action->next; in __free_irq()
1891 /* Found it - now remove it from the list of entries: */ in __free_irq()
1892 *action_ptr = action->next; in __free_irq()
1897 if (!desc->action) { in __free_irq()
1905 if (WARN_ON_ONCE(desc->affinity_hint)) in __free_irq()
1906 desc->affinity_hint = NULL; in __free_irq()
1909 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1920 * The still held desc->request_mutex() protects against a in __free_irq()
1937 * It's a shared IRQ -- the driver ought to be prepared for an IRQ in __free_irq()
1944 if (action->flags & IRQF_SHARED) { in __free_irq()
1946 action->handler(irq, dev_id); in __free_irq()
1957 if (action->thread) { in __free_irq()
1958 kthread_stop_put(action->thread); in __free_irq()
1959 if (action->secondary && action->secondary->thread) in __free_irq()
1960 kthread_stop_put(action->secondary->thread); in __free_irq()
1964 if (!desc->action) { in __free_irq()
1974 scoped_guard(raw_spinlock_irqsave, &desc->lock) in __free_irq()
1975 irq_domain_deactivate_irq(&desc->irq_data); in __free_irq()
1981 mutex_unlock(&desc->request_mutex); in __free_irq()
1983 irq_chip_pm_put(&desc->irq_data); in __free_irq()
1984 module_put(desc->owner); in __free_irq()
1985 kfree(action->secondary); in __free_irq()
1990 * free_irq - free an interrupt allocated with request_irq
2014 if (WARN_ON(desc->affinity_notify)) in free_irq()
2015 desc->affinity_notify = NULL; in free_irq()
2023 devname = action->name; in free_irq()
2029 /* This function must be called with desc->lock held */
2034 desc->istate &= ~IRQS_NMI; in __cleanup_nmi()
2036 if (!WARN_ON(desc->action == NULL)) { in __cleanup_nmi()
2037 irq_pm_remove_action(desc, desc->action); in __cleanup_nmi()
2038 devname = desc->action->name; in __cleanup_nmi()
2039 unregister_handler_proc(irq, desc->action); in __cleanup_nmi()
2041 kfree(desc->action); in __cleanup_nmi()
2042 desc->action = NULL; in __cleanup_nmi()
2050 irq_chip_pm_put(&desc->irq_data); in __cleanup_nmi()
2051 module_put(desc->owner); in __cleanup_nmi()
2067 if (WARN_ON(desc->depth == 0)) in free_nmi()
2070 guard(raw_spinlock_irqsave)(&desc->lock); in free_nmi()
2076 * request_threaded_irq - allocate an interrupt line
2124 return -ENOTCONN; in request_threaded_irq()
2127 * Sanity-check: shared interrupts must pass in a real dev-ID, in request_threaded_irq()
2143 return -EINVAL; in request_threaded_irq()
2147 return -EINVAL; in request_threaded_irq()
2151 return -EINVAL; in request_threaded_irq()
2155 return -EINVAL; in request_threaded_irq()
2161 return -ENOMEM; in request_threaded_irq()
2163 action->handler = handler; in request_threaded_irq()
2164 action->thread_fn = thread_fn; in request_threaded_irq()
2165 action->flags = irqflags; in request_threaded_irq()
2166 action->name = devname; in request_threaded_irq()
2167 action->dev_id = dev_id; in request_threaded_irq()
2169 retval = irq_chip_pm_get(&desc->irq_data); in request_threaded_irq()
2178 irq_chip_pm_put(&desc->irq_data); in request_threaded_irq()
2179 kfree(action->secondary); in request_threaded_irq()
2186 * It's a shared IRQ -- the driver ought to be prepared for it in request_threaded_irq()
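A hedged end-to-end sketch of the threaded request/free lifecycle; the device structure and the foo_hw_*()/foo_process_events() helpers are assumptions standing in for real hardware access. The hard handler only acknowledges the device and defers the sleeping work to the thread.

#include <linux/interrupt.h>

struct foo_dev { unsigned int irq; /* plus device registers etc. */ };

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	if (!foo_hw_irq_pending(foo))
		return IRQ_NONE;	/* not ours on a shared line */

	foo_hw_mask_irq(foo);		/* quick, non-sleeping ack */
	return IRQ_WAKE_THREAD;		/* run foo_thread_fn() */
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	foo_process_events(foo);	/* may sleep, e.g. bus access */
	foo_hw_unmask_irq(foo);
	return IRQ_HANDLED;
}

static int foo_probe_irq(struct foo_dev *foo)
{
	/*
	 * IRQF_ONESHOT keeps the line masked until the thread finishes;
	 * it is mandatory when the primary handler is left NULL.
	 */
	return request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
				    IRQF_ONESHOT, "foo", foo);
}

static void foo_remove_irq(struct foo_dev *foo)
{
	free_irq(foo->irq, foo);	/* waits for handlers to finish */
}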
2207 * request_any_context_irq - allocate an interrupt line
2229 return -ENOTCONN; in request_any_context_irq()
2233 return -EINVAL; in request_any_context_irq()
2247 * request_nmi - allocate an interrupt line for NMI delivery
2279 return -ENOTCONN; in request_nmi()
2283 return -EINVAL; in request_nmi()
2286 return -EINVAL; in request_nmi()
2289 return -EINVAL; in request_nmi()
2298 return -EINVAL; in request_nmi()
2302 return -ENOMEM; in request_nmi()
2304 action->handler = handler; in request_nmi()
2305 action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING; in request_nmi()
2306 action->name = name; in request_nmi()
2307 action->dev_id = dev_id; in request_nmi()
2309 retval = irq_chip_pm_get(&desc->irq_data); in request_nmi()
2317 scoped_guard(raw_spinlock_irqsave, &desc->lock) { in request_nmi()
2319 desc->istate |= IRQS_NMI; in request_nmi()
2323 return -EINVAL; in request_nmi()
2329 irq_chip_pm_put(&desc->irq_data); in request_nmi()
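Illustrative only, and only valid on a line whose irqchip advertises IRQCHIP_SUPPORTS_NMI: the handler runs in NMI context, the line cannot be shared or threaded, and the foo_* names are assumptions. Delivery is enabled explicitly with enable_nmi() and torn down with free_nmi().

static irqreturn_t foo_nmi_handler(int irq, void *dev_id)
{
	/* NMI context: no sleeping, no locks a normal handler may hold. */
	foo_hw_snapshot(dev_id);	/* hypothetical */
	return IRQ_HANDLED;
}

static int foo_setup_nmi(unsigned int irq, void *cookie)
{
	int ret = request_nmi(irq, foo_nmi_handler, 0, "foo-nmi", cookie);

	if (ret)
		return ret;

	enable_nmi(irq);		/* delivery starts here */
	return 0;
}

static void foo_teardown_nmi(unsigned int irq, void *cookie)
{
	disable_nmi_nosync(irq);
	free_nmi(irq, cookie);
}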
2347 type = irqd_get_trigger_type(&desc->irq_data); in enable_percpu_irq()
2366 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2375 return cpumask_test_cpu(smp_processor_id(), scoped_irqdesc->percpu_enabled); in irq_percpu_is_enabled()
2393 * Internal function to unregister a percpu irqaction.
2405 scoped_guard(raw_spinlock_irqsave, &desc->lock) { in __free_percpu_irq()
2406 action_ptr = &desc->action; in __free_percpu_irq()
2411 WARN(1, "Trying to free already-free IRQ %d\n", irq); in __free_percpu_irq()
2415 if (action->percpu_dev_id == dev_id) in __free_percpu_irq()
2418 action_ptr = &action->next; in __free_percpu_irq()
2421 if (cpumask_intersects(desc->percpu_enabled, action->affinity)) { in __free_percpu_irq()
2422 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", irq, in __free_percpu_irq()
2423 cpumask_first_and(desc->percpu_enabled, action->affinity)); in __free_percpu_irq()
2427 /* Found it - now remove it from the list of entries: */ in __free_percpu_irq()
2428 *action_ptr = action->next; in __free_percpu_irq()
2431 if (!desc->action) in __free_percpu_irq()
2432 desc->istate &= ~IRQS_NMI; in __free_percpu_irq()
2436 irq_chip_pm_put(&desc->irq_data); in __free_percpu_irq()
2437 module_put(desc->owner); in __free_percpu_irq()
2442 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
2446 * Remove a percpu interrupt handler. The handler is removed, but the
2493 action->handler = handler; in create_percpu_irqaction()
2494 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND; in create_percpu_irqaction()
2495 action->name = devname; in create_percpu_irqaction()
2496 action->percpu_dev_id = dev_id; in create_percpu_irqaction()
2497 action->affinity = affinity; in create_percpu_irqaction()
2500 * We allow some form of sharing for non-overlapping affinity in create_percpu_irqaction()
2505 action->flags |= IRQF_SHARED; in create_percpu_irqaction()
2511 * request_percpu_irq_affinity - allocate a percpu interrupt line
2516 * @dev_id: A percpu cookie passed back to the handler function
2519 * on any CPU, as all percpu-devid interrupts are flagged with IRQ_NOAUTOEN.
2522 * @dev_id must be globally unique. It is a per-cpu variable, and
2534 return -EINVAL; in request_percpu_irq_affinity()
2539 return -EINVAL; in request_percpu_irq_affinity()
2543 return -ENOMEM; in request_percpu_irq_affinity()
2545 retval = irq_chip_pm_get(&desc->irq_data); in request_percpu_irq_affinity()
2554 irq_chip_pm_put(&desc->irq_data); in request_percpu_irq_affinity()
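A hedged sketch of the per-CPU request path (timer-style device assumed, struct and names hypothetical): the cookie is a per-CPU allocation, each CPU receives its own instance as dev_id, and, as noted above, percpu-devid lines start out disabled, so each CPU enables its own copy.

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

struct foo_cpu_state { unsigned long events; };	/* hypothetical */

static unsigned int foo_irq;			/* set at probe time */
static struct foo_cpu_state __percpu *foo_state;

static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
{
	struct foo_cpu_state *st = dev_id;	/* this CPU's instance */

	st->events++;
	return IRQ_HANDLED;
}

static int foo_request(unsigned int irq)
{
	foo_state = alloc_percpu(struct foo_cpu_state);
	if (!foo_state)
		return -ENOMEM;

	foo_irq = irq;
	/* Registers the handler; the line stays disabled on every CPU. */
	return request_percpu_irq(irq, foo_percpu_handler, "foo", foo_state);
}

/* Run on each CPU, e.g. from CPU hotplug online/offline callbacks. */
static int foo_cpu_online(unsigned int cpu)
{
	enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);
	return 0;
}

static int foo_cpu_offline(unsigned int cpu)
{
	disable_percpu_irq(foo_irq);
	return 0;
}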
2563 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
2568 * @dev_id: A percpu cookie passed back to the handler function
2574 * @dev_id must be globally unique. It is a per-cpu variable, and the
2592 return -EINVAL; in request_percpu_nmi()
2600 return -EINVAL; in request_percpu_nmi()
2605 return -EINVAL; in request_percpu_nmi()
2610 return -ENOMEM; in request_percpu_nmi()
2612 retval = irq_chip_pm_get(&desc->irq_data); in request_percpu_nmi()
2620 scoped_guard(raw_spinlock_irqsave, &desc->lock) in request_percpu_nmi()
2621 desc->istate |= IRQS_NMI; in request_percpu_nmi()
2625 irq_chip_pm_put(&desc->irq_data); in request_percpu_nmi()
2633 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
2639 * As a CPU local operation, this should be called from non-preemptible
2647 int ret = -EINVAL; in prepare_percpu_nmi()
2653 "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", irq)) in prepare_percpu_nmi()
2654 return -EINVAL; in prepare_percpu_nmi()
2664 * teardown_percpu_nmi - undoes NMI setup of IRQ line
2670 * As a CPU local operation, this should be called from non-preemptible
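Hedged sketch of the per-CPU NMI lifecycle, assuming the line was requested earlier with request_percpu_nmi() and that foo_nmi_irq is a hypothetical global: both helpers run on the CPU being configured, from non-preemptible context, and enable_percpu_nmi()/disable_percpu_nmi() bracket actual delivery.

static unsigned int foo_nmi_irq;	/* hypothetical percpu NMI line */

/* Run on the target CPU, e.g. via CPU hotplug starting/dying callbacks. */
static int foo_nmi_starting_cpu(unsigned int cpu)
{
	int ret = prepare_percpu_nmi(foo_nmi_irq);	/* per-CPU chip setup */

	if (ret)
		return ret;

	enable_percpu_nmi(foo_nmi_irq, IRQ_TYPE_NONE);	/* start delivery */
	return 0;
}

static int foo_nmi_dying_cpu(unsigned int cpu)
{
	disable_percpu_nmi(foo_nmi_irq);
	teardown_percpu_nmi(foo_nmi_irq);		/* undo prepare */
	return 0;
}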
2687 int err = -EINVAL; in __irq_get_irqchip_state()
2692 return -ENODEV; in __irq_get_irqchip_state()
2693 if (chip->irq_get_irqchip_state) in __irq_get_irqchip_state()
2696 data = data->parent_data; in __irq_get_irqchip_state()
2703 err = chip->irq_get_irqchip_state(data, which, state); in __irq_get_irqchip_state()
2708 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
2717 * controller has per-cpu registers.
2726 return -EINVAL; in irq_get_irqchip_state()
2731 * irq_set_irqchip_state - set the state of a forwarded interrupt.
2740 * controller has per-cpu registers.
2752 return -ENODEV; in irq_set_irqchip_state()
2754 if (chip->irq_set_irqchip_state) in irq_set_irqchip_state()
2761 return chip->irq_set_irqchip_state(data, which, val); in irq_set_irqchip_state()
2763 return -EINVAL; in irq_set_irqchip_state()
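Illustrative use of the forwarded-state accessors (IRQ number supplied by the caller): peek at whether the line is pending at the irqchip and, if so, clear the stale pending bit.

#include <linux/interrupt.h>

static int foo_clear_stale_pending(unsigned int irq)
{
	bool pending;
	int ret = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);

	if (ret)
		return ret;

	if (pending)	/* drop the stale pending bit at the controller */
		ret = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);

	return ret;
}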
2768 * irq_has_action - Check whether an interrupt is requested
2785 * irq_check_status_bit - Check whether bits in the irq descriptor status are set
2799 res = !!(desc->status_use_accessors & bitmask); in irq_check_status_bit()
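Small illustrative check built on the two query helpers documented above; the wrapper name is hypothetical.

static bool foo_irq_is_per_cpu(unsigned int irq)
{
	/* Only meaningful if a handler is actually installed on the line. */
	return irq_has_action(irq) && irq_check_status_bit(irq, IRQ_PER_CPU);
}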