/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQDESC_H
#define _LINUX_IRQDESC_H

#include <linux/irq_work.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

/*
 * Core internal functions to deal with irq descriptors
 */

struct irq_affinity_notify;
struct proc_dir_entry;
struct module;
struct irq_desc;
struct irq_domain;
struct pt_regs;

/**
 * struct irqstat - interrupt statistics
 * @cnt:	real-time interrupt count
 * @ref:	snapshot of interrupt count
 */
struct irqstat {
	unsigned int	cnt;
#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT
	unsigned int	ref;
#endif
};
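/*
 * Illustrative sketch, not part of this header: the per-CPU @cnt counters
 * above are normally read through the irq_desc_kstat_cpu() helper defined
 * further down. A caller could, for instance, total the delivery count of
 * one descriptor across all CPUs; the helper name example_kstat_total() is
 * made up for this example.
 *
 *	static unsigned int example_kstat_total(struct irq_desc *desc)
 *	{
 *		unsigned int cpu, sum = 0;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += irq_desc_kstat_cpu(desc, cpu);
 *		return sum;
 *	}
 */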
/**
 * struct irq_redirect - interrupt redirection metadata
 * @work:		irq_work item for handler execution on a different CPU
 * @target_cpu:		CPU to run irq handler on in case the current CPU is
 *			not part of the irq affinity mask
 */
struct irq_redirect {
	struct irq_work	work;
	unsigned int	target_cpu;
};

/**
 * struct irq_desc - interrupt descriptor
 * @irq_common_data:	per irq and chip data passed down to chip functions
 * @kstat_irqs:		irq stats per cpu
 * @handle_irq:		highlevel irq-events handler
 * @action:		the irq action chain
 * @status_use_accessors: status information
 * @core_internal_state__do_not_mess_with_it: core internal status information
 * @depth:		disable-depth, for nested irq_disable() calls
 * @wake_depth:		enable depth, for multiple irq_set_irq_wake() callers
 * @tot_count:		stats field for non-percpu irqs
 * @irq_count:		stats field to detect stalled irqs
 * @last_unhandled:	aging timer for unhandled count
 * @irqs_unhandled:	stats field for spurious unhandled interrupts
 * @threads_handled:	stats field for deferred spurious detection of threaded handlers
 * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
 * @lock:		locking for SMP
 * @redirect:		Facility for redirecting interrupts via irq_work
 * @affinity_hint:	hint to user space for preferred irq affinity
 * @affinity_notify:	context for notification of affinity changes
 * @pending_mask:	pending rebalanced interrupts
 * @threads_oneshot:	bitfield to handle shared oneshot threads
 * @threads_active:	number of irqaction threads currently running
 * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
 * @nr_actions:		number of installed actions on this descriptor
 * @no_suspend_depth:	number of irqactions on an irq descriptor with
 *			IRQF_NO_SUSPEND set
 * @force_resume_depth:	number of irqactions on an irq descriptor with
 *			IRQF_FORCE_RESUME set
 * @rcu:		rcu head for delayed free
 * @kobj:		kobject used to represent this struct in sysfs
 * @request_mutex:	mutex to protect request/free before locking desc->lock
 * @dir:		/proc/irq/ procfs entry
 * @debugfs_file:	dentry for the debugfs file
 * @name:		flow handler name for /proc/interrupts output
 */
struct irq_desc {
	struct irq_common_data	irq_common_data;
	struct irq_data		irq_data;
	struct irqstat __percpu	*kstat_irqs;
	irq_flow_handler_t	handle_irq;
	struct irqaction	*action;	/* IRQ action list */
	unsigned int		status_use_accessors;
	unsigned int		core_internal_state__do_not_mess_with_it;
	unsigned int		depth;		/* nested irq disables */
	unsigned int		wake_depth;	/* nested wake enables */
	unsigned int		tot_count;
	unsigned int		irq_count;	/* For detecting broken IRQs */
	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
	unsigned int		irqs_unhandled;
	atomic_t		threads_handled;
	int			threads_handled_last;
	raw_spinlock_t		lock;
	struct cpumask		*percpu_enabled;
#ifdef CONFIG_SMP
	struct irq_redirect	redirect;
	const struct cpumask	*affinity_hint;
	struct irq_affinity_notify *affinity_notify;
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_var_t		pending_mask;
#endif
#endif
	unsigned long		threads_oneshot;
	atomic_t		threads_active;
	wait_queue_head_t	wait_for_threads;
#ifdef CONFIG_PM_SLEEP
	unsigned int		nr_actions;
	unsigned int		no_suspend_depth;
	unsigned int		cond_suspend_depth;
	unsigned int		force_resume_depth;
#endif
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*dir;
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	struct dentry		*debugfs_file;
	const char		*dev_name;
#endif
#ifdef CONFIG_SPARSE_IRQ
	struct rcu_head		rcu;
	struct kobject		kobj;
#endif
	struct mutex		request_mutex;
	int			parent_irq;
	struct module		*owner;
	const char		*name;
#ifdef CONFIG_HARDIRQS_SW_RESEND
	struct hlist_node	resend_node;
#endif
} ____cacheline_internodealigned_in_smp;

#ifdef CONFIG_SPARSE_IRQ
extern void irq_lock_sparse(void);
extern void irq_unlock_sparse(void);
#else
static inline void irq_lock_sparse(void) { }
static inline void irq_unlock_sparse(void) { }
extern struct irq_desc irq_desc[NR_IRQS];
#endif

static inline unsigned int irq_desc_kstat_cpu(struct irq_desc *desc,
					      unsigned int cpu)
{
	return desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0;
}

static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
	return container_of(data->common, struct irq_desc, irq_common_data);
}

static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
{
	return desc->irq_data.irq;
}

static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
	return &desc->irq_data;
}

static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
{
	return desc->irq_data.chip;
}

static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
{
	return desc->irq_data.chip_data;
}

static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
{
	return desc->irq_common_data.handler_data;
}

/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt.
 */
static inline void generic_handle_irq_desc(struct irq_desc *desc)
{
	desc->handle_irq(desc);
}

int handle_irq_desc(struct irq_desc *desc);
int generic_handle_irq(unsigned int irq);
int generic_handle_irq_safe(unsigned int irq);
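/*
 * Illustrative sketch, not part of this header: a demultiplexing flow
 * handler in an irqchip driver typically looks up its per-irq data with
 * irq_desc_get_handler_data() and fans pending child interrupts out via
 * generic_handle_irq(). The struct my_intc, its fields and MY_INTC_PENDING
 * below are made-up names; chained_irq_enter()/chained_irq_exit() come
 * from <linux/irqchip/chained_irq.h>.
 *
 *	static void my_intc_demux_handler(struct irq_desc *desc)
 *	{
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		struct my_intc *intc = irq_desc_get_handler_data(desc);
 *		unsigned long pending;
 *		unsigned int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = readl(intc->base + MY_INTC_PENDING);
 *		for_each_set_bit(bit, &pending, intc->nr_child_irqs)
 *			generic_handle_irq(intc->child_irq[bit]);
 *		chained_irq_exit(chip, desc);
 *	}
 */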
#ifdef CONFIG_IRQ_DOMAIN
/*
 * Convert a HW interrupt number to a logical one using an IRQ domain,
 * and handle the resulting interrupt number. Return -EINVAL if
 * conversion failed.
 */
int generic_handle_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq);
int generic_handle_domain_irq_safe(struct irq_domain *domain, irq_hw_number_t hwirq);
int generic_handle_domain_nmi(struct irq_domain *domain, irq_hw_number_t hwirq);
bool generic_handle_demux_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq);
#endif

/* Test to see if a driver has successfully requested an irq */
static inline int irq_desc_has_action(struct irq_desc *desc)
{
	return desc && desc->action != NULL;
}

/**
 * irq_set_handler_locked - Set irq handler from a locked region
 * @data:	Pointer to the irq_data structure which identifies the irq
 * @handler:	Flow control handler function for this interrupt
 *
 * Sets the handler in the irq descriptor associated with @data.
 *
 * Must be called with irq_desc locked and valid parameters. Typical
 * call site is the irq_set_type() callback.
 */
static inline void irq_set_handler_locked(struct irq_data *data,
					  irq_flow_handler_t handler)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	desc->handle_irq = handler;
}

/**
 * irq_set_chip_handler_name_locked - Set chip, handler and name from a locked region
 * @data:	Pointer to the irq_data structure for which the chip is set
 * @chip:	Pointer to the new irq chip
 * @handler:	Flow control handler function for this interrupt
 * @name:	Name of the interrupt
 *
 * Replaces the irq chip at the proper hierarchy level in @data and
 * sets the handler and name in the associated irq descriptor.
 *
 * Must be called with irq_desc locked and valid parameters.
 */
static inline void
irq_set_chip_handler_name_locked(struct irq_data *data,
				 const struct irq_chip *chip,
				 irq_flow_handler_t handler, const char *name)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	desc->handle_irq = handler;
	desc->name = name;
	data->chip = (struct irq_chip *)chip;
}

bool irq_check_status_bit(unsigned int irq, unsigned int bitmask);

static inline bool irq_balancing_disabled(unsigned int irq)
{
	return irq_check_status_bit(irq, IRQ_NO_BALANCING_MASK);
}

static inline bool irq_is_percpu(unsigned int irq)
{
	return irq_check_status_bit(irq, IRQ_PER_CPU);
}

static inline bool irq_is_percpu_devid(unsigned int irq)
{
	return irq_check_status_bit(irq, IRQ_PER_CPU_DEVID);
}

void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
			     struct lock_class_key *request_class);
static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
		      struct lock_class_key *request_class)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		__irq_set_lockdep_class(irq, lock_class, request_class);
}

#endif
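/*
 * Illustrative sketch, not part of this header: as noted above, a typical
 * caller of irq_set_handler_locked() is an irq_chip::irq_set_type()
 * implementation, which switches the flow handler to match the newly
 * programmed trigger type. The function name my_gpio_irq_set_type() and the
 * hardware programming step are made up for this example;
 * handle_level_irq()/handle_edge_irq() are the generic flow handlers
 * declared in <linux/irq.h>.
 *
 *	static int my_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		// ... program the trigger mode into the hardware ...
 *
 *		if (type & IRQ_TYPE_LEVEL_MASK)
 *			irq_set_handler_locked(d, handle_level_irq);
 *		else
 *			irq_set_handler_locked(d, handle_edge_irq);
 *
 *		return 0;
 *	}
 */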