/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

static int irqfixup __read_mostly;

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);

/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(int irq, struct irq_desc *desc)
{
	struct irqaction *action;
	int ok = 0, work = 0;

	spin_lock(&desc->lock);
	/* Already running on another processor */
	if (desc->status & IRQ_INPROGRESS) {
		/*
		 * Already running: If it is shared get the other
		 * CPU to go looking for our mystery interrupt too
		 */
		if (desc->action && (desc->action->flags & IRQF_SHARED))
			desc->status |= IRQ_PENDING;
		spin_unlock(&desc->lock);
		return ok;
	}
	/* Honour the normal IRQ locking */
	desc->status |= IRQ_INPROGRESS;
	action = desc->action;
	spin_unlock(&desc->lock);

	while (action) {
		/* Only shared IRQ handlers are safe to call */
		if (action->flags & IRQF_SHARED) {
			if (action->handler(irq, action->dev_id) ==
				IRQ_HANDLED)
				ok = 1;
		}
		action = action->next;
	}
	local_irq_disable();
	/* Now clean up the flags */
	spin_lock(&desc->lock);
	action = desc->action;

	/*
	 * While we were looking for a fixup someone queued a real
	 * IRQ clashing with our walk:
	 */
	while ((desc->status & IRQ_PENDING) && action) {
		/*
		 * Perform real IRQ processing for the IRQ we deferred
		 */
		work = 1;
		spin_unlock(&desc->lock);
		handle_IRQ_event(irq, action);
		spin_lock(&desc->lock);
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
	/*
	 * If we did actual work for the real IRQ line we must let the
	 * IRQ controller clean up too
	 */
	if (work && desc->chip && desc->chip->end)
		desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return ok;
}

static int misrouted_irq(int irq)
{
	struct irq_desc *desc;
	int i, ok = 0;

	for_each_irq_desc(i, desc) {
		if (!i)
			continue;

		if (i == irq)	/* Already tried */
			continue;

		if (try_one_irq(i, desc))
			ok = 1;
	}
	/* So the caller can adjust the irq error counts */
	return ok;
}

static void poll_all_shared_irqs(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		unsigned int status;

		if (!i)
			continue;

		/* Racy but it doesn't matter */
		status = desc->status;
		barrier();
		if (!(status & IRQ_SPURIOUS_DISABLED))
			continue;

		local_irq_disable();
		try_one_irq(i, desc);
		local_irq_enable();
	}
}

static void poll_spurious_irqs(unsigned long dummy)
{
	poll_all_shared_irqs();

	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

#ifdef CONFIG_DEBUG_SHIRQ
void debug_poll_all_shared_irqs(void)
{
	poll_all_shared_irqs();
}
#endif

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly
 *  functioning device sharing an IRQ with the failing one)
 *
 * Called under desc->lock
 */

static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
		 irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared (try booting with "
				"the \"irqpoll\" option)\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");

	action = desc->action;
	while (action) {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	}
}

static void
report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count > 0) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}

static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
{
	struct irqaction *action;

	if (!irqfixup)
		return 0;

	/* We didn't actually handle the IRQ - see if it was misrouted? */
	if (action_ret == IRQ_NONE)
		return 1;

	/*
	 * But for 'irqfixup == 2' we also do it for handled interrupts if
	 * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
	 * traditional PC timer interrupt.. Legacy)
	 */
	if (irqfixup < 2)
		return 0;

	if (!irq)
		return 1;

	/*
	 * Since we don't get the descriptor lock, "action" can
	 * change under us. We don't really care, but we don't
	 * want to follow a NULL pointer. So tell the compiler to
	 * just load it once by using a barrier.
	 */
	action = desc->action;
	barrier();
	return action && (action->flags & IRQF_IRQPOLL);
}

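/*
 * note_interrupt() is the bookkeeping hook for the statistics described
 * above: it tracks how many of the last 100,000 interrupts on this line
 * went unhandled, optionally polls the other IRQ lines for a misrouted
 * interrupt (irqfixup/irqpoll), and once the line looks stuck it reports
 * the handlers, disables the IRQ and leaves the poll timer to service it.
 */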
void note_interrupt(unsigned int irq, struct irq_desc *desc,
		    irqreturn_t action_ret)
{
	if (unlikely(action_ret != IRQ_HANDLED)) {
		/*
		 * If we are seeing only the odd spurious IRQ caused by
		 * bus asynchronicity then don't eventually trigger an error,
		 * otherwise the counter becomes a doomsday timer for
		 * otherwise working systems
		 */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
		if (unlikely(action_ret != IRQ_NONE))
			report_bad_irq(irq, desc, action_ret);
	}

	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);
		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
		desc->depth++;
		desc->chip->disable(irq);

		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}

int noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");

	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");

static int __init irqfixup_setup(char *str)
{
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");

	return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
	irqfixup = 2;
	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
			"enabled\n");
	printk(KERN_WARNING "This may significantly impact system "
			"performance\n");
	return 1;
}

__setup("irqpoll", irqpoll_setup);