/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>

#include "internals.h"

#ifdef CONFIG_SMP

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;

	if (irq >= NR_IRQS)
		return;

	while (desc->status & IRQ_INPROGRESS)
		cpu_relax();
}
EXPORT_SYMBOL(synchronize_irq);

#endif

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->chip->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;

	if (irq >= NR_IRQS)
		return;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
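/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * driver that must reprogram its device with the interrupt quiesced can
 * bracket the update with disable_irq()/enable_irq(). The calls nest, so
 * this is safe even if a caller further up the stack has already disabled
 * the line. The foo_dev structure, foo_reprogram() and FOO_CTRL register
 * are made-up names. Remember that disable_irq() must not be called while
 * holding a lock the interrupt handler also takes, or it will deadlock
 * waiting for the handler to finish; from hard interrupt context only
 * disable_irq_nosync() is safe.
 *
 *	static void foo_reprogram(struct foo_dev *foo, u32 ctrl)
 *	{
 *		disable_irq(foo->irq);
 *		writel(ctrl, foo->regs + FOO_CTRL);
 *		enable_irq(foo->irq);
 *	}
 */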
/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 0:
		printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		WARN_ON(1);
		break;
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;

		/* Prevent probing on this irq: */
		desc->status = status | IRQ_NOPROBE;
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(enable_irq);

/**
 * set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int set_irq_wake(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;
	int ret = -ENXIO;
	int (*set_wake)(unsigned, unsigned) = desc->chip->set_wake;

	/*
	 * Wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	spin_lock_irqsave(&desc->lock, flags);
	if (on) {
		if (desc->wake_depth++ == 0)
			desc->status |= IRQ_WAKEUP;
		else
			set_wake = NULL;
	} else {
		if (desc->wake_depth == 0) {
			printk(KERN_WARNING "Unbalanced IRQ %d "
					    "wake disable\n", irq);
			WARN_ON(1);
		} else if (--desc->wake_depth == 0)
			desc->status &= ~IRQ_WAKEUP;
		else
			set_wake = NULL;
	}
	if (set_wake)
		ret = desc->chip->set_wake(irq, on);
	spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_irq_wake);
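/*
 * Usage sketch (illustrative only, not part of this file): a driver whose
 * interrupt should wake the system from suspend can flip wakeup mode from
 * its power management hooks. The usual driver-facing entry points are the
 * enable_irq_wake()/disable_irq_wake() wrappers in <linux/interrupt.h>,
 * which come down to set_irq_wake(irq, 1) and set_irq_wake(irq, 0). The
 * foo_dev structure and foo_suspend()/foo_resume() callbacks below are
 * made-up names. Enables and disables must balance, mirroring the
 * wake_depth accounting above.
 *
 *	static int foo_suspend(struct foo_dev *foo)
 *	{
 *		if (foo->wakeup_enabled)
 *			set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct foo_dev *foo)
 *	{
 *		if (foo->wakeup_enabled)
 *			set_irq_wake(foo->irq, 0);
 *		return 0;
 *	}
 */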
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irqaction *action;

	if (irq >= NR_IRQS || irq_desc[irq].status & IRQ_NOREQUEST)
		return 0;

	action = irq_desc[irq].action;
	if (action)
		if (irqflags & action->flags & IRQF_SHARED)
			action = NULL;

	return !action;
}

void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
	/*
	 * If the architecture still has not overridden
	 * the flow handler then zap the default. This
	 * should catch incorrect flow-type setting.
	 */
	if (desc->handle_irq == &handle_bad_irq)
		desc->handle_irq = NULL;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
int setup_irq(unsigned int irq, struct irqaction *new)
{
	struct irq_desc *desc = irq_desc + irq;
	struct irqaction *old, **p;
	const char *old_name = NULL;
	unsigned long flags;
	int shared = 0;

	if (irq >= NR_IRQS)
		return -EINVAL;

	if (desc->chip == &no_irq_chip)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it first,
		 * outside of the atomic block. It might clear the entropy
		 * pool if the wrong driver is attempted to be loaded
		 * without actually installing a new handler, but that is
		 * hardly a problem: only the sysadmin can do this anyway.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically.
	 */
	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	old = *p;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
			old_name = old->name;
			goto mismatch;
		}

#if defined(CONFIG_IRQ_PER_CPU)
		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;
#endif

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;
#if defined(CONFIG_IRQ_PER_CPU)
	if (new->flags & IRQF_PERCPU)
		desc->status |= IRQ_PER_CPU;
#endif
	if (!shared) {
		irq_chip_set_defaults(desc->chip);

		/* Set up the trigger type (level, edge, polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			if (desc->chip && desc->chip->set_type)
				desc->chip->set_type(irq,
						new->flags & IRQF_TRIGGER_MASK);
			else
				/*
				 * IRQF_TRIGGER_* but the PIC does not support
				 * multiple flow-types?
				 */
				printk(KERN_WARNING "No IRQF_TRIGGER set_type "
				       "function for IRQ %d (%s)\n", irq,
				       desc->chip ? desc->chip->name :
				       "unknown");
		} else
			compat_irq_chip_set_default_handler(desc);

		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
				  IRQ_INPROGRESS);

		if (!(desc->status & IRQ_NOAUTOEN)) {
			desc->depth = 0;
			desc->status &= ~IRQ_DISABLED;
			if (desc->chip->startup)
				desc->chip->startup(irq);
			else
				desc->chip->enable(irq);
		} else
			/* Undo nested disables: */
			desc->depth = 1;
	}
	/* Reset broken irq detection when installing a new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	spin_unlock_irqrestore(&desc->lock, flags);

	new->irq = irq;
	register_irq_proc(irq);
	new->dir = NULL;
	register_handler_proc(irq, new);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
	spin_unlock_irqrestore(&desc->lock, flags);
	return -EBUSY;
}
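/*
 * Illustration of the sharing rules enforced above (hypothetical drivers
 * and handler names, not part of this file): a second action may be added
 * to a line only if both the installed action and the new one carry
 * IRQF_SHARED and agree on the trigger type; otherwise setup_irq() reports
 * the mismatch and returns -EBUSY.
 *
 *	(first driver, succeeds)
 *	request_irq(irq, foo_handler, IRQF_SHARED | IRQF_TRIGGER_LOW,
 *		    "foo", foo);
 *
 *	(second driver, succeeds: shared and same trigger bits)
 *	request_irq(irq, bar_handler, IRQF_SHARED | IRQF_TRIGGER_LOW,
 *		    "bar", bar);
 *
 *	(third driver, fails with -EBUSY: trigger bits differ)
 *	request_irq(irq, baz_handler, IRQF_SHARED | IRQF_TRIGGER_HIGH,
 *		    "baz", baz);
 */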
/**
 * free_irq - free an interrupt
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc;
	struct irqaction **p;
	unsigned long flags;

	WARN_ON(in_interrupt());
	if (irq >= NR_IRQS)
		return;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	for (;;) {
		struct irqaction *action = *p;

		if (action) {
			struct irqaction **pp = p;

			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;

			/* Currently used only by UML, might disappear one day. */
#ifdef CONFIG_IRQ_RELEASE_METHOD
			if (desc->chip->release)
				desc->chip->release(irq, dev_id);
#endif

			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				if (desc->chip->shutdown)
					desc->chip->shutdown(irq);
				else
					desc->chip->disable(irq);
			}
			spin_unlock_irqrestore(&desc->lock, flags);
			unregister_handler_proc(irq, action);

			/* Make sure it's not being used on another CPU */
			synchronize_irq(irq);
			kfree(action);
			return;
		}
		printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
		spin_unlock_irqrestore(&desc->lock, flags);
		return;
	}
}
EXPORT_SYMBOL(free_irq);
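/*
 * Usage sketch (illustrative only, not part of this file): teardown mirrors
 * request_irq(). On a shared line the device must first be told to stop
 * raising interrupts, and the dev_id passed here must be the same cookie
 * that was passed to request_irq(), since it is what identifies the action
 * to remove. The foo_dev structure and FOO_IRQ_ENABLE register are made-up
 * names.
 *
 *	static void foo_shutdown(struct foo_dev *foo)
 *	{
 *		writel(0, foo->regs + FOO_IRQ_ENABLE);
 *		free_irq(foo->irq, foo);
 *	}
 *
 * Because free_irq() calls synchronize_irq() before freeing the action, it
 * must never be called from interrupt context or while holding a lock the
 * handler takes.
 */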
/**
 * request_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irqflags: Interrupt type flags
 * @devname: An ASCII name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non-NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_DISABLED		Disable local interrupts while processing
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *devname, void *dev_id)
{
	struct irqaction *action;
	int retval;

#ifdef CONFIG_LOCKDEP
	/*
	 * Lockdep wants atomic interrupt handlers:
	 */
	irqflags |= IRQF_DISABLED;
#endif
	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;
	if (irq >= NR_IRQS)
		return -EINVAL;
	if (irq_desc[irq].status & IRQ_NOREQUEST)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	select_smp_affinity(irq);

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);

	return retval;
}
EXPORT_SYMBOL(request_irq);
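/*
 * Usage sketch (illustrative only, not part of this file): typical
 * driver-side use of request_irq() on a shared line. The foo_dev structure,
 * handler and register names are hypothetical; the important pattern is
 * passing the device structure as dev_id and returning IRQ_NONE when the
 * device did not actually assert the interrupt.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *		u32 status = readl(foo->regs + FOO_STATUS);
 *
 *		if (!(status & FOO_IRQ_PENDING))
 *			return IRQ_NONE;
 *
 *		writel(status, foo->regs + FOO_STATUS);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int foo_probe(struct foo_dev *foo)
 *	{
 *		return request_irq(foo->irq, foo_interrupt, IRQF_SHARED,
 *				   "foo", foo);
 *	}
 *
 * The matching free_irq(foo->irq, foo) in the remove path must use the
 * same dev_id cookie.
 */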