/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
} __attribute__((aligned(THREAD_SIZE)));

static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);

static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, hardirq_stack, THREAD_SIZE);
static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, softirq_stack, THREAD_SIZE);

/*
 * Call func on the given stack: %ebx is callee-saved, so it keeps the
 * original stack pointer across the call and lets us switch back afterwards.
 */
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
	union irq_ctx *curctx, *irqctx;
	u32 *isp, arg1, arg2;

	curctx = (union irq_ctx *) current_thread_info();
	irqctx = __get_cpu_var(hardirq_ctx);

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curctx == irqctx))
		return 0;

	/* build the stack frame on the IRQ stack */
	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
	irqctx->tinfo.task = curctx->tinfo.task;
	irqctx->tinfo.previous_esp = current_stack_pointer;

	/*
	 * Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context.
	 */
	irqctx->tinfo.preempt_count =
		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

	/* hand irq and desc to desc->handle_irq() on the hardirq stack */
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
		     : "0" (irq),   "1" (desc),  "2" (isp),
		       "D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (per_cpu(hardirq_ctx, cpu))
		return;

	irqctx = &per_cpu(hardirq_stack, cpu);
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(hardirq_ctx, cpu) = irqctx;

	irqctx = &per_cpu(softirq_stack, cpu);
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= 0;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(softirq_ctx, cpu) = irqctx;

	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}

void irq_ctx_exit(int cpu)
{
	per_cpu(hardirq_ctx, cpu) = NULL;
}

/*
 * Run pending softirqs on the per-CPU softirq stack, unless we are
 * already in interrupt context.
 */
asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = __get_cpu_var(softirq_ctx);
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

		call_on_stack(__do_softirq, isp);
		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}

bool handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct irq_desc *desc;
	int overflow;

	overflow = check_stack_overflow();

	desc = irq_to_desc(irq);
	if (unlikely(!desc))
		return false;

	if (!execute_on_irq_stack(overflow, desc, irq)) {
		if (unlikely(overflow))
			print_stack_overflow();
		desc->handle_irq(irq, desc);
	}

	return true;
}