// SPDX-License-Identifier: GPL-2.0
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <linux/atomic.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/i8259.h>

/*
 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 * present in the majority of PC/AT boxes, plus some generic
 * x86-specific things, if generic specifics make any sense at all.
 */
static void init_8259A(int auto_eoi);

static int i8259A_auto_eoi;
DEFINE_RAW_SPINLOCK(i8259A_lock);

/*
 * 8259A PIC functions to handle ISA devices:
 */

/*
 * This contains the irq mask for both 8259A irq controllers.
 */
unsigned int cached_irq_mask = 0xffff;

/*
 * Not all IRQs can be routed through the IO-APIC, e.g. on certain (older)
 * boards the timer interrupt is not really connected to any IO-APIC pin,
 * it's fed to the master 8259A's IR0 line only.
 *
 * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
 * This 'mixed mode' IRQ handling costs nothing because it's only used
 * at IRQ setup time.
 */
unsigned long io_apic_irqs;

static void mask_8259A_irq(unsigned int irq)
{
	unsigned int mask = 1 << irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask |= mask;
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static void disable_8259A_irq(struct irq_data *data)
{
	mask_8259A_irq(data->irq);
}

static void unmask_8259A_irq(unsigned int irq)
{
	unsigned int mask = ~(1 << irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask &= mask;
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static void enable_8259A_irq(struct irq_data *data)
{
	unmask_8259A_irq(data->irq);
}

static int i8259A_irq_pending(unsigned int irq)
{
	unsigned int mask = 1 << irq;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	if (irq < 8)
		ret = inb(PIC_MASTER_CMD) & mask;
	else
		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	return ret;
}

static void make_8259A_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	io_apic_irqs &= ~(1 << irq);
	irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
	enable_irq(irq);
}
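/*
 * Illustrative sketch (hypothetical helper, not called anywhere): the
 * cached_irq_mask above shadows both IMRs as a single 16-bit value -
 * bits 0-7 mirror the master IMR behind PIC_MASTER_IMR, bits 8-15 mirror
 * the slave IMR behind PIC_SLAVE_IMR.  The helper below only demonstrates
 * the bit test that mask_8259A_irq()/unmask_8259A_irq() maintain under
 * i8259A_lock.
 */
static inline int i8259A_irq_cached_masked(unsigned int irq)
{
	return !!(cached_irq_mask & (1U << irq));
}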
/*
 * This function is expected to be called rarely. Switching between
 * 8259A registers is slow.
 * This has to be protected by the irq controller spinlock
 * before being called.
 */
static inline int i8259A_irq_real(unsigned int irq)
{
	int value;
	int irqmask = 1 << irq;

	if (irq < 8) {
		outb(0x0B, PIC_MASTER_CMD);	/* ISR register */
		value = inb(PIC_MASTER_CMD) & irqmask;
		outb(0x0A, PIC_MASTER_CMD);	/* back to the IRR register */
		return value;
	}
	outb(0x0B, PIC_SLAVE_CMD);	/* ISR register */
	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
	outb(0x0A, PIC_SLAVE_CMD);	/* back to the IRR register */
	return value;
}

/*
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI), and the order of EOI
 * to the two 8259s is important!
 */
static void mask_and_ack_8259A(struct irq_data *data)
{
	unsigned int irq = data->irq;
	unsigned int irqmask = 1 << irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	/*
	 * Lightweight spurious IRQ detection. We do not want
	 * to overdo spurious IRQ handling - it's usually a sign
	 * of hardware problems, so we only do the checks we can
	 * do without slowing down good hardware unnecessarily.
	 *
	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
	 * usually resulting from the 8259A-1|2 PICs) occur
	 * even if the IRQ is masked in the 8259A. Thus we
	 * can check spurious 8259A IRQs without doing the
	 * quite slow i8259A_irq_real() call for every IRQ.
	 * This does not cover 100% of spurious interrupts,
	 * but should be enough to warn the user that there
	 * is something bad going on ...
	 */
	if (cached_irq_mask & irqmask)
		goto spurious_8259A_irq;
	cached_irq_mask |= irqmask;

handle_real_irq:
	if (irq & 8) {
		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_slave_mask, PIC_SLAVE_IMR);
		/* 'Specific EOI' to slave */
		outb(0x60+(irq&7), PIC_SLAVE_CMD);
		/* 'Specific EOI' to master-IRQ2 */
		outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD);
	} else {
		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_master_mask, PIC_MASTER_IMR);
		outb(0x60+irq, PIC_MASTER_CMD);	/* 'Specific EOI' to master */
	}
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
	return;

spurious_8259A_irq:
	/*
	 * this is the slow path - should happen rarely.
	 */
	if (i8259A_irq_real(irq))
		/*
		 * oops, the IRQ _is_ in service according to the
		 * 8259A - not spurious, go handle it.
		 */
		goto handle_real_irq;

	{
		static int spurious_irq_mask;
		/*
		 * At this point we can be sure the IRQ is spurious,
		 * let's ACK and report it. [once per IRQ]
		 */
		if (!(spurious_irq_mask & irqmask)) {
			printk(KERN_DEBUG
			       "spurious 8259A interrupt: IRQ%d.\n", irq);
			spurious_irq_mask |= irqmask;
		}
		atomic_inc(&irq_err_count);
		/*
		 * Theoretically we do not have to handle this IRQ,
		 * but in Linux this does not cause problems and is
		 * simpler for us.
		 */
		goto handle_real_irq;
	}
}
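/*
 * Worked example for the specific-EOI writes above (illustrative, assuming
 * PIC_CASCADE_IR == 2 as on PC/AT): OCW2 is 0x60 | in-service level, so
 * acking IRQ10 sends 0x60 + (10 & 7) = 0x62 to the slave and then
 * 0x60 + 2 = 0x62 to the master for the cascade line, while acking IRQ3
 * sends just 0x60 + 3 = 0x63 to the master.
 */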
struct irq_chip i8259A_chip = {
	.name		= "XT-PIC",
	.irq_mask	= disable_8259A_irq,
	.irq_disable	= disable_8259A_irq,
	.irq_unmask	= enable_8259A_irq,
	.irq_mask_ack	= mask_and_ack_8259A,
};

static char irq_trigger[2];
/*
 * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ
 */
static void restore_ELCR(char *trigger)
{
	outb(trigger[0], 0x4d0);
	outb(trigger[1], 0x4d1);
}

static void save_ELCR(char *trigger)
{
	/* IRQ 0,1,2,8,13 are marked as reserved */
	trigger[0] = inb(0x4d0) & 0xF8;
	trigger[1] = inb(0x4d1) & 0xDE;
}

static void i8259A_resume(void)
{
	init_8259A(i8259A_auto_eoi);
	restore_ELCR(irq_trigger);
}

static int i8259A_suspend(void)
{
	save_ELCR(irq_trigger);
	return 0;
}

static void i8259A_shutdown(void)
{
	/*
	 * Put the i8259A into a quiescent state that
	 * the kernel initialization code can get it
	 * out of.
	 */
	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
}

static struct syscore_ops i8259_syscore_ops = {
	.suspend	= i8259A_suspend,
	.resume		= i8259A_resume,
	.shutdown	= i8259A_shutdown,
};

static void mask_8259A(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static void unmask_8259A(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static int probe_8259A(void)
{
	unsigned long flags;
	unsigned char probe_val = ~(1 << PIC_CASCADE_IR);
	unsigned char new_val;

	/*
	 * Check to see if we have a PIC.
	 * Mask all except the cascade and read
	 * back the value we just wrote. If we don't
	 * have a PIC, we will read 0xff as opposed to the
	 * value we wrote.
	 */
	raw_spin_lock_irqsave(&i8259A_lock, flags);

	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
	outb(probe_val, PIC_MASTER_IMR);
	new_val = inb(PIC_MASTER_IMR);
	if (new_val != probe_val) {
		printk(KERN_INFO "Using NULL legacy PIC\n");
		legacy_pic = &null_legacy_pic;
	}

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
	return nr_legacy_irqs();
}
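/*
 * Worked example for probe_8259A() above (illustrative, assuming
 * PIC_CASCADE_IR == 2): probe_val = ~(1 << 2) = 0xfb.  A real 8259A latches
 * the write, so reading the IMR back returns 0xfb; with no PIC present the
 * read typically returns 0xff (floating bus) and null_legacy_pic is
 * installed instead.
 */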
static void init_8259A(int auto_eoi)
{
	unsigned long flags;

	i8259A_auto_eoi = auto_eoi;

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */

	/*
	 * outb_pic - this has to work on a wide range of PC hardware.
	 */
	outb_pic(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */

	/* ICW2: 8259A-1 IR0-7 mapped to ISA_IRQ_VECTOR(0) */
	outb_pic(ISA_IRQ_VECTOR(0), PIC_MASTER_IMR);

	/* 8259A-1 (the master) has a slave on IR2 */
	outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);

	if (auto_eoi)	/* master does Auto EOI */
		outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
	else		/* master expects normal EOI */
		outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);

	outb_pic(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */

	/* ICW2: 8259A-2 IR0-7 mapped to ISA_IRQ_VECTOR(8) */
	outb_pic(ISA_IRQ_VECTOR(8), PIC_SLAVE_IMR);
	/* 8259A-2 is a slave on master's IR2 */
	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);
	/* (slave's support for AEOI in flat mode is to be investigated) */
	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);

	if (auto_eoi)
		/*
		 * In AEOI mode we just have to mask the interrupt
		 * when acking.
		 */
		i8259A_chip.irq_mask_ack = disable_8259A_irq;
	else
		i8259A_chip.irq_mask_ack = mask_and_ack_8259A;

	udelay(100);		/* wait for 8259A to initialize */

	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

/*
 * Make the i8259 a driver so that we can select PIC functions at run time.
 * The goal is to make x86 binary compatible among PC-compatible and
 * non-PC-compatible platforms, such as x86 MID.
 */

static void legacy_pic_noop(void) { }
static void legacy_pic_uint_noop(unsigned int unused) { }
static void legacy_pic_int_noop(int unused) { }
static int legacy_pic_irq_pending_noop(unsigned int irq)
{
	return 0;
}
static int legacy_pic_probe(void)
{
	return 0;
}

struct legacy_pic null_legacy_pic = {
	.nr_legacy_irqs = 0,
	.chip = &dummy_irq_chip,
	.mask = legacy_pic_uint_noop,
	.unmask = legacy_pic_uint_noop,
	.mask_all = legacy_pic_noop,
	.restore_mask = legacy_pic_noop,
	.init = legacy_pic_int_noop,
	.probe = legacy_pic_probe,
	.irq_pending = legacy_pic_irq_pending_noop,
	.make_irq = legacy_pic_uint_noop,
};

struct legacy_pic default_legacy_pic = {
	.nr_legacy_irqs = NR_IRQS_LEGACY,
	.chip = &i8259A_chip,
	.mask = mask_8259A_irq,
	.unmask = unmask_8259A_irq,
	.mask_all = mask_8259A,
	.restore_mask = unmask_8259A,
	.init = init_8259A,
	.probe = probe_8259A,
	.irq_pending = i8259A_irq_pending,
	.make_irq = make_8259A_irq,
};

struct legacy_pic *legacy_pic = &default_legacy_pic;
EXPORT_SYMBOL(legacy_pic);

static int __init i8259A_init_ops(void)
{
	if (legacy_pic == &default_legacy_pic)
		register_syscore_ops(&i8259_syscore_ops);

	return 0;
}

device_initcall(i8259A_init_ops);
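/*
 * Usage sketch (illustrative, not part of this file): platform code is
 * expected to go through the legacy_pic indirection rather than calling the
 * 8259A helpers directly, e.g.:
 *
 *	if (legacy_pic->irq_pending(irq))
 *		legacy_pic->mask(irq);
 *	...
 *	legacy_pic->init(0);
 *
 * so that platforms without a PIC (legacy_pic == &null_legacy_pic, as
 * selected by probe_8259A() above) transparently get the no-op
 * implementations.
 */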