/*
 * linux/arch/alpha/kernel/sys_titan.c
 *
 * Copyright (C) 1995 David A Rusling
 * Copyright (C) 1996, 1999 Jay A Estabrook
 * Copyright (C) 1998, 1999 Richard Henderson
 * Copyright (C) 1999, 2000 Jeff Wiedemeier
 *
 * Code supporting TITAN systems (EV6+TITAN), currently:
 *      Privateer
 *      Falcon
 *      Granite
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>

#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_titan.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
#include "err_impl.h"


/*
 * Titan generic
 */

/*
 * Titan supports up to 4 CPUs
 */
static unsigned long titan_cpu_irq_affinity[4] = { ~0UL, ~0UL, ~0UL, ~0UL };

/*
 * Mask is set (1) if enabled
 */
static unsigned long titan_cached_irq_mask;

/*
 * Need SMP-safe access to interrupt CSRs
 */
DEFINE_SPINLOCK(titan_irq_lock);

static void
titan_update_irq_hw(unsigned long mask)
{
	register titan_cchip *cchip = TITAN_cchip;
	unsigned long isa_enable = 1UL << 55;
	register int bcpu = boot_cpuid;

#ifdef CONFIG_SMP
	cpumask_t cpm = cpu_present_map;
	volatile unsigned long *dim0, *dim1, *dim2, *dim3;
	unsigned long mask0, mask1, mask2, mask3, dummy;

	mask &= ~isa_enable;
	mask0 = mask & titan_cpu_irq_affinity[0];
	mask1 = mask & titan_cpu_irq_affinity[1];
	mask2 = mask & titan_cpu_irq_affinity[2];
	mask3 = mask & titan_cpu_irq_affinity[3];

	if (bcpu == 0) mask0 |= isa_enable;
	else if (bcpu == 1) mask1 |= isa_enable;
	else if (bcpu == 2) mask2 |= isa_enable;
	else mask3 |= isa_enable;

	dim0 = &cchip->dim0.csr;
	dim1 = &cchip->dim1.csr;
	dim2 = &cchip->dim2.csr;
	dim3 = &cchip->dim3.csr;
	if (!cpu_isset(0, cpm)) dim0 = &dummy;
	if (!cpu_isset(1, cpm)) dim1 = &dummy;
	if (!cpu_isset(2, cpm)) dim2 = &dummy;
	if (!cpu_isset(3, cpm)) dim3 = &dummy;

	*dim0 = mask0;
	*dim1 = mask1;
	*dim2 = mask2;
	*dim3 = mask3;
	mb();
	*dim0;
	*dim1;
	*dim2;
	*dim3;
#else
	volatile unsigned long *dimB;
	dimB = &cchip->dim0.csr;
	if (bcpu == 1) dimB = &cchip->dim1.csr;
	else if (bcpu == 2) dimB = &cchip->dim2.csr;
	else if (bcpu == 3) dimB = &cchip->dim3.csr;

	*dimB = mask | isa_enable;
	mb();
	*dimB;
#endif
}

static inline void
titan_enable_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;
	spin_lock(&titan_irq_lock);
	titan_cached_irq_mask |= 1UL << (irq - 16);
	titan_update_irq_hw(titan_cached_irq_mask);
	spin_unlock(&titan_irq_lock);
}

static inline void
titan_disable_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;
	spin_lock(&titan_irq_lock);
	titan_cached_irq_mask &= ~(1UL << (irq - 16));
	titan_update_irq_hw(titan_cached_irq_mask);
	spin_unlock(&titan_irq_lock);
}

static void
titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
	int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		if (cpu_isset(cpu, affinity))
			titan_cpu_irq_affinity[cpu] |= 1UL << irq;
		else
			titan_cpu_irq_affinity[cpu] &= ~(1UL << irq);
	}

}

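/*
 * irq_chip ->irq_set_affinity callback: recompute the per-CPU affinity
 * masks for this interrupt, then rewrite the DIM CSRs from the cached
 * enable mask.
 */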
static int
titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
		       bool force)
{
	unsigned int irq = d->irq;
	spin_lock(&titan_irq_lock);
	titan_cpu_set_irq_affinity(irq - 16, *affinity);
	titan_update_irq_hw(titan_cached_irq_mask);
	spin_unlock(&titan_irq_lock);

	return 0;
}

static void
titan_device_interrupt(unsigned long vector)
{
	printk("titan_device_interrupt: NOT IMPLEMENTED YET!!\n");
}

static void
titan_srm_device_interrupt(unsigned long vector)
{
	int irq;

	irq = (vector - 0x800) >> 4;
	handle_irq(irq);
}


static void __init
init_titan_irqs(struct irq_chip * ops, int imin, int imax)
{
	long i;
	for (i = imin; i <= imax; ++i) {
		irq_set_chip_and_handler(i, ops, handle_level_irq);
		irq_set_status_flags(i, IRQ_LEVEL);
	}
}

static struct irq_chip titan_irq_type = {
	.name = "TITAN",
	.irq_unmask = titan_enable_irq,
	.irq_mask = titan_disable_irq,
	.irq_mask_ack = titan_disable_irq,
	.irq_set_affinity = titan_set_irq_affinity,
};

static irqreturn_t
titan_intr_nop(int irq, void *dev_id)
{
	/*
	 * This is a NOP interrupt handler for the purposes of
	 * event counting -- just return.
	 */
	return IRQ_HANDLED;
}

static void __init
titan_init_irq(void)
{
	if (alpha_using_srm && !alpha_mv.device_interrupt)
		alpha_mv.device_interrupt = titan_srm_device_interrupt;
	if (!alpha_mv.device_interrupt)
		alpha_mv.device_interrupt = titan_device_interrupt;

	titan_update_irq_hw(0);

	init_titan_irqs(&titan_irq_type, 16, 63 + 16);
}

static void __init
titan_legacy_init_irq(void)
{
	/* init the legacy dma controller */
	outb(0, DMA1_RESET_REG);
	outb(0, DMA2_RESET_REG);
	outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
	outb(0, DMA2_MASK_REG);

	/* init the legacy irq controller */
	init_i8259a_irqs();

	/* init the titan irqs */
	titan_init_irq();
}

void
titan_dispatch_irqs(u64 mask)
{
	unsigned long vector;

	/*
	 * Mask down to those interrupts which are enabled on this processor
	 */
	mask &= titan_cpu_irq_affinity[smp_processor_id()];

	/*
	 * Dispatch all requested interrupts
	 */
	while (mask) {
		/* convert to SRM vector... priority is <63> -> <0> */
		vector = 63 - __kernel_ctlz(mask);
		mask &= ~(1UL << vector);	/* clear it out */
		vector = 0x900 + (vector << 4);	/* convert to SRM vector */

		/* dispatch it */
		alpha_mv.device_interrupt(vector);
	}
}


/*
 * Titan Family
 */
static void __init
titan_request_irq(unsigned int irq, irq_handler_t handler,
		  unsigned long irqflags, const char *devname,
		  void *dev_id)
{
	int err;
	err = request_irq(irq, handler, irqflags, devname, dev_id);
	if (err) {
		printk("titan_request_irq for IRQ %d returned %d; ignoring\n",
		       irq, err);
	}
}

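/*
 * titan_late_init() is called from titan_init_pci() rather than at
 * init_arch time so the error interrupts below can be requested once
 * everything is basically up; the nop handler exists only so the
 * events are counted.
 */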
static void __init
titan_late_init(void)
{
	/*
	 * Enable the system error interrupts. These interrupts are
	 * all reported to the kernel as machine checks, so the handler
	 * is a nop so it can be called to count the individual events.
	 */
	titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED,
			  "CChip Error", NULL);
	titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED,
			  "PChip 0 H_Error", NULL);
	titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED,
			  "PChip 1 H_Error", NULL);
	titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED,
			  "PChip 0 C_Error", NULL);
	titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED,
			  "PChip 1 C_Error", NULL);

	/*
	 * Register our error handlers.
	 */
	titan_register_error_handlers();

	/*
	 * Check if the console left us any error logs.
	 */
	cdl_check_console_data_log();

}

static int __devinit
titan_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	u8 intline;
	int irq;

	/* Get the current intline. */
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
	irq = intline;

	/* Is it explicitly routed through ISA? */
	if ((irq & 0xF0) == 0xE0)
		return irq;

	/* Offset by 16 to make room for ISA interrupts 0 - 15. */
	return irq + 16;
}

static void __init
titan_init_pci(void)
{
	/*
	 * This isn't really the right place, but there's some init
	 * that needs to be done after everything is basically up.
	 */
	titan_late_init();

	pci_probe_only = 1;
	common_init_pci();
	SMC669_Init(0);
	locate_and_init_vga(NULL);
}


/*
 * Privateer
 */
static void __init
privateer_init_pci(void)
{
	/*
	 * Hook a couple of extra err interrupts that the
	 * common titan code won't.
	 */
	titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED,
			  "NMI", NULL);
	titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED,
			  "Temperature Warning", NULL);

	/*
	 * Finish with the common version.
	 */
	return titan_init_pci();
}

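/*
 * Two machine vectors follow: titan_mv for generic TITAN systems and
 * privateer_mv, which differs only in its machine check handler and
 * in hooking the extra error interrupts via privateer_init_pci().
 */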
/*
 * The System Vectors.
 */
struct alpha_machine_vector titan_mv __initmv = {
	.vector_name = "TITAN",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TITAN_IO,
	.machine_check = titan_machine_check,
	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address = DEFAULT_IO_BASE,
	.min_mem_address = DEFAULT_MEM_BASE,
	.pci_dac_offset = TITAN_DAC_OFFSET,

	.nr_irqs = 80,	/* 64 + 16 */
	/* device_interrupt will be filled in by titan_init_irq */

	.agp_info = titan_agp_info,

	.init_arch = titan_init_arch,
	.init_irq = titan_legacy_init_irq,
	.init_rtc = common_init_rtc,
	.init_pci = titan_init_pci,

	.kill_arch = titan_kill_arch,
	.pci_map_irq = titan_map_irq,
	.pci_swizzle = common_swizzle,
};
ALIAS_MV(titan)

struct alpha_machine_vector privateer_mv __initmv = {
	.vector_name = "PRIVATEER",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TITAN_IO,
	.machine_check = privateer_machine_check,
	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address = DEFAULT_IO_BASE,
	.min_mem_address = DEFAULT_MEM_BASE,
	.pci_dac_offset = TITAN_DAC_OFFSET,

	.nr_irqs = 80,	/* 64 + 16 */
	/* device_interrupt will be filled in by titan_init_irq */

	.agp_info = titan_agp_info,

	.init_arch = titan_init_arch,
	.init_irq = titan_legacy_init_irq,
	.init_rtc = common_init_rtc,
	.init_pci = privateer_init_pci,

	.kill_arch = titan_kill_arch,
	.pci_map_irq = titan_map_irq,
	.pci_swizzle = common_swizzle,
};
/* No alpha_mv alias for privateer since we compile it
   in unconditionally with titan; setup_arch knows how to cope. */