/*
 * Common prep/pmac/chrp boot and setup code.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/tty.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/lmb.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/cputable.h>
#include <asm/bootx.h>
#include <asm/btext.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pmac_feature.h>
#include <asm/sections.h>
#include <asm/nvram.h>
#include <asm/xmon.h>
#include <asm/time.h>
#include <asm/serial.h>
#include <asm/udbg.h>

#include "setup.h"

#define DBG(fmt...)

#if defined(CONFIG_KGDB)
#include <asm/kgdb.h>
#endif

extern void bootx_init(unsigned long r4, unsigned long phys);

int boot_cpuid;
EXPORT_SYMBOL_GPL(boot_cpuid);
int boot_cpuid_phys;

unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;

int have_of = 1;

#ifdef CONFIG_VGA_CONSOLE
unsigned long vgacon_remap_base;
EXPORT_SYMBOL(vgacon_remap_base);
#endif

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;

/*
 * We're called here very early in the boot. We determine the machine
 * type and call the appropriate low-level setup functions.
 *  -- Cort <cort@fsmlabs.com>
 *
 * Note that the kernel may be running at an address which is different
 * from the address that it was linked at, so we must use RELOC/PTRRELOC
 * to access static data (including strings).  -- paulus
 */
notrace unsigned long __init early_init(unsigned long dt_ptr)
{
	unsigned long offset = reloc_offset();
	struct cpu_spec *spec;

	/* First zero the BSS -- use memset_io, some platforms don't have
	 * caches on yet */
	memset_io((void __iomem *)PTRRELOC(&__bss_start), 0,
			__bss_stop - __bss_start);

	/*
	 * Identify the CPU type and fix up code sections
	 * that depend on which cpu we have.
	 */
	spec = identify_cpu(offset, mfspr(SPRN_PVR));

	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));

	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

	return KERNELBASE + offset;
}

/*
 * Find out what kind of machine we're on and save any data we need
 * from the early boot process (devtree is copied on pmac by prom_init()).
 * This is called very early in the boot process, after a minimal
 * MMU environment has been set up but before MMU_init is called.
 */
notrace void __init machine_init(unsigned long dt_ptr, unsigned long phys)
{
	/* Enable early debugging if any is specified (see udbg.h) */
	udbg_early_init();

	/* Do some early initialization based on the flat device tree */
	early_init_devtree(__va(dt_ptr));

	probe_machine();

#ifdef CONFIG_6xx
	if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
	    cpu_has_feature(CPU_FTR_CAN_NAP))
		ppc_md.power_save = ppc6xx_idle;
#endif

#ifdef CONFIG_E500
	if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
	    cpu_has_feature(CPU_FTR_CAN_NAP))
		ppc_md.power_save = e500_idle;
#endif

	if (ppc_md.progress)
		ppc_md.progress("id mach(): done", 0x200);
}

#ifdef CONFIG_BOOKE_WDT
/* Checks the wdt=x and wdt_period=xx command-line options */
notrace int __init early_parse_wdt(char *p)
{
	if (p && strncmp(p, "0", 1) != 0)
		booke_wdt_enabled = 1;

	return 0;
}
early_param("wdt", early_parse_wdt);

int __init early_parse_wdt_period(char *p)
{
	if (p)
		booke_wdt_period = simple_strtoul(p, NULL, 0);

	return 0;
}
early_param("wdt_period", early_parse_wdt_period);
#endif	/* CONFIG_BOOKE_WDT */

/* Checks the "l2cr=xxxx" command-line option */
int __init ppc_setup_l2cr(char *str)
{
	if (cpu_has_feature(CPU_FTR_L2CR)) {
		unsigned long val = simple_strtoul(str, NULL, 0);
		printk(KERN_INFO "l2cr set to %lx\n", val);
		_set_L2CR(0);		/* force invalidate by disabling the cache */
		_set_L2CR(val);		/* and enable it */
	}
	return 1;
}
__setup("l2cr=", ppc_setup_l2cr);

/* Checks the "l3cr=xxxx" command-line option */
int __init ppc_setup_l3cr(char *str)
{
	if (cpu_has_feature(CPU_FTR_L3CR)) {
		unsigned long val = simple_strtoul(str, NULL, 0);
		printk(KERN_INFO "l3cr set to %lx\n", val);
		_set_L3CR(val);		/* and enable it */
	}
	return 1;
}
__setup("l3cr=", ppc_setup_l3cr);

#ifdef CONFIG_GENERIC_NVRAM

/* Generic nvram hooks used by drivers/char/gen_nvram.c */
unsigned char nvram_read_byte(int addr)
{
	if (ppc_md.nvram_read_val)
		return ppc_md.nvram_read_val(addr);
	return 0xff;
}
EXPORT_SYMBOL(nvram_read_byte);

void nvram_write_byte(unsigned char val, int addr)
{
	if (ppc_md.nvram_write_val)
		ppc_md.nvram_write_val(addr, val);
}
EXPORT_SYMBOL(nvram_write_byte);

void nvram_sync(void)
{
	if (ppc_md.nvram_sync)
		ppc_md.nvram_sync();
}
EXPORT_SYMBOL(nvram_sync);

#endif /* CONFIG_GENERIC_NVRAM */

static DEFINE_PER_CPU(struct cpu, cpu_devices);

int __init ppc_init(void)
{
	int cpu;

	/* clear the progress line */
	if (ppc_md.progress)
		ppc_md.progress(" ", 0xffff);

	/* register CPU devices */
	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);
		c->hotpluggable = 1;
		register_cpu(c, cpu);
	}

	/* call platform init */
	if (ppc_md.init != NULL) {
		ppc_md.init();
	}
	return 0;
}

arch_initcall(ppc_init);

#ifdef CONFIG_IRQSTACKS
static void __init irqstack_early_init(void)
{
	unsigned int i;

	/* interrupt stacks must be in lowmem, we get that for free on ppc32
	 * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
		hardirq_ctx[i] = (struct thread_info *)
			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
	}
}
#else
#define irqstack_early_init()
#endif

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
static void __init exc_lvl_early_init(void)
{
	unsigned int i;

	/* interrupt stacks must be in lowmem, we get that for free on ppc32
	 * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
	for_each_possible_cpu(i) {
		critirq_ctx[i] = (struct thread_info *)
			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
#ifdef CONFIG_BOOKE
		dbgirq_ctx[i] = (struct thread_info *)
			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
		mcheckirq_ctx[i] = (struct thread_info *)
			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
#endif
	}
}
#else
#define exc_lvl_early_init()
#endif

/* Warning: the IO base is not yet initialized */
void __init setup_arch(char **cmdline_p)
{
	*cmdline_p = cmd_line;

	/* so udelay does something sensible, assume <= 1000 bogomips */
	loops_per_jiffy = 500000000 / HZ;

	unflatten_device_tree();
	check_for_initrd();

	if (ppc_md.init_early)
		ppc_md.init_early();

	find_legacy_serial_ports();

	smp_setup_cpu_maps();

	/* Register early console */
	register_early_udbg_console();

	xmon_setup();

#if defined(CONFIG_KGDB)
	if (ppc_md.kgdb_map_scc)
		ppc_md.kgdb_map_scc();
	set_debug_traps();
	if (strstr(cmd_line, "gdb")) {
		if (ppc_md.progress)
			ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000);
		printk("kgdb breakpoint activated\n");
		breakpoint();
	}
#endif

	/*
	 * Set cache line size based on type of cpu as a default.
	 * Systems with OF can look in the properties on the cpu node(s)
	 * for a possibly more accurate value.
	 */
	dcache_bsize = cur_cpu_spec->dcache_bsize;
	icache_bsize = cur_cpu_spec->icache_bsize;
	ucache_bsize = 0;
	if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE))
		ucache_bsize = icache_bsize = dcache_bsize;

	/* reboot on panic */
	panic_timeout = 180;

	if (ppc_md.panic)
		setup_panic();

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = klimit;

	exc_lvl_early_init();

	irqstack_early_init();

	/* set up the bootmem stuff with available memory */
	do_init_bootmem();
	if (ppc_md.progress)
		ppc_md.progress("setup_arch: bootmem", 0x3eab);

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	if (ppc_md.setup_arch)
		ppc_md.setup_arch();
	if (ppc_md.progress)
		ppc_md.progress("arch: exit", 0x3eab);

	paging_init();
}