/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *
 * Memory region support
 *    David Parsons <orc@pell.chi.il.us>, July-August 1999
 *
 * Added E820 sanitization routine (removes overlapping memory regions);
 *    Brian Moyle <bmoyle@mvista.com>, February 2001
 *
 * Moved CPU detection code to cpu/${cpu}.c
 *    Patrick Mochel <mochel@osdl.org>, March 2002
 *
 * Provisions for empty E820 memory regions (reported by certain BIOSes).
 *    Alex Achenbach <xela@slit.de>, December 2002.
 *
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/apm_bios.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/mca.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/edd.h>
#include <linux/iscsi_ibft.h>
#include <linux/nodemask.h>
#include <linux/kexec.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/delay.h>

#include <linux/kallsyms.h>
#include <linux/cpufreq.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>

#include <linux/percpu.h>
#include <linux/crash_dump.h>

#include <video/edid.h>

#include <asm/mtrr.h>
#include <asm/apic.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
#include <asm/arch_hooks.h>
#include <asm/efi.h>
#include <asm/sections.h>
#include <asm/dmi.h>
#include <asm/io_apic.h>
#include <asm/ist.h>
#include <asm/vmi.h>
#include <asm/setup_arch.h>
#include <asm/bios_ebda.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/bugs.h>

#include <asm/system.h>
#include <asm/vsyscall.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>

#include <asm/genapic.h>
#include <asm/paravirt.h>
#include <asm/hypervisor.h>

#include <asm/percpu.h>
#include <asm/topology.h>
#include <asm/apicdef.h>
#ifdef CONFIG_X86_64
#include <asm/numa_64.h>
#endif

#ifndef ARCH_SETUP
#define ARCH_SETUP
#endif

#ifndef CONFIG_DEBUG_BOOT_PARAMS
struct boot_params __initdata boot_params;
#else
struct boot_params boot_params;
#endif

/*
 * Machine setup..
 */
static struct resource data_resource = {
        .name = "Kernel data",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
        .name = "Kernel code",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
        .name = "Kernel bss",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};


#ifdef CONFIG_X86_32
/* This value is set up by the early boot code to point to the value
   immediately after the boot time page tables.  It contains a *physical*
   address, and must not be in the .bss segment! */
unsigned long init_pg_tables_start __initdata = ~0UL;
unsigned long init_pg_tables_end __initdata = ~0UL;

static struct resource video_ram_resource = {
        .name = "Video RAM area",
        .start = 0xa0000,
        .end = 0xbffff,
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1};
/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1};
EXPORT_SYMBOL(boot_cpu_data);
static void set_mca_bus(int x)
{
#ifdef CONFIG_MCA
        MCA_bus = x;
#endif
}

unsigned int def_to_bigsmp;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);

#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
        defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif

#else
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);
#endif


#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
unsigned long mmu_cr4_features;
#else
unsigned long mmu_cr4_features = X86_CR4_PAE;
#endif

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

#define RAMDISK_IMAGE_START_MASK        0x07FF
#define RAMDISK_PROMPT_FLAG             0x8000
#define RAMDISK_LOAD_FLAG               0x4000

static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
        memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
               sizeof(edd.mbr_signature));
        memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
        edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
        edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD

#ifdef CONFIG_X86_32

#define MAX_MAP_CHUNK   (NR_FIX_BTMAPS << PAGE_SHIFT)
static void __init relocate_initrd(void)
{

        u64 ramdisk_image = boot_params.hdr.ramdisk_image;
        u64 ramdisk_size = boot_params.hdr.ramdisk_size;
        u64 end_of_lowmem = max_low_pfn << PAGE_SHIFT;
        u64 ramdisk_here;
        unsigned long slop, clen, mapaddr;
        char *p, *q;

        /* We need to move the initrd down into lowmem */
        ramdisk_here = find_e820_area(0, end_of_lowmem, ramdisk_size,
                                      PAGE_SIZE);

        if (ramdisk_here == -1ULL)
                panic("Cannot find place for new RAMDISK of size %lld\n",
                      ramdisk_size);

        /* Note: this includes all the lowmem currently occupied by
           the initrd; we rely on that fact to keep the data intact. */
        reserve_early(ramdisk_here, ramdisk_here + ramdisk_size,
                      "NEW RAMDISK");
        initrd_start = ramdisk_here + PAGE_OFFSET;
        initrd_end = initrd_start + ramdisk_size;
        printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
               ramdisk_here, ramdisk_here + ramdisk_size);

        q = (char *)initrd_start;

        /* Copy any lowmem portion of the initrd */
        if (ramdisk_image < end_of_lowmem) {
                clen = end_of_lowmem - ramdisk_image;
                p = (char *)__va(ramdisk_image);
                memcpy(q, p, clen);
                q += clen;
                ramdisk_image += clen;
                ramdisk_size -= clen;
        }

        /* Copy the highmem portion of the initrd */
        while (ramdisk_size) {
                slop = ramdisk_image & ~PAGE_MASK;
                clen = ramdisk_size;
                if (clen > MAX_MAP_CHUNK-slop)
                        clen = MAX_MAP_CHUNK-slop;
                mapaddr = ramdisk_image & PAGE_MASK;
                p = early_memremap(mapaddr, clen+slop);
                memcpy(q, p+slop, clen);
                early_iounmap(p, clen+slop);
                q += clen;
                ramdisk_image += clen;
                ramdisk_size -= clen;
        }
        /* high pages are not converted by early_res_to_bootmem */
        ramdisk_image = boot_params.hdr.ramdisk_image;
        ramdisk_size = boot_params.hdr.ramdisk_size;
        printk(KERN_INFO "Move RAMDISK from %016llx - %016llx to"
               " %08llx - %08llx\n",
               ramdisk_image, ramdisk_image + ramdisk_size - 1,
               ramdisk_here, ramdisk_here + ramdisk_size - 1);
}
#endif

static void __init reserve_initrd(void)
{
        u64 ramdisk_image = boot_params.hdr.ramdisk_image;
        u64 ramdisk_size = boot_params.hdr.ramdisk_size;
        u64 ramdisk_end = ramdisk_image + ramdisk_size;
        u64 end_of_lowmem = max_low_pfn << PAGE_SHIFT;

        if (!boot_params.hdr.type_of_loader ||
            !ramdisk_image || !ramdisk_size)
                return;         /* No initrd provided by bootloader */

        initrd_start = 0;

        if (ramdisk_size >= (end_of_lowmem>>1)) {
                free_early(ramdisk_image, ramdisk_end);
                printk(KERN_ERR "initrd too large to handle, "
                       "disabling initrd\n");
                return;
        }

        printk(KERN_INFO "RAMDISK: %08llx - %08llx\n", ramdisk_image,
               ramdisk_end);


        if (ramdisk_end <= end_of_lowmem) {
                /* All in lowmem, easy case */
                /*
                 * don't need to reserve again, already reserved early
                 * in i386_start_kernel
                 */
                initrd_start = ramdisk_image + PAGE_OFFSET;
                initrd_end = initrd_start + ramdisk_size;
                return;
        }

#ifdef CONFIG_X86_32
        relocate_initrd();
#else
        printk(KERN_ERR "initrd extends beyond end of memory "
               "(0x%08llx > 0x%08llx)\ndisabling initrd\n",
               ramdisk_end, end_of_lowmem);
        initrd_start = 0;
#endif
        free_early(ramdisk_image, ramdisk_end);
}
#else
static void __init reserve_initrd(void)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */

static void __init parse_setup_data(void)
{
        struct setup_data *data;
        u64 pa_data;

        if (boot_params.hdr.version < 0x0209)
                return;
        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                data = early_memremap(pa_data, PAGE_SIZE);
                switch (data->type) {
                case SETUP_E820_EXT:
                        parse_e820_ext(data, pa_data);
                        break;
                default:
                        break;
                }
                pa_data = data->next;
                early_iounmap(data, PAGE_SIZE);
        }
}

static void __init e820_reserve_setup_data(void)
{
        struct setup_data *data;
        u64 pa_data;
        int found = 0;

        if (boot_params.hdr.version < 0x0209)
                return;
        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                data = early_memremap(pa_data, sizeof(*data));
                e820_update_range(pa_data, sizeof(*data)+data->len,
                                  E820_RAM, E820_RESERVED_KERN);
                found = 1;
                pa_data = data->next;
                early_iounmap(data, sizeof(*data));
        }
        if (!found)
                return;

        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
        memcpy(&e820_saved, &e820, sizeof(struct e820map));
        printk(KERN_INFO "extended physical RAM map:\n");
        e820_print_map("reserve setup_data");
}

static void __init reserve_early_setup_data(void)
{
        struct setup_data *data;
        u64 pa_data;
        char buf[32];

        if (boot_params.hdr.version < 0x0209)
                return;
        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                data = early_memremap(pa_data, sizeof(*data));
                sprintf(buf, "setup data %x", data->type);
                reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf);
                pa_data = data->next;
                early_iounmap(data, sizeof(*data));
        }
}

/*
 * --------- Crashkernel reservation ------------------------------
 */

#ifdef CONFIG_KEXEC

/**
 * Reserve @size bytes of crashkernel memory at any suitable offset.
 *
 * @size: Size of the crashkernel memory to reserve.
 * Returns the base address on success, and -1ULL on failure.
 */
static
unsigned long long __init find_and_reserve_crashkernel(unsigned long long size)
{
        const unsigned long long alignment = 16<<20;    /* 16M */
        unsigned long long start = 0LL;

        while (1) {
                int ret;

                start = find_e820_area(start, ULONG_MAX, size, alignment);
                if (start == -1ULL)
                        return start;

                /* try to reserve it */
                ret = reserve_bootmem_generic(start, size, BOOTMEM_EXCLUSIVE);
                if (ret >= 0)
                        return start;

                start += alignment;
        }
}

static inline unsigned long long get_total_mem(void)
{
        unsigned long long total;

        total = max_low_pfn - min_low_pfn;
#ifdef CONFIG_HIGHMEM
        total += highend_pfn - highstart_pfn;
#endif

        return total << PAGE_SHIFT;
}

static void __init reserve_crashkernel(void)
{
        unsigned long long total_mem;
        unsigned long long crash_size, crash_base;
        int ret;

        total_mem = get_total_mem();

        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret != 0 || crash_size <= 0)
                return;

        /* 0 means: find the address automatically */
        if (crash_base <= 0) {
                crash_base = find_and_reserve_crashkernel(crash_size);
                if (crash_base == -1ULL) {
                        pr_info("crashkernel reservation failed. "
                                "No suitable area found.\n");
                        return;
                }
        } else {
                ret = reserve_bootmem_generic(crash_base, crash_size,
                                              BOOTMEM_EXCLUSIVE);
                if (ret < 0) {
                        pr_info("crashkernel reservation failed - "
                                "memory is in use\n");
                        return;
                }
        }

        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
               "for crashkernel (System RAM: %ldMB)\n",
               (unsigned long)(crash_size >> 20),
               (unsigned long)(crash_base >> 20),
               (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif

static struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x60,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x64, .end = 0x64,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

static void __init reserve_standard_io_resources(void)
{
        int i;

        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
                request_resource(&ioport_resource, &standard_io_resources[i]);

}

/*
 * Note: elfcorehdr_addr is not just limited to vmcore.  It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */

#ifdef CONFIG_CRASH_DUMP
/* elfcorehdr= specifies the location of the elf core header
 * stored by the crashed kernel. This option will be passed
 * by the kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
        char *end;
        if (!arg)
                return -EINVAL;
        elfcorehdr_addr = memparse(arg, &end);
        return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif

static int __init default_update_genapic(void)
{
#ifdef CONFIG_X86_SMP
        if (!apic->wakeup_cpu)
                apic->wakeup_cpu = wakeup_secondary_cpu_via_init;
#endif

        return 0;
}

static struct x86_quirks default_x86_quirks __initdata = {
        .update_genapic = default_update_genapic,
};

struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;

#ifdef CONFIG_X86_RESERVE_LOW_64K
static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
{
        printk(KERN_NOTICE
               "%s detected: BIOS may corrupt low RAM, working around it.\n",
               d->ident);

        e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED);
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

        return 0;
}
#endif

/* List of systems that have known low memory corruption BIOS problems */
static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
#ifdef CONFIG_X86_RESERVE_LOW_64K
        {
                .callback = dmi_low_memory_corruption,
                .ident = "AMI BIOS",
                .matches = {
                        DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
                },
        },
        {
                .callback = dmi_low_memory_corruption,
                .ident = "Phoenix BIOS",
                .matches = {
                        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
                },
        },
#endif
        {}
};

/*
 * Determine if we were loaded by an EFI loader. If so, then we have also been
 * passed the efi memmap, systab, etc., so we should use these data structures
 * for initialization. Note, the efi init code path is determined by the
 * global efi_enabled. This allows the same kernel image to be used on existing
 * systems (with a traditional BIOS) as well as on EFI systems.
 */
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */

void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_X86_32
        memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
        visws_early_detect();
        pre_setup_arch_hook();
#else
        printk(KERN_INFO "Command line: %s\n", boot_command_line);
#endif

        /* VMI may relocate the fixmap; do this before touching ioremap area */
        vmi_init();

        early_cpu_init();
        early_ioremap_init();

        ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
        screen_info = boot_params.screen_info;
        edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
        apm_info.bios = boot_params.apm_bios_info;
        ist_info = boot_params.ist_info;
        if (boot_params.sys_desc_table.length != 0) {
                set_mca_bus(boot_params.sys_desc_table.table[3] & 0x2);
                machine_id = boot_params.sys_desc_table.table[0];
                machine_submodel_id = boot_params.sys_desc_table.table[1];
                BIOS_revision = boot_params.sys_desc_table.table[2];
        }
#endif
        saved_video_mode = boot_params.hdr.vid_mode;
        bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
        if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
#ifdef CONFIG_X86_32
                     "EL32",
#else
                     "EL64",
#endif
                     4)) {
                efi_enabled = 1;
                efi_reserve_early();
        }
#endif

        ARCH_SETUP

        setup_memory_map();
        parse_setup_data();
        /* update the e820_saved too */
        e820_reserve_setup_data();

        copy_edd();

        if (!boot_params.hdr.root_flags)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code = (unsigned long) _etext;
        init_mm.end_data = (unsigned long) _edata;
#ifdef CONFIG_X86_32
        init_mm.brk = init_pg_tables_end + PAGE_OFFSET;
#else
        init_mm.brk = (unsigned long) &_end;
#endif

        code_resource.start = virt_to_phys(_text);
        code_resource.end = virt_to_phys(_etext)-1;
        data_resource.start = virt_to_phys(_etext);
        data_resource.end = virt_to_phys(_edata)-1;
        bss_resource.start = virt_to_phys(&__bss_start);
        bss_resource.end = virt_to_phys(&__bss_stop)-1;

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
        if (builtin_cmdline[0]) {
                /* append boot loader cmdline to builtin */
                strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
                strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
                strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
        }
#endif
#endif

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;

        parse_early_param();

#ifdef CONFIG_X86_64
        check_efer();
#endif

        /* Must be before kernel pagetables are set up */
        vmi_activate();

        /* after early param, so panic output could reach the serial console */
        reserve_early_setup_data();

        if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
                disable_apic = 1;
#endif
                setup_clear_cpu_cap(X86_FEATURE_APIC);
        }

#ifdef CONFIG_PCI
        if (pci_early_dump_regs)
                early_dump_pci_devices();
#endif

        finish_e820_parsing();

        dmi_scan_machine();

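        /*
         * Apply the bad_bios_dmi_table quirks declared above: on matching
         * systems (certain AMI and Phoenix BIOSes) the callback reserves
         * the first 64K of RAM in the e820 map to work around BIOS-induced
         * low-memory corruption.
         */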
        dmi_check_system(bad_bios_dmi_table);

        /*
         * VMware detection requires dmi to be available, so this
         * needs to be done after dmi_scan_machine, for the boot processor (BP).
         */
        init_hypervisor(&boot_cpu_data);

#ifdef CONFIG_X86_32
        probe_roms();
#endif

        /* after parse_early_param, so we could debug it */
        insert_resource(&iomem_resource, &code_resource);
        insert_resource(&iomem_resource, &data_resource);
        insert_resource(&iomem_resource, &bss_resource);

        if (efi_enabled)
                efi_init();

#ifdef CONFIG_X86_32
        if (ppro_with_ram_bug()) {
                e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
                                  E820_RESERVED);
                sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
                printk(KERN_INFO "fixed physical RAM map:\n");
                e820_print_map("bad_ppro");
        }
#else
        early_gart_iommu_check();
#endif

        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        max_pfn = e820_end_of_ram_pfn();

        /* preallocate 4k for mptable mpc */
        early_reserve_e820_mpc_new();
        /* update e820 for memory not covered by WB MTRRs */
        mtrr_bp_init();
        if (mtrr_trim_uncached_memory(max_pfn))
                max_pfn = e820_end_of_ram_pfn();

#ifdef CONFIG_X86_32
        /* max_low_pfn gets updated here */
        find_low_pfn_range();
#else
        num_physpages = max_pfn;

        if (cpu_has_x2apic)
                check_x2apic();

        /* How many end-of-memory variables you have, grandma! */
        /* need this before calling reserve_initrd */
        if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
                max_low_pfn = e820_end_of_low_ram_pfn();
        else
                max_low_pfn = max_pfn;

        high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
        setup_bios_corruption_check();
#endif

        /* max_pfn_mapped is updated here */
        max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
        max_pfn_mapped = max_low_pfn_mapped;

#ifdef CONFIG_X86_64
        if (max_pfn > max_low_pfn) {
                max_pfn_mapped = init_memory_mapping(1UL<<32,
                                                     max_pfn<<PAGE_SHIFT);
                /* can we preserve max_low_pfn? */
                max_low_pfn = max_pfn;
        }
#endif

        /*
         * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
         */

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
        if (init_ohci1394_dma_early)
                init_ohci1394_dma_on_all_controllers();
#endif

        reserve_initrd();

#ifdef CONFIG_X86_64
        vsmp_init();
#endif

        io_delay_init();

        /*
         * Parse the ACPI tables for possible boot-time SMP configuration.
         */
        acpi_boot_table_init();

        early_acpi_boot_init();

#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

        initmem_init(0, max_pfn);

#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif
#ifdef CONFIG_X86_FIND_SMP_CONFIG
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#endif
        reserve_crashkernel();

#ifdef CONFIG_X86_64
        /*
         * dma32_reserve_bootmem() allocates bootmem which may conflict
         * with the crashkernel command line, so do that after
         * reserve_crashkernel()
         */
        dma32_reserve_bootmem();
#endif

        reserve_ibft_region();

#ifdef CONFIG_KVM_CLOCK
        kvmclock_init();
#endif

        paravirt_pagetable_setup_start(swapper_pg_dir);
        paging_init();
        paravirt_pagetable_setup_done(swapper_pg_dir);
        paravirt_post_allocator_init();

#ifdef CONFIG_X86_64
        map_vsyscall();
#endif

#ifdef CONFIG_X86_GENERICARCH
        generic_apic_probe();
#endif

        early_quirks();

        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();

#if defined(CONFIG_X86_MPPARSE) || defined(CONFIG_X86_VISWS)
        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
#endif

        prefill_possible_map();

#ifdef CONFIG_X86_64
        init_cpu_to_node();
#endif

        init_apic_mappings();
        ioapic_init_mappings();

        /* need to wait until the io_apic is mapped */
        probe_nr_irqs_gsi();

        kvm_guest_init();

        e820_reserve_resources();
        e820_mark_nosave_regions(max_low_pfn);

#ifdef CONFIG_X86_32
        request_resource(&iomem_resource, &video_ram_resource);
#endif
        reserve_standard_io_resources();

        e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
                conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
}