/*
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Enhanced CPU detection and feature setting code by Mike Jagdis
 *  and Martin Mares, November 1997.
 */

.text
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/setup.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/cpufeatures.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include <asm/bootparam.h>
#include <asm/export.h>

/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)

/*
 * References to members of the new_cpu_data structure.
 */

#define X86		new_cpu_data+CPUINFO_x86
#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id

/*
 * This is how much memory in addition to the memory covered up to
 * and including _end we need mapped initially.
 * We need:
 *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
 *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of a page.
 *
 * KERNEL_IMAGE_SIZE should be greater than pa(_end) and smaller than
 * max_low_pfn, otherwise we will waste some page table entries.
 */

#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif

#define SIZEOF_PTREGS 17*4

/*
 * Number of possible pages in the lowmem region.
 *
 * We shift 2 by 31 instead of 1 by 32 to the left in order to avoid a
 * gas warning about an overflowing shift count when gas has been built
 * with support only for a 32-bit host type in its internal
 * representation.
 */
LOWMEM_PAGES = (((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT)

/* Enough space to fit pagetables for the low memory linear map */
MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT

/*
 * Worst-case size of the kernel mapping we need to make:
 * a relocatable kernel can live anywhere in lowmem, so we need to be able
 * to map all of lowmem.
 */
KERNEL_PAGES = LOWMEM_PAGES

INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
RESERVE_BRK(pagetables, INIT_MAP_SIZE)
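
/*
 * Worked example of the sizing above, assuming the default VMSPLIT_3G
 * layout, i.e. __PAGE_OFFSET = 0xC0000000 and PAGE_SHIFT = 12:
 *
 *   LOWMEM_PAGES = (0x100000000 - 0xC0000000) >> 12 = 0x40000
 *                  (1 GiB of lowmem address space)
 *
 *   non-PAE (PTRS_PER_PGD = 1024, PTRS_PER_PMD = 1):
 *     MAPPING_BEYOND_END = (0x40000 / 1024) << 12     =  1 MiB
 *   PAE (PTRS_PER_PGD = 4, PTRS_PER_PMD = 512):
 *     MAPPING_BEYOND_END = (0x40000 / 512 + 4) << 12 ~=  2 MiB
 */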

/*
 * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
 * %esi points to the real-mode code as a 32-bit pointer.
 * CS and DS must be 4 GB flat segments, but we don't depend on
 * any particular GDT layout, because we load our own as soon as we
 * can.
 */
__HEAD
ENTRY(startup_32)
	movl pa(initial_stack),%ecx

	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
	   us to not reload segments */
	testb $KEEP_SEGMENTS, BP_loadflags(%esi)
	jnz 2f

/*
 * Set segments to known values.
 */
	lgdt pa(boot_gdt_descr)
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl %eax,%ss
2:
	leal -__PAGE_OFFSET(%ecx),%esp

/*
 * Clear BSS first so that there are no surprises...
 */
	cld
	xorl %eax,%eax
	movl $pa(__bss_start),%edi
	movl $pa(__bss_stop),%ecx
	subl %edi,%ecx
	shrl $2,%ecx
	rep ; stosl
/*
 * Copy bootup parameters out of the way.
 * Note: %esi still has the pointer to the real-mode data.
 * With kexec as the boot loader, the parameter segment might be loaded
 * beyond the kernel image and might not even be addressable by the early
 * boot page tables (the kexec-on-panic case).  Hence copy out the
 * parameters before initializing the page tables.
 */
	movl $pa(boot_params),%edi
	movl $(PARAM_SIZE/4),%ecx
	cld
	rep
	movsl
	movl pa(boot_params) + NEW_CL_POINTER,%esi
	andl %esi,%esi
	jz 1f			# No command line
	movl $pa(boot_command_line),%edi
	movl $(COMMAND_LINE_SIZE/4),%ecx
	rep
	movsl
1:

#ifdef CONFIG_OLPC
	/* save OFW's pgdir table for later use when calling into OFW */
	movl %cr3, %eax
	movl %eax, pa(olpc_ofw_pgd)
#endif

#ifdef CONFIG_MICROCODE
	/* Early load ucode on BSP. */
	call load_ucode_bsp
#endif

/*
 * Initialize page tables.  This creates a PDE and a set of page
 * tables, which are located immediately beyond __brk_base.  The variable
 * _brk_end is set up to point to the first "safe" location.
 * Mappings are created both at virtual address 0 (identity mapping)
 * and PAGE_OFFSET for up to _end.
 */
#ifdef CONFIG_X86_PAE

	/*
	 * In PAE mode initial_page_table is statically defined to contain
	 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
	 * entries). The identity mapping is handled by pointing two PGD entries
	 * to the first kernel PMD.
	 *
	 * Note the upper half of each PMD or PTE is always zero at this stage.
	 */

#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
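
/*
 * For reference, assuming each PAE PGD entry covers 1 GiB, KPMDS
 * works out to:
 *
 *   __PAGE_OFFSET = 0xC0000000 (VMSPLIT_3G): KPMDS = 1
 *   __PAGE_OFFSET = 0x80000000 (VMSPLIT_2G): KPMDS = 2
 *   __PAGE_OFFSET = 0x40000000 (VMSPLIT_1G): KPMDS = 3
 */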

	xorl %ebx,%ebx				/* %ebx is kept at zero */

	movl $pa(__brk_base), %edi
	movl $pa(initial_pg_pmd), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PMD entry */
	movl %ecx,(%edx)			/* Store PMD entry */
						/* Upper half already zero */
	addl $8,%edx
	movl $512,%ecx
11:
	stosl
	xchgl %eax,%ebx
	stosl
	xchgl %eax,%ebx
	addl $0x1000,%eax
	loop 11b

	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
1:
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
#else	/* Not PAE */

page_pde_offset = (__PAGE_OFFSET >> 20);
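
/*
 * page_pde_offset is the byte offset of the first kernel PDE inside
 * the page directory: each PDE maps 4 MiB (>> 22) and is 4 bytes wide
 * (<< 2), hence the combined >> 20.  With the default __PAGE_OFFSET of
 * 0xC0000000 this evaluates to 0xC00, i.e. entry 768 of 1024.
 */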

	movl $pa(__brk_base), %edi
	movl $pa(initial_page_table), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PDE entry */
	movl %ecx,(%edx)			/* Store identity PDE entry */
	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */
	addl $4,%edx
	movl $1024, %ecx
11:
	stosl
	addl $0x1000,%eax
	loop 11b
	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(initial_page_table+0xffc)
#endif

#ifdef CONFIG_PARAVIRT
	/* This can only trip for a broken bootloader... */
	cmpw $0x207, pa(boot_params + BP_version)
	jb .Ldefault_entry

	/* Paravirt-compatible boot parameters.  Look to see what architecture
	   we're booting under. */
	movl pa(boot_params + BP_hardware_subarch), %eax
	cmpl $num_subarch_entries, %eax
	jae .Lbad_subarch

	movl pa(subarch_entries)(,%eax,4), %eax
	subl $__PAGE_OFFSET, %eax
	jmp *%eax

.Lbad_subarch:
WEAK(lguest_entry)
WEAK(xen_entry)
	/* Unknown implementation; there's really
	   nothing we can do at this point. */
	ud2a

	__INITDATA

subarch_entries:
	.long .Ldefault_entry		/* normal x86/PC */
	.long lguest_entry		/* lguest hypervisor */
	.long xen_entry			/* Xen hypervisor */
	.long .Ldefault_entry		/* Moorestown MID */
num_subarch_entries = (. - subarch_entries) / 4
.previous
#else
	jmp .Ldefault_entry
#endif /* CONFIG_PARAVIRT */

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except the stack. We just set up the stack here. Then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movl initial_stack, %ecx
	movl %ecx, %esp
	call *(initial_code)
1:	jmp 1b
ENDPROC(start_cpu0)
#endif

/*
 * Non-boot CPU entry point; entered from trampoline.S
 * We can't lgdt here, because lgdt itself uses a data segment, but
 * we know the trampoline has already loaded the boot_gdt for us.
 *
 * If cpu hotplug is not supported then this code can go in an init section
 * which will be freed later
 */
ENTRY(startup_32_smp)
	cld
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl pa(initial_stack),%ecx
	movl %eax,%ss
	leal -__PAGE_OFFSET(%ecx),%esp

#ifdef CONFIG_MICROCODE
	/* Early load ucode on AP. */
	call load_ucode_ap
#endif

.Ldefault_entry:
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl $(CR0_STATE & ~X86_CR0_PG),%eax
	movl %eax,%cr0

/*
 * We want to start out with EFLAGS unambiguously cleared.  Some BIOSes leave
 * bits like NT set.  This would confuse the debugger if this code is traced.
 * So initialize them properly now, before enabling paging.  That means DF in
 * particular (even though we have cleared it earlier after copying the
 * command line) because GCC expects it.
 */
	pushl $0
	popfl

/*
 * New page tables may be in 4Mbyte page mode and may be using global pages.
 *
 * NOTE! If we are on a 486 we may have no cr4 at all!  Specifically, cr4
 * exists if and only if CPUID exists and has flags other than the FPU flag
 * set.
 */
	movl $-1,pa(X86_CPUID)		# preset CPUID level
	movl $X86_EFLAGS_ID,%ecx
	pushl %ecx
	popfl				# set EFLAGS=ID
	pushfl
	popl %eax			# get EFLAGS
	testl $X86_EFLAGS_ID,%eax	# did EFLAGS.ID remain set?
	jz .Lenable_paging		# hw disallowed setting of ID bit
					# which means no CPUID and no CR4

	xorl %eax,%eax
	cpuid
	movl %eax,pa(X86_CPUID)		# save largest std CPUID function

	movl $1,%eax
	cpuid
	andl $~1,%edx			# Ignore CPUID.FPU
	jz .Lenable_paging		# No flags or only CPUID.FPU = no CR4

	movl pa(mmu_cr4_features),%eax
	movl %eax,%cr4

	testb $X86_CR4_PAE, %al		# check if PAE is enabled
	jz .Lenable_paging

	/* Check if extended functions are implemented */
	movl $0x80000000, %eax
	cpuid
	/* Value must be in the range 0x80000001 to 0x8000ffff */
	subl $0x80000001, %eax
	cmpl $(0x8000ffff-0x80000001), %eax
	ja .Lenable_paging

	/* Clear bogus XD_DISABLE bits */
	call verify_cpu

	mov $0x80000001, %eax
	cpuid
	/* Execute Disable bit supported? */
	btl $(X86_FEATURE_NX & 31), %edx
	jnc .Lenable_paging

	/* Setup EFER (Extended Feature Enable Register) */
	movl $MSR_EFER, %ecx
	rdmsr

	btsl $_EFER_NX, %eax
	/* Make changes effective */
	wrmsr

.Lenable_paging:

/*
 * Enable paging
 */
	movl $pa(initial_page_table), %eax
	movl %eax,%cr3		/* set the page table pointer.. */
	movl $CR0_STATE,%eax
	movl %eax,%cr0		/* ..and set paging (PG) bit */
	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
1:
	/* Shift the stack pointer to a virtual address */
	addl $__PAGE_OFFSET, %esp

/*
 * start system 32-bit setup. We need to re-do some of the things done
 * in 16-bit mode for the "real" operations.
 */
	movl setup_once_ref,%eax
	andl %eax,%eax
	jz 1f				# Did we do this already?
	call *%eax
1:

/*
 * Check if it is a 486
 */
	movb $4,X86			# at least 486
	cmpl $-1,X86_CPUID
	je .Lis486

	/* get vendor info */
	xorl %eax,%eax			# call CPUID with 0 -> return vendor ID
	cpuid
	movl %eax,X86_CPUID		# save CPUID level
	movl %ebx,X86_VENDOR_ID		# lo 4 chars
	movl %edx,X86_VENDOR_ID+4	# next 4 chars
	movl %ecx,X86_VENDOR_ID+8	# last 4 chars

	orl %eax,%eax			# do we have processor info as well?
	je .Lis486

	movl $1,%eax			# Use the CPUID instruction to get CPU type
	cpuid
	movb %al,%cl			# save reg for future use
	andb $0x0f,%ah			# mask processor family
	movb %ah,X86
	andb $0xf0,%al			# mask model
	shrb $4,%al
	movb %al,X86_MODEL
	andb $0x0f,%cl			# mask stepping (mask revision)
	movb %cl,X86_MASK
	movl %edx,X86_CAPABILITY

.Lis486:
	movl $0x50022,%ecx		# set AM, WP, NE and MP
	movl %cr0,%eax
	andl $0x80000011,%eax		# Save PG,PE,ET
	orl %ecx,%eax
	movl %eax,%cr0
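
/*
 * For reference, the CR0 masks used above decode as:
 *
 *   0x50022    = AM (0x40000) | WP (0x10000) | NE (0x20) | MP (0x2)
 *   0x80000011 keeps PG (0x80000000), ET (0x10) and PE (0x1)
 */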

	lgdt early_gdt_descr
	lidt idt_descr
	ljmp $(__KERNEL_CS),$1f
1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
	movl %eax,%ss			# after changing gdt.

	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
	movl %eax,%ds
	movl %eax,%es

	movl $(__KERNEL_PERCPU), %eax
	movl %eax,%fs			# set this cpu's percpu

	movl $(__KERNEL_STACK_CANARY),%eax
	movl %eax,%gs

	xorl %eax,%eax			# Clear LDT
	lldt %ax

	call *(initial_code)
1:	jmp 1b
ENDPROC(startup_32_smp)

#include "verify_cpu.S"

/*
 * setup_once
 *
 * The setup work we only want to run on the BSP.
 *
 * Warning: %esi is live across this function.
 */
__INIT
setup_once:
	/*
	 * Set up an idt with 256 interrupt gates that push zero if there
	 * is no error code and then jump to early_idt_handler_common.
	 * It doesn't actually load the idt - that needs to be done on
	 * each CPU. Interrupts are enabled elsewhere, when we can be
	 * relatively sure everything is ok.
	 */

	movl $idt_table,%edi
	movl $early_idt_handler_array,%eax
	movl $NUM_EXCEPTION_VECTORS,%ecx
1:
	movl %eax,(%edi)
	movl %eax,4(%edi)
	/* interrupt gate, dpl=0, present */
	movl $(0x8E000000 + __KERNEL_CS),2(%edi)
	addl $EARLY_IDT_HANDLER_SIZE,%eax
	addl $8,%edi
	loop 1b
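
/*
 * Note on the three stores above: the handler address is first written
 * to both dwords of the 8-byte gate, then the dword store at offset 2
 * overwrites bytes 2-5 with the selector (low word) and the 0x8E00
 * present/DPL0/interrupt-gate attribute (high word).  That leaves the
 * handler's low word at bytes 0-1 and its high word at bytes 6-7,
 * which is exactly the IDT gate layout.
 */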

	movl $256 - NUM_EXCEPTION_VECTORS,%ecx
	movl $ignore_int,%edx
	movl $(__KERNEL_CS << 16),%eax
	movw %dx,%ax		/* selector = __KERNEL_CS */
	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */
2:
	movl %eax,(%edi)
	movl %edx,4(%edi)
	addl $8,%edi
	loop 2b

#ifdef CONFIG_CC_STACKPROTECTOR
	/*
	 * Configure the stack canary. The linker can't handle this by
	 * relocation.  Manually set base address in stack canary
	 * segment descriptor.
	 */
	movl $gdt_page,%eax
	movl $stack_canary,%ecx
	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
	shrl $16, %ecx
	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
	movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
#endif

	andl $0,setup_once_ref	/* Once is enough, thanks */
	ret

ENTRY(early_idt_handler_array)
	# 36(%esp) %eflags
	# 32(%esp) %cs
	# 28(%esp) %eip
	# 24(%esp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushl $0		# Dummy error code, to make stack frame uniform
	.endif
	pushl $i		# 20(%esp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)
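
/*
 * Each stub generated above occupies exactly EARLY_IDT_HANDLER_SIZE
 * bytes: an optional "pushl $0", a "pushl $i" and a short jump to
 * early_idt_handler_common, with any slack filled with 0xcc (int3)
 * so that a stray jump into the padding traps rather than falling
 * through.
 */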

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl %ss:early_recursion_flag

	/* The vector number is in pt_regs->gs */

	cld
	pushl %fs		/* pt_regs->fs */
	movw $0, 2(%esp)	/* clear high bits (some CPUs leave garbage) */
	pushl %es		/* pt_regs->es */
	movw $0, 2(%esp)	/* clear high bits (some CPUs leave garbage) */
	pushl %ds		/* pt_regs->ds */
	movw $0, 2(%esp)	/* clear high bits (some CPUs leave garbage) */
	pushl %eax		/* pt_regs->ax */
	pushl %ebp		/* pt_regs->bp */
	pushl %edi		/* pt_regs->di */
	pushl %esi		/* pt_regs->si */
	pushl %edx		/* pt_regs->dx */
	pushl %ecx		/* pt_regs->cx */
	pushl %ebx		/* pt_regs->bx */

	/* Fix up DS and ES */
	movl $(__KERNEL_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es

	/* Load the vector number into EDX */
	movl PT_GS(%esp), %edx

	/* Load GS into pt_regs->gs and clear high bits */
	movw %gs, PT_GS(%esp)
	movw $0, PT_GS+2(%esp)

	movl %esp, %eax		/* args are pt_regs (EAX), trapnr (EDX) */
	call early_fixup_exception

	popl %ebx		/* pt_regs->bx */
	popl %ecx		/* pt_regs->cx */
	popl %edx		/* pt_regs->dx */
	popl %esi		/* pt_regs->si */
	popl %edi		/* pt_regs->di */
	popl %ebp		/* pt_regs->bp */
	popl %eax		/* pt_regs->ax */
	popl %ds		/* pt_regs->ds */
	popl %es		/* pt_regs->es */
	popl %fs		/* pt_regs->fs */
	popl %gs		/* pt_regs->gs */
	decl %ss:early_recursion_flag
	addl $4, %esp		/* pop pt_regs->orig_ax */
	iret
ENDPROC(early_idt_handler_common)

/* This is the default interrupt "handler" :-) */
	ALIGN
ignore_int:
	cld
#ifdef CONFIG_PRINTK
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %es
	pushl %ds
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	pushl 16(%esp)
	pushl 24(%esp)
	pushl 32(%esp)
	pushl 40(%esp)
	pushl $int_msg
	call printk

	call dump_stack

	addl $(5*4),%esp
	popl %ds
	popl %es
	popl %edx
	popl %ecx
	popl %eax
#endif
	iret

hlt_loop:
	hlt
	jmp hlt_loop
ENDPROC(ignore_int)
__INITDATA
	.align 4
GLOBAL(early_recursion_flag)
	.long 0

__REFDATA
	.align 4
ENTRY(initial_code)
	.long i386_start_kernel
ENTRY(setup_once_ref)
	.long setup_once

/*
 * BSS section
 */
__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
#ifdef CONFIG_X86_PAE
initial_pg_pmd:
	.fill 1024*KPMDS,4,0
#else
.globl initial_page_table
initial_page_table:
	.fill 1024,4,0
#endif
initial_pg_fixmap:
	.fill 1024,4,0
.globl empty_zero_page
empty_zero_page:
	.fill 4096,1,0
.globl swapper_pg_dir
swapper_pg_dir:
	.fill 1024,4,0
EXPORT_SYMBOL(empty_zero_page)

/*
 * This starts the data section.
 */
#ifdef CONFIG_X86_PAE
__PAGE_ALIGNED_DATA
	/* Page-aligned for the benefit of paravirt? */
	.align PAGE_SIZE
ENTRY(initial_page_table)
	.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
# if KPMDS == 3
	.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
	.long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0
# elif KPMDS == 2
	.long 0,0
	.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
# elif KPMDS == 1
	.long 0,0
	.long 0,0
	.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0
# else
# error "Kernel PMDs should be 1, 2 or 3"
# endif
	.align PAGE_SIZE		/* needs to be page-sized too */
#endif

.data
.balign 4
ENTRY(initial_stack)
	/*
	 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
	 * unwinder reliably detect the end of the stack.
	 */
	.long init_thread_union + THREAD_SIZE - SIZEOF_PTREGS - \
	      TOP_OF_KERNEL_STACK_PADDING;

__INITRODATA
int_msg:
	.asciz "Unknown interrupt or fault at: %p %p %p\n"

#include "../../x86/xen/xen-head.S"

/*
 * The IDT and GDT 'descriptors' are a strange 48-bit object
 * only used by the lidt and lgdt instructions.  They are not
 * like usual segment descriptors - they consist of a 16-bit
 * segment size, and a 32-bit linear address value:
 */

.data
.globl boot_gdt_descr
.globl idt_descr

	ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
	.word 0				# 32 bit align gdt_desc.address
boot_gdt_descr:
	.word __BOOT_DS+7
	.long boot_gdt - __PAGE_OFFSET

	.word 0				# 32-bit align idt_desc.address
idt_descr:
	.word IDT_ENTRIES*8-1		# idt contains 256 entries
	.long idt_table

# boot GDT descriptor (later on used by CPU#0):
	.word 0				# 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
	.word GDT_ENTRIES*8-1
	.long gdt_page			/* Overwritten for secondary CPUs */

/*
 * The boot_gdt must mirror the equivalent in setup.S and is
 * used only for booting.
 */
	.align L1_CACHE_BYTES
ENTRY(boot_gdt)
	.fill GDT_ENTRY_BOOT_CS,8,0
	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */
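
/*
 * For reference, the two descriptors above decode as:
 *
 *   0x00cf9a000000ffff: base 0x00000000, limit 0xfffff in 4 KiB
 *                       granularity (4 GiB), present, DPL 0, 32-bit,
 *                       code, execute/read
 *   0x00cf92000000ffff: same base/limit/flags, data, read/write
 */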