/*
 * Kernel execution entry point code.
 *
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *      Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *      Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *      PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *      PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *      PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *      PPC405 modifications
 *      PowerPC 403GCX/405GP modifications.
 *      Author: MontaVista Software, Inc.
 *              frank_rowand@mvista.com or source@mvista.com
 *              debbie_chu@mvista.com
 *    Copyright 2002-2005 MontaVista Software, Inc.
 *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/synch.h>
#include "head_booke.h"


/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
	__HEAD
_ENTRY(_stext);
_ENTRY(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
	mr	r31,r3		/* save device tree ptr */
	li	r24,0		/* CPU number */

#ifdef CONFIG_RELOCATABLE
/*
 * Relocate ourselves to the current runtime address.
 * This is called only by the boot CPU.
 * "relocate" is called with our current runtime virtual
 * address.
 * r21 will be loaded with the physical runtime address of _stext.
 */
	bl	0f			/* Get our runtime address */
0:	mflr	r21			/* Make it accessible */
	addis	r21,r21,(_stext - 0b)@ha
	addi	r21,r21,(_stext - 0b)@l	/* Get our current runtime base */

	/*
	 * We have the runtime (virtual) address of our base.
	 * We calculate our offset within a 256M page, map the
	 * 256M page we belong to at PAGE_OFFSET, and get going
	 * from there.
	 */
	lis	r4,KERNELBASE@h
	ori	r4,r4,KERNELBASE@l
	rlwinm	r6,r21,0,4,31		/* r6 = PHYS_START % 256M */
	rlwinm	r5,r4,0,4,31		/* r5 = KERNELBASE % 256M */
	subf	r3,r5,r6		/* r3 = r6 - r5 */
	add	r3,r4,r3		/* Required virtual address */

	bl	relocate
#endif
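/*
 * The CONFIG_RELOCATABLE computation above, as rough C (illustrative
 * only; "run_base" is a stand-in for the address we are actually
 * executing at, obtained via the mflr trick):
 *
 *	virt = KERNELBASE + (run_base % 256M) - (KERNELBASE % 256M);
 *
 * i.e. keep our offset within the 256M page, but rebase it onto the
 * canonical KERNELBASE mapping before calling relocate().
 */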

	bl	init_cpu_state

	/*
	 * This is where the main kernel code starts.
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD		/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init

#ifdef CONFIG_RELOCATABLE
	/*
	 * Relocatable kernel support based on processing of dynamic
	 * relocation entries.
	 *
	 * r25 will contain RPN/ERPN for the start address of memory
	 * r21 will contain the current offset of _stext
	 */
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)

	/*
	 * Compute the kernstart_addr.
	 * kernstart_addr => (r6,r8)
	 * kernstart_addr & ~0xfffffff => (r6,r7)
	 */
	rlwinm	r6,r25,0,28,31	/* ERPN. Bits 32-35 of Address */
	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
	rlwinm	r8,r21,0,4,31	/* r8 = (_stext & 0xfffffff) */
	or	r8,r7,r8	/* Compute the lower 32bit of kernstart_addr */

	/* Store kernstart_addr */
	stw	r6,0(r3)	/* higher 32bit */
	stw	r8,4(r3)	/* lower 32bit */

	/*
	 * Compute the virt_phys_offset:
	 * virt_phys_offset = stext.run - kernstart_addr
	 *
	 * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
	 * When we relocate, we have:
	 *
	 *	(kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
	 *
	 * hence:
	 *	virt_phys_offset = (KERNELBASE & ~0xfffffff) -
	 *			   (kernstart_addr & ~0xfffffff)
	 */

	/* KERNELBASE&~0xfffffff => (r4,r5) */
	li	r4, 0		/* higher 32bit */
	lis	r5,KERNELBASE@h
	rlwinm	r5,r5,0,0,3	/* Align to 256M, lower 32bit */

	/* 64bit subtraction */
	subfc	r5,r7,r5
	subfe	r4,r6,r4

	/* Store virt_phys_offset */
	lis	r3,virt_phys_offset@ha
	la	r3,virt_phys_offset@l(r3)

	stw	r4,0(r3)
	stw	r5,4(r3)
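
	/*
	 * Equivalent C for the arithmetic above (illustrative only,
	 * assuming 256M pages):
	 *
	 *	kernstart_addr   = ((u64)ERPN << 32) |
	 *			   (RPN & ~0xfffffffUL) |
	 *			   ((u32)_stext & 0xfffffff);
	 *	virt_phys_offset = (KERNELBASE & ~0xfffffff) -
	 *			   (kernstart_addr & ~0xfffffffULL);
	 */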
#elif defined(CONFIG_DYNAMIC_MEMSTART)
	/*
	 * Mapping based, page aligned dynamic kernel loading.
	 *
	 * r25 will contain RPN/ERPN for the start address of memory
	 *
	 * Add the difference between KERNELBASE and PAGE_OFFSET to the
	 * start of physical memory to get kernstart_addr.
	 */
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)

	lis	r4,KERNELBASE@h
	ori	r4,r4,KERNELBASE@l
	lis	r5,PAGE_OFFSET@h
	ori	r5,r5,PAGE_OFFSET@l
	subf	r4,r5,r4

	rlwinm	r6,r25,0,28,31	/* ERPN */
	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
	add	r7,r7,r4

	stw	r6,0(r3)
	stw	r7,4(r3)
#endif

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Clear the Machine Check Syndrome Register */
	li	r0,0
	mtspr	SPRN_MCSR,r0

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */

/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)

	/* Data Storage Interrupt */
	DATA_STORAGE_EXCEPTION

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
	EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Internal Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)

	/* Watchdog Timer Interrupt */
	/* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
#endif

	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

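	/*
	 * What the handler below does, as rough C (illustrative only;
	 * pgd_index()/pte_index() are schematic stand-ins for the
	 * PPC44x_* shift/mask constants used in the real code):
	 *
	 *	pgd = (ea >= PAGE_OFFSET) ? swapper_pg_dir
	 *				  : current_thread->pgdir;
	 *	pmd = pgd[pgd_index(ea)];
	 *	if (!pmd)
	 *		goto fallback;		// hand off to DataStorage
	 *	pte = pte_base(pmd)[pte_index(ea)];
	 *	if (required_perms & ~pte)
	 *		goto fallback;
	 *	tlb_load(ea, pte);		// finish_tlb_load_44x
	 */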
	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */

4:
	mtspr	SPRN_MMUCR,r12

	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event, so I'd rather take the overhead when it happens
	 * than add an instruction here. We should measure whether
	 * doing it at all is worth it, since we could otherwise
	 * avoid loading SPRN_ESR completely...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,10,30,30

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	/* Compare with watermark (instruction gets patched) */
	.globl tlb_44x_patch_hwater_D
tlb_44x_patch_hwater_D:
	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_DEAR

	/* Jump to common tlb load */
	b	finish_tlb_load_44x

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bail out
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */

4:
	mtspr	SPRN_MMUCR,r12

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	/* Compare with watermark (instruction gets patched) */
	.globl tlb_44x_patch_hwater_I
tlb_44x_patch_hwater_I:
	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_SRR0

	/* Jump to common TLB load point */
	b	finish_tlb_load_44x

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - EA of fault
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - TLB index
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load_44x:
	/* Combine RPN & ERPN and write WS 0 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,PPC44x_TLB_XLAT

	/*
	 * Create WS1. This is the faulting address (EPN),
	 * page size, and valid flag.
	 */
	li	r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
	/* Insert valid and page size */
	rlwimi	r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
	tlbwe	r10,r13,PPC44x_TLB_PAGEID	/* Write PAGEID */

	/* And WS 2 */
	li	r10,0xf85			/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
	and	r11,r12,r10			/* Mask PTE bits to keep */
	andi.	r10,r12,_PAGE_USER		/* User page ? */
	beq	1f				/* nope, leave U bits empty */
	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
1:	tlbwe	r11,r13,PPC44x_TLB_ATTRIB	/* Write ATTRIB */

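	/*
	 * The WS 2 construction above, roughly (illustrative only; the
	 * bit values are schematic, the real ones are the PPC44x_TLB_*
	 * flags):
	 *
	 *	mask = 0xf85;			// WIMG + S-perm positions
	 *	if (pte & _PAGE_DIRTY)
	 *		mask |= TLB_SW;		// dirty => allow write
	 *	attr = pte & mask;
	 *	if (pte & _PAGE_USER)
	 *		attr |= s_bits_of(attr) << 3;	// mirror S into U
	 */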
	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi					/* Force context change */

/* TLB error interrupts for 476
 */
#ifdef CONFIG_PPC_47x
	START_EXCEPTION(DataTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr0,r10,r11
	blt+	3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11, swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
3:	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event, so I'd rather take the overhead when it happens
	 * than add an instruction here. We should measure whether
	 * doing it at all is worth it, since we could otherwise
	 * avoid loading SPRN_ESR completely...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,10,30,30

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size */
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has established
	 * latch V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */
	lwz	r11,0(r12)		/* Get high word of pte entry */

	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We can also use r10
	 * as destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common tlb load */
	beq	finish_tlb_load_47x

2:	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11,SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13,SPRN_SPRG_RSCRATCH3
	mfspr	r12,SPRN_SPRG_RSCRATCH2
	mfspr	r11,SPRN_SPRG_RSCRATCH1
	mfspr	r10,SPRN_SPRG_RSCRATCH0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bail out
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr0,r10,r11
	blt+	3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11, swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
3:	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Load PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size */
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has established
	 * latch V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */

	lwz	r11,0(r12)		/* Get high word of pte entry */
	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We can also use r10
	 * as destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common TLB load point */
	beq	finish_tlb_load_47x

2:	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - free to use
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - free to use
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load_47x:
	/* Combine RPN & ERPN and write WS 1 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,1

	/* And make up word 2 */
	li	r10,0xf85		/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,30	/* DIRTY -> SW position */
	and	r11,r12,r10		/* Mask PTE bits to keep */
	andi.	r10,r12,_PAGE_USER	/* User page ? */
	beq	1f			/* nope, leave U bits empty */
	rlwimi	r11,r11,3,26,28		/* yes, copy S bits to U */
1:	tlbwe	r11,r13,2

	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi

#endif /* CONFIG_PPC_47x */

	/* Debug Interrupt */
	/*
	 * This statement needs to exist at the end of the IVPR
	 * definition just in case you end up taking a debug
	 * exception within another exception.
	 */
	DEBUG_CRIT_EXCEPTION

/*
 * Global functions
 */

/*
 * Adjust the machine check IVOR on 440A cores
 */
_GLOBAL(__fixup_440A_mcheck)
	li	r3,MachineCheckA@l
	mtspr	SPRN_IVOR1,r3
	sync
	blr

/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The 44x core does not have an AltiVec unit.
 */
_GLOBAL(giveup_altivec)
	blr

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * The 44x core does not have an FPU.
 */
#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
	blr
#endif

_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr

/*
 * Init CPU state. This is called at boot time or for secondary CPUs
 * to setup initial TLB entries, setup IVORs, etc...
 */
_GLOBAL(init_cpu_state)
	mflr	r22
#ifdef CONFIG_PPC_47x
	/* We use the PVR to differentiate 44x cores from 476 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,PVR_476FPE@h
	beq	head_start_47x
	cmplwi	cr0,r3,PVR_476@h
	beq	head_start_47x
	cmplwi	cr0,r3,PVR_476_ISS@h
	beq	head_start_47x
#endif /* CONFIG_PPC_47x */

/*
 * In case the firmware didn't do it, we apply some workarounds
 * that are good for all 440 core variants here.
 */
	mfspr	r3,SPRN_CCR0
	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
	isync
	mtspr	SPRN_CCR0,r3
	isync
	sync

/*
 * Set up the initial MMU state for 44x
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 *
 * We first invalidate all TLB entries but the one
 * we are running from. We then load the KERNELBASE
 * mappings so we can begin to use kernel addresses
 * natively and so the interrupt vector locations are
 * permanently pinned (necessary since Book E
 * implementations always have translation enabled).
 *
 * TODO: Use the known TLB entry we are running from to
 *	 determine which physical region we are located
 *	 in. This can be used to determine where in RAM
 *	 (on a shared CPU system) or PCI memory space
 *	 (on a DRAMless system) we are located.
 *	 For now, we assume a perfect world, which means
 *	 we are located at the base of DRAM (physical 0).
 */

/*
 * Search TLB for entry that we are currently using.
 * Invalidate all entries but the one we are using.
 */
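	/*
	 * In effect (illustrative C only):
	 *
	 *	mmucr.tid = mfspr(SPRN_PID);
	 *	mmucr.sts = (mfmsr() & MSR_IS) ? 1 : 0;
	 *
	 * so that the tlbsx below searches the translation space and
	 * PID we are currently executing in.
	 */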
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID		/* Get PID */
	mfmsr	r4			/* Get MSR */
	andi.	r4,r4,MSR_IS@l		/* TS=1? */
	beq	wmmucr			/* If not, leave STS=0 */
	oris	r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
wmmucr:	mtspr	SPRN_MMUCR,r3		/* Put MMUCR */
	sync

	bl	invstr			/* Find our address */
invstr:	mflr	r5			/* Make it accessible */
	tlbsx	r23,0,r5		/* Find entry we are in */
	li	r4,0			/* Start at TLB entry 0 */
	li	r3,0			/* Set PAGEID inval value */
1:	cmpw	r23,r4			/* Is this our entry? */
	beq	skpinv			/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID	/* If not, inval the entry */
skpinv:	addi	r4,r4,1			/* Increment */
	cmpwi	r4,64			/* Are we done? */
	bne	1b			/* If not, repeat */
	isync				/* If so, context change */

/*
 * Configure and load pinned entry into TLB slot 63.
 */
#ifdef CONFIG_NONSTATIC_KERNEL
	/*
	 * In case of a NONSTATIC_KERNEL we reuse the TLB XLAT
	 * entries of the initial mapping set by the boot loader.
	 * The XLAT entry is stored in r25.
	 */

	/* Read the XLAT entry for our current mapping */
	tlbre	r25,r23,PPC44x_TLB_XLAT

	lis	r3,KERNELBASE@h
	ori	r3,r3,KERNELBASE@l

	/* Use our current RPN entry */
	mr	r4,r25
#else

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Kernel is at the base of RAM */
	li	r4, 0			/* Load the kernel physical address */
#endif

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

	/* pageid fields */
	clrrwi	r3,r3,10		/* Mask off the effective page number */
	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M

	/* xlat fields */
	clrrwi	r4,r4,10		/* Mask off the real page number */
					/* ERPN is 0 for first 4GB page */

	/* attrib fields */
	/* Added guarded bit to protect against speculative loads/stores */
	li	r5,0
	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	li	r0,63			/* TLB slot 63 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */

	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* If necessary, invalidate original entry we used */
3:	cmpwi	r23,63
	beq	4f
	li	r6,0
	tlbwe	r6,r23,PPC44x_TLB_PAGEID
	isync

4:
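	/*
	 * The pinned mapping we just wrote, summarized as a tuple
	 * (illustrative restatement of the tlbwe sequence above):
	 *
	 *	PAGEID: EPN = KERNELBASE/PAGE_OFFSET (256M aligned),
	 *		VALID | 256M
	 *	XLAT:	RPN = kernel physical base, ERPN = 0
	 *	ATTRIB:	SR | SW | SX | G
	 *
	 * The rfi above switched execution onto this mapping, so the
	 * firmware's original entry could then be invalidated.
	 */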
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug. */

	/* pageid fields */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K

	/* xlat fields */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* attrib fields */
	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
	li	r0,62			/* TLB slot 62 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID
	tlbwe	r4,r0,PPC44x_TLB_XLAT
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0, CriticalInput);
	SET_IVOR(1, MachineCheck);
	SET_IVOR(2, DataStorage);
	SET_IVOR(3, InstructionStorage);
	SET_IVOR(4, ExternalInput);
	SET_IVOR(5, Alignment);
	SET_IVOR(6, Program);
	SET_IVOR(7, FloatingPointUnavailable);
	SET_IVOR(8, SystemCall);
	SET_IVOR(9, AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError44x);
	SET_IVOR(14, InstructionTLBError44x);
	SET_IVOR(15, DebugCrit);

	b	head_start_common


#ifdef CONFIG_PPC_47x

#ifdef CONFIG_SMP

/* Entry point for secondary 47x processors */
_GLOBAL(start_secondary_47x)
	mr	r24,r3		/* CPU number */

	bl	init_cpu_state

	/* Now we need to bolt the rest of kernel memory which
	 * is done in C code. We must be careful because our task
	 * struct or our stack can (and will probably) be out
	 * of reach of the initial 256M TLB entry, so we use a
	 * small temporary stack in .bss for that. This works
	 * because only one CPU at a time can be in this code.
	 */
	lis	r1,temp_boot_stack@h
	ori	r1,r1,temp_boot_stack@l
	addi	r1,r1,1024-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)
	bl	mmu_init_secondary

	/* Now we can get our task struct and real stack pointer */

	/* Get current_thread_info and current */
	lis	r1,secondary_ti@ha
	lwz	r1,secondary_ti@l(r1)
	lwz	r2,TI_TASK(r1)

	/* Current stack pointer */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)

	/* Kernel stack for exception entry in SPRG3 */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	b	start_secondary

#endif /* CONFIG_SMP */

/*
 * Set up the initial MMU state for 47x
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 */

head_start_47x:
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID		/* Get PID */
	mfmsr	r4			/* Get MSR */
	andi.	r4,r4,MSR_IS@l		/* TS=1? */
	beq	1f			/* If not, leave STS=0 */
	oris	r3,r3,PPC47x_MMUCR_STS@h /* Set STS=1 */
1:	mtspr	SPRN_MMUCR,r3		/* Put MMUCR */
	sync

	/* Find the entry we are running from */
	bl	1f
1:	mflr	r23
	tlbsx	r23,0,r23
	tlbre	r24,r23,0
	tlbre	r25,r23,1
	tlbre	r26,r23,2

/*
 * Cleanup time
 */

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

clear_all_utlb_entries:

	#; Set initial values.

	addis	r3,0,0x8000
	addi	r4,0,0
	addi	r5,0,0
	b	clear_utlb_entry
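
	#; Roughly equivalent C for the loop below (illustrative only;
	#; the way/index encoding is inferred from the loop constants,
	#; not quoted from the 476 manual):
	#;
	#;	u32 way = 0x80000000;	// direct way select, way 0
	#;	u32 w0  = 0;		// word 0: EPN = 0, V = 0
	#;	do {
	#;		do {
	#;			tlbwe(w0, way);	// words 1,2 also 0
	#;			way += 0x20000000; // next way, wraps to 0
	#;		} while (way != 0);
	#;		way  = 0x80000000;
	#;		w0  += 0x01000000;	// next congruence class
	#;	} while (w0 != 0);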

	#; Align the loop to speed things up.

	.align	6

clear_utlb_entry:

	tlbwe	r4,r3,0
	tlbwe	r5,r3,1
	tlbwe	r5,r3,2
	addis	r3,r3,0x2000
	cmpwi	r3,0
	bne	clear_utlb_entry
	addis	r3,0,0x8000
	addis	r4,r4,0x100
	cmpwi	r4,0
	bne	clear_utlb_entry

	#; Restore original entry.

	oris	r23,r23,0x8000	/* specify the way */
	tlbwe	r24,r23,0
	tlbwe	r25,r23,1
	tlbwe	r26,r23,2

/*
 * Configure and load pinned entry into TLB for the kernel core
 */

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Word 0 */
	clrrwi	r3,r3,12	/* Mask off the effective page number */
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M

	/* Word 1 - use r25. RPN is the same as the original entry */

	/* Word 2 */
	li	r5,0
	ori	r5,r5,PPC47x_TLB2_S_RWX
#ifdef CONFIG_SMP
	ori	r5,r5,PPC47x_TLB2_M
#endif

	/* We write to way 0 and bolted 0 */
	lis	r0,0x8800
	tlbwe	r3,r0,0
	tlbwe	r25,r0,1
	tlbwe	r5,r0,2

/*
 * Configure SSPCR, ISPCR and USPCR for now to search everything;
 * we can fix them up later.
 */
	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
	mtspr	SPRN_SSPCR,r3
	mtspr	SPRN_USPCR,r3
	LOAD_REG_IMMEDIATE(r3, 0x12345670)
	mtspr	SPRN_ISPCR,r3

	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* Invalidate original entry we used */
3:
	rlwinm	r24,r24,0,21,19	/* clear the "valid" bit */
	tlbwe	r24,r23,0
	addi	r24,0,0
	tlbwe	r24,r23,1
	tlbwe	r24,r23,2
	isync			/* Clear out the shadow TLB entries */
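
	/*
	 * Index encoding assumed by the bolted tlbwe writes above and
	 * in the early-debug mapping below (inferred from the constants
	 * used, not quoted from the 476 manual): 0x80000000 selects a
	 * direct (indexed) write, 0x08000000 marks the entry as bolted,
	 * and the next nibble is the bolt slot (0 for the kernel, 5 for
	 * the UART).
	 */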

#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug. */

	/* Word 0 */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M

	/* Word 1 */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* Word 2 */
	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)

	/* Bolted in way 0, bolt slot 5, we -hope- we don't hit the same
	 * congruence class as the kernel; we need to make sure of it at
	 * some point.
	 */
	lis	r0,0x8d00
	tlbwe	r3,r0,0
	tlbwe	r4,r0,1
	tlbwe	r5,r0,2

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0, CriticalInput);
	SET_IVOR(1, MachineCheckA);
	SET_IVOR(2, DataStorage);
	SET_IVOR(3, InstructionStorage);
	SET_IVOR(4, ExternalInput);
	SET_IVOR(5, Alignment);
	SET_IVOR(6, Program);
	SET_IVOR(7, FloatingPointUnavailable);
	SET_IVOR(8, SystemCall);
	SET_IVOR(9, AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError47x);
	SET_IVOR(14, InstructionTLBError47x);
	SET_IVOR(15, DebugCrit);

	/* We configure icbi to invalidate 128 bytes at a time since the
	 * current 32-bit kernel code isn't too happy with icache != dcache
	 * block size.
	 */
	mfspr	r3,SPRN_CCR0
	oris	r3,r3,0x0020
	mtspr	SPRN_CCR0,r3
	isync

#endif /* CONFIG_PPC_47x */

/*
 * Here we are back to code that is common between 44x and 47x
 *
 * We proceed to further kernel initialization and return to the
 * main kernel entry.
 */
head_start_common:
	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4

	/*
	 * If the kernel was loaded at a non-zero 256 MB page, we need to
	 * mask off the most significant 4 bits to get the relative address
	 * from the start of physical memory.
	 */
	rlwinm	r22,r22,0,4,31
	addis	r22,r22,PAGE_OFFSET@h
	mtlr	r22
	isync
	blr

/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	PAGE_SHIFT
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

/*
 * To support >32-bit physical addresses, we use an 8KB pgdir.
 */
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8

#ifdef CONFIG_SMP
	.align	12
temp_boot_stack:
	.space	1024
#endif /* CONFIG_SMP */