/* tsb.S: Sparc64 TSB table handling.
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/page.h>
#include <asm/cpudata.h>
#include <asm/mmu.h>

	.text
	.align	32

	/* Invoked from TLB miss handler, we are in the
	 * MMU global registers and they are set up like
	 * this:
	 *
	 * %g1: TSB entry pointer
	 * %g2: available temporary
	 * %g3: FAULT_CODE_{D,I}TLB
	 * %g4: available temporary
	 * %g5: available temporary
	 * %g6: TAG TARGET
	 * %g7: available temporary, will be loaded by us with
	 *      the physical address base of the linux page
	 *      tables for the current address space
	 */
tsb_miss_dtlb:
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4
	srlx		%g4, PAGE_SHIFT, %g4
	ba,pt		%xcc, tsb_miss_page_table_walk
	 sllx		%g4, PAGE_SHIFT, %g4

tsb_miss_itlb:
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4
	srlx		%g4, PAGE_SHIFT, %g4
	ba,pt		%xcc, tsb_miss_page_table_walk
	 sllx		%g4, PAGE_SHIFT, %g4

	/* At this point we have:
	 * %g1 --	PAGE_SIZE TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g4 --	missing virtual address
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 */
tsb_miss_page_table_walk:
	TRAP_LOAD_TRAP_BLOCK(%g7, %g5)

	/* Before committing to a full page table walk,
	 * check the huge page TSB.
	 */
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)

661:	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
	nop
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		SCRATCHPAD_UTSBREG2, %g5
	ldxa		[%g5] ASI_SCRATCHPAD, %g5
	.previous

	cmp		%g5, -1
	be,pt		%xcc, 80f
	 nop

	/* We need an aligned pair of registers containing 2 values
	 * which can be easily rematerialized.  %g6 and %g7 foot the
	 * bill just nicely.  We'll save %g6 away into %g2 for the
	 * huge page TSB TAG comparison.
	 *
	 * Perform a huge page TSB lookup.
	 */
	mov		%g6, %g2
	and		%g5, 0x7, %g6
	mov		512, %g7
	andn		%g5, 0x7, %g5
	sllx		%g7, %g6, %g7
	srlx		%g4, REAL_HPAGE_SHIFT, %g6
	sub		%g7, 1, %g7
	and		%g6, %g7, %g6
	sllx		%g6, 4, %g6
	add		%g5, %g6, %g5

	TSB_LOAD_QUAD(%g5, %g6)
	cmp		%g6, %g2
	be,a,pt		%xcc, tsb_tlb_reload
	 mov		%g7, %g5

	/* No match, remember the huge page TSB entry address,
	 * and restore %g6 and %g7.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
	srlx		%g4, 22, %g6
80:	stx		%g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]

#endif

	ldx		[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7

	/* At this point we have:
	 * %g1 --	TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g4 --	missing virtual address
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 * %g7 --	page table physical address
	 *
	 * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
	 * TSB lack a matching entry.
	 */
tsb_miss_page_table_walk_sun4v_fastpath:
	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)

	/* Valid PTE is now in %g5.  */
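
	/* For reference, the huge page TSB probe at
	 * tsb_miss_page_table_walk above amounts to the following
	 * C-like sketch (illustration only, not built; "reg" stands
	 * for the value loaded from TRAP_PER_CPU_TSB_HUGE or
	 * SCRATCHPAD_UTSBREG2):
	 *
	 *	nentries = 512UL << (reg & 0x7);
	 *	base     = reg & ~0x7UL;
	 *	entry    = base + (((vaddr >> REAL_HPAGE_SHIFT) &
	 *			    (nentries - 1)) << 4);
	 *
	 * i.e. the low three bits of the register encode the TSB size
	 * field and each TSB entry is 16 bytes (tag word plus TTE).
	 */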

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
661:	sethi		%uhi(_PAGE_SZALL_4U), %g7
	sllx		%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		_PAGE_SZALL_4V, %g7
	nop
	.previous

	and		%g5, %g7, %g2

661:	sethi		%uhi(_PAGE_SZHUGE_4U), %g7
	sllx		%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		_PAGE_SZHUGE_4V, %g7
	nop
	.previous

	cmp		%g2, %g7
	bne,pt		%xcc, 60f
	 nop

	/* It is a huge page, use huge page TSB entry address we
	 * calculated above.  If the huge page TSB has not been
	 * allocated, set up a trap stack and call hugetlb_setup()
	 * to do so, then return from the trap to replay the TLB
	 * miss.
	 *
	 * This is necessary to handle the case of transparent huge
	 * pages where we don't really have a non-atomic context
	 * in which to allocate the hugepage TSB hash table.  When
	 * the 'mm' faults in the hugepage for the first time, we
	 * thus handle it here.  This also makes sure that we can
	 * allocate the TSB hash table on the correct NUMA node.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1
	cmp		%g1, -1
	bne,pt		%xcc, 60f
	 nop

661:	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	nop
	.previous

	rdpr		%tl, %g7
	cmp		%g7, 1
	bne,pn		%xcc, winfix_trampoline
	 mov		%g3, %g4
	ba,pt		%xcc, etrap
	 rd		%pc, %g7
	call		hugetlb_setup
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 nop

60:
#endif

	/* At this point we have:
	 * %g1 --	TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g5 --	valid PTE
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 */
tsb_reload:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* Finally, load TLB and return from trap.  */
tsb_tlb_reload:
	cmp		%g3, FAULT_CODE_DTLB
	bne,pn		%xcc, tsb_itlb_load
	 nop

tsb_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

tsb_itlb_load:
	/* Executable bit must be set.  */
661:	sethi		%hi(_PAGE_EXEC_4U), %g4
	andcc		%g5, %g4, %g0
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	andcc		%g5, _PAGE_EXEC_4V, %g0
	nop
	.previous

	be,pn		%xcc, tsb_do_fault
	 nop

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3
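
	/* A short note on the 661: sites used throughout this file
	 * (a best-effort summary of the patching scheme handled by
	 * the boot code): each ".word 661b" entry records the address
	 * of a two-instruction sun4u sequence, and when the kernel
	 * finds it is running under the sun4v hypervisor it overwrites
	 * those two instructions with the two that follow in the
	 * .sun4v_2insn_patch section.  That is why the stxa/retry
	 * pairs above become nops on sun4v and control falls through
	 * to the sun4v_{d,i}tlb_load branches.
	 */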
262 */ 263 264 .globl tsb_do_fault 265tsb_do_fault: 266 cmp %g3, FAULT_CODE_DTLB 267 268661: rdpr %pstate, %g5 269 wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate 270 .section .sun4v_2insn_patch, "ax" 271 .word 661b 272 SET_GL(1) 273 ldxa [%g0] ASI_SCRATCHPAD, %g4 274 .previous 275 276 bne,pn %xcc, tsb_do_itlb_fault 277 nop 278 279tsb_do_dtlb_fault: 280 rdpr %tl, %g3 281 cmp %g3, 1 282 283661: mov TLB_TAG_ACCESS, %g4 284 ldxa [%g4] ASI_DMMU, %g5 285 .section .sun4v_2insn_patch, "ax" 286 .word 661b 287 ldx [%g4 + HV_FAULT_D_ADDR_OFFSET], %g5 288 nop 289 .previous 290 291 /* Clear context ID bits. */ 292 srlx %g5, PAGE_SHIFT, %g5 293 sllx %g5, PAGE_SHIFT, %g5 294 295 be,pt %xcc, sparc64_realfault_common 296 mov FAULT_CODE_DTLB, %g4 297 ba,pt %xcc, winfix_trampoline 298 nop 299 300tsb_do_itlb_fault: 301 rdpr %tpc, %g5 302 ba,pt %xcc, sparc64_realfault_common 303 mov FAULT_CODE_ITLB, %g4 304 305 .globl sparc64_realfault_common 306sparc64_realfault_common: 307 /* fault code in %g4, fault address in %g5, etrap will 308 * preserve these two values in %l4 and %l5 respectively 309 */ 310 ba,pt %xcc, etrap ! Save trap state 3111: rd %pc, %g7 ! ... 312 stb %l4, [%g6 + TI_FAULT_CODE] ! Save fault code 313 stx %l5, [%g6 + TI_FAULT_ADDR] ! Save fault address 314 call do_sparc64_fault ! Call fault handler 315 add %sp, PTREGS_OFF, %o0 ! Compute pt_regs arg 316 ba,pt %xcc, rtrap ! Restore cpu state 317 nop ! Delay slot (fill me) 318 319winfix_trampoline: 320 rdpr %tpc, %g3 ! Prepare winfixup TNPC 321 or %g3, 0x7c, %g3 ! Compute branch offset 322 wrpr %g3, %tnpc ! Write it into TNPC 323 done ! Trap return 324 325 /* Insert an entry into the TSB. 326 * 327 * %o0: TSB entry pointer (virt or phys address) 328 * %o1: tag 329 * %o2: pte 330 */ 331 .align 32 332 .globl __tsb_insert 333__tsb_insert: 334 rdpr %pstate, %o5 335 wrpr %o5, PSTATE_IE, %pstate 336 TSB_LOCK_TAG(%o0, %g2, %g3) 337 TSB_WRITE(%o0, %o2, %o1) 338 wrpr %o5, %pstate 339 retl 340 nop 341 .size __tsb_insert, .-__tsb_insert 342 343 /* Flush the given TSB entry if it has the matching 344 * tag. 345 * 346 * %o0: TSB entry pointer (virt or phys address) 347 * %o1: tag 348 */ 349 .align 32 350 .globl tsb_flush 351 .type tsb_flush,#function 352tsb_flush: 353 sethi %hi(TSB_TAG_LOCK_HIGH), %g2 3541: TSB_LOAD_TAG(%o0, %g1) 355 srlx %g1, 32, %o3 356 andcc %o3, %g2, %g0 357 bne,pn %icc, 1b 358 nop 359 cmp %g1, %o1 360 mov 1, %o3 361 bne,pt %xcc, 2f 362 sllx %o3, TSB_TAG_INVALID_BIT, %o3 363 TSB_CAS_TAG(%o0, %g1, %o3) 364 cmp %g1, %o3 365 bne,pn %xcc, 1b 366 nop 3672: retl 368 nop 369 .size tsb_flush, .-tsb_flush 370 371 /* Reload MMU related context switch state at 372 * schedule() time. 373 * 374 * %o0: page table physical address 375 * %o1: TSB base config pointer 376 * %o2: TSB huge config pointer, or NULL if none 377 * %o3: Hypervisor TSB descriptor physical address 378 * 379 * We have to run this whole thing with interrupts 380 * disabled so that the current cpu doesn't change 381 * due to preemption. 
382 */ 383 .align 32 384 .globl __tsb_context_switch 385 .type __tsb_context_switch,#function 386__tsb_context_switch: 387 rdpr %pstate, %g1 388 wrpr %g1, PSTATE_IE, %pstate 389 390 TRAP_LOAD_TRAP_BLOCK(%g2, %g3) 391 392 stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR] 393 394 ldx [%o1 + TSB_CONFIG_REG_VAL], %o0 395 brz,pt %o2, 1f 396 mov -1, %g3 397 398 ldx [%o2 + TSB_CONFIG_REG_VAL], %g3 399 4001: stx %g3, [%g2 + TRAP_PER_CPU_TSB_HUGE] 401 402 sethi %hi(tlb_type), %g2 403 lduw [%g2 + %lo(tlb_type)], %g2 404 cmp %g2, 3 405 bne,pt %icc, 50f 406 nop 407 408 /* Hypervisor TSB switch. */ 409 mov SCRATCHPAD_UTSBREG1, %o5 410 stxa %o0, [%o5] ASI_SCRATCHPAD 411 mov SCRATCHPAD_UTSBREG2, %o5 412 stxa %g3, [%o5] ASI_SCRATCHPAD 413 414 mov 2, %o0 415 cmp %g3, -1 416 move %xcc, 1, %o0 417 418 mov HV_FAST_MMU_TSB_CTXNON0, %o5 419 mov %o3, %o1 420 ta HV_FAST_TRAP 421 422 /* Finish up. */ 423 ba,pt %xcc, 9f 424 nop 425 426 /* SUN4U TSB switch. */ 42750: mov TSB_REG, %o5 428 stxa %o0, [%o5] ASI_DMMU 429 membar #Sync 430 stxa %o0, [%o5] ASI_IMMU 431 membar #Sync 432 4332: ldx [%o1 + TSB_CONFIG_MAP_VADDR], %o4 434 brz %o4, 9f 435 ldx [%o1 + TSB_CONFIG_MAP_PTE], %o5 436 437 sethi %hi(sparc64_highest_unlocked_tlb_ent), %g2 438 mov TLB_TAG_ACCESS, %g3 439 lduw [%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2 440 stxa %o4, [%g3] ASI_DMMU 441 membar #Sync 442 sllx %g2, 3, %g2 443 stxa %o5, [%g2] ASI_DTLB_DATA_ACCESS 444 membar #Sync 445 446 brz,pt %o2, 9f 447 nop 448 449 ldx [%o2 + TSB_CONFIG_MAP_VADDR], %o4 450 ldx [%o2 + TSB_CONFIG_MAP_PTE], %o5 451 mov TLB_TAG_ACCESS, %g3 452 stxa %o4, [%g3] ASI_DMMU 453 membar #Sync 454 sub %g2, (1 << 3), %g2 455 stxa %o5, [%g2] ASI_DTLB_DATA_ACCESS 456 membar #Sync 457 4589: 459 wrpr %g1, %pstate 460 461 retl 462 nop 463 .size __tsb_context_switch, .-__tsb_context_switch 464 465#define TSB_PASS_BITS ((1 << TSB_TAG_LOCK_BIT) | \ 466 (1 << TSB_TAG_INVALID_BIT)) 467 468 .align 32 469 .globl copy_tsb 470 .type copy_tsb,#function 471copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size 472 * %o2=new_tsb_base, %o3=new_tsb_size 473 */ 474 sethi %uhi(TSB_PASS_BITS), %g7 475 srlx %o3, 4, %o3 476 add %o0, %o1, %g1 /* end of old tsb */ 477 sllx %g7, 32, %g7 478 sub %o3, 1, %o3 /* %o3 == new tsb hash mask */ 479 480661: prefetcha [%o0] ASI_N, #one_read 481 .section .tsb_phys_patch, "ax" 482 .word 661b 483 prefetcha [%o0] ASI_PHYS_USE_EC, #one_read 484 .previous 485 48690: andcc %o0, (64 - 1), %g0 487 bne 1f 488 add %o0, 64, %o5 489 490661: prefetcha [%o5] ASI_N, #one_read 491 .section .tsb_phys_patch, "ax" 492 .word 661b 493 prefetcha [%o5] ASI_PHYS_USE_EC, #one_read 494 .previous 495 4961: TSB_LOAD_QUAD(%o0, %g2) /* %g2/%g3 == TSB entry */ 497 andcc %g2, %g7, %g0 /* LOCK or INVALID set? */ 498 bne,pn %xcc, 80f /* Skip it */ 499 sllx %g2, 22, %o4 /* TAG --> VADDR */ 500 501 /* This can definitely be computed faster... */ 502 srlx %o0, 4, %o5 /* Build index */ 503 and %o5, 511, %o5 /* Mask index */ 504 sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */ 505 or %o4, %o5, %o4 /* Full VADDR. */ 506 srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */ 507 and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */ 508 sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */ 509 TSB_STORE(%o2 + %o4, %g2) /* Store TAG */ 510 add %o4, 0x8, %o4 /* Advance to TTE */ 511 TSB_STORE(%o2 + %o4, %g3) /* Store TTE */ 512 51380: add %o0, 16, %o0 514 cmp %o0, %g1 515 bne,pt %xcc, 90b 516 nop 517 518 retl 519 nop 520 .size copy_tsb, .-copy_tsb 521 522 /* Set the invalid bit in all TSB entries. 

	/* Set the invalid bit in all TSB entries.  */
	.align	32
	.globl	tsb_init
	.type	tsb_init,#function
tsb_init:		/* %o0 = TSB vaddr, %o1 = size in bytes */
	prefetch	[%o0 + 0x000], #n_writes
	mov		1, %g1
	prefetch	[%o0 + 0x040], #n_writes
	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
	prefetch	[%o0 + 0x080], #n_writes
1:	prefetch	[%o0 + 0x0c0], #n_writes
	stx		%g1, [%o0 + 0x00]
	stx		%g1, [%o0 + 0x10]
	stx		%g1, [%o0 + 0x20]
	stx		%g1, [%o0 + 0x30]
	prefetch	[%o0 + 0x100], #n_writes
	stx		%g1, [%o0 + 0x40]
	stx		%g1, [%o0 + 0x50]
	stx		%g1, [%o0 + 0x60]
	stx		%g1, [%o0 + 0x70]
	prefetch	[%o0 + 0x140], #n_writes
	stx		%g1, [%o0 + 0x80]
	stx		%g1, [%o0 + 0x90]
	stx		%g1, [%o0 + 0xa0]
	stx		%g1, [%o0 + 0xb0]
	prefetch	[%o0 + 0x180], #n_writes
	stx		%g1, [%o0 + 0xc0]
	stx		%g1, [%o0 + 0xd0]
	stx		%g1, [%o0 + 0xe0]
	stx		%g1, [%o0 + 0xf0]
	subcc		%o1, 0x100, %o1
	bne,pt		%xcc, 1b
	 add		%o0, 0x100, %o0
	retl
	 nop
	nop
	nop
	.size		tsb_init, .-tsb_init

	.globl	NGtsb_init
	.type	NGtsb_init,#function
NGtsb_init:
	rd	%asi, %g2
	mov	1, %g1
	wr	%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
	sllx	%g1, TSB_TAG_INVALID_BIT, %g1
1:	stxa	%g1, [%o0 + 0x00] %asi
	stxa	%g1, [%o0 + 0x10] %asi
	stxa	%g1, [%o0 + 0x20] %asi
	stxa	%g1, [%o0 + 0x30] %asi
	stxa	%g1, [%o0 + 0x40] %asi
	stxa	%g1, [%o0 + 0x50] %asi
	stxa	%g1, [%o0 + 0x60] %asi
	stxa	%g1, [%o0 + 0x70] %asi
	stxa	%g1, [%o0 + 0x80] %asi
	stxa	%g1, [%o0 + 0x90] %asi
	stxa	%g1, [%o0 + 0xa0] %asi
	stxa	%g1, [%o0 + 0xb0] %asi
	stxa	%g1, [%o0 + 0xc0] %asi
	stxa	%g1, [%o0 + 0xd0] %asi
	stxa	%g1, [%o0 + 0xe0] %asi
	stxa	%g1, [%o0 + 0xf0] %asi
	subcc	%o1, 0x100, %o1
	bne,pt	%xcc, 1b
	 add	%o0, 0x100, %o0
	membar	#Sync
	retl
	 wr	%g2, 0x0, %asi
	.size	NGtsb_init, .-NGtsb_init
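
	/* Both initializers above are equivalent to the following
	 * C-like sketch (illustration only, not built): write the
	 * invalid tag into the 8-byte tag word of every 16-byte TSB
	 * entry, leaving the TTE half untouched:
	 *
	 *	for (off = 0; off < size; off += 16)
	 *		*(u64 *)(tsb + off) = 1UL << TSB_TAG_INVALID_BIT;
	 *
	 * NGtsb_init issues the same stores through the Niagara
	 * block-init ASI (ASI_BLK_INIT_QUAD_LDD_P), which is intended
	 * to avoid first fetching the cache lines being overwritten.
	 */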