/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Assembly code support for the Olympus-C module
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if !defined(lint)
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/machtrap.h>
#include <sys/privregs.h>
#include <sys/asm_linkage.h>
#include <sys/trap.h>
#include <sys/opl_olympus_regs.h>
#include <sys/opl_module.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>
#include <sys/clock.h>
#include <sys/cmpregs.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */

/*
 * Macro that flushes the entire Ecache.
 *
 *	arg1 = ecache size
 *	arg2 = ecache linesize
 *	arg3 = ecache flush address - Not used for olympus-C
 *	(arg1 and arg2 are clobbered as scratch registers here)
 */
#define	ECACHE_FLUSHALL(arg1, arg2, arg3, tmp1)		\
	mov	ASI_L2_CTRL_U2_FLUSH, arg1;		\
	mov	ASI_L2_CTRL_RW_ADDR, arg2;		\
	stxa	arg1, [arg2]ASI_L2_CTRL

/*
 * SPARC64-VI MMU and Cache operations.
 */

#if defined(lint)

/* ARGSUSED */
void
vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
{}

#else	/* lint */

	ENTRY_NP(vtag_flushpage)
	/*
	 * flush page from the tlb
	 *
	 *	%o0 = vaddr
	 *	%o1 = sfmmup
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, opl_di_l3, %g1)
#endif /* DEBUG */
	/*
	 * disable ints
	 */
	andn	%o5, PSTATE_IE, %o4
	wrpr	%o4, 0, %pstate

	/*
	 * Then, blow out the tlb
	 * Interrupts are disabled to prevent the primary ctx register
	 * from changing underneath us.
	 */
	sethi	%hi(ksfmmup), %o3
	ldx	[%o3 + %lo(ksfmmup)], %o3
	cmp	%o3, %o1
	bne,pt	%xcc, 1f			! if not kernel as, go to 1
	sethi	%hi(FLUSH_ADDR), %o3
	/*
	 * For Kernel demaps use primary. type = page implicitly
	 */
	stxa	%g0, [%o0]ASI_DTLB_DEMAP	/* dmmu flush for KCONTEXT */
	stxa	%g0, [%o0]ASI_ITLB_DEMAP	/* immu flush for KCONTEXT */
	flush	%o3
	retl
	wrpr	%g0, %o5, %pstate		/* enable interrupts */
1:
	/*
	 * User demap.  We need to set the primary context properly.
	 * Secondary context cannot be used for SPARC64-VI IMMU.
	 *	%o0 = vaddr
	 *	%o1 = sfmmup
	 *	%o3 = FLUSH_ADDR
	 */
	SFMMU_CPU_CNUM(%o1, %g1, %g2)		! %g1 = sfmmu cnum on this CPU

	ldub	[%o1 + SFMMU_CEXT], %o4		! %o4 = sfmmup->sfmmu_cext
	sll	%o4, CTXREG_EXT_SHIFT, %o4
	or	%g1, %o4, %g1			! %g1 = pgsz | cnum

	wrpr	%g0, 1, %tl
	set	MMU_PCONTEXT, %o4
	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %o0, %o0
	ldxa	[%o4]ASI_DMMU, %o2		! %o2 = save old ctxnum
	stxa	%g1, [%o4]ASI_DMMU		! wr new ctxnum

	stxa	%g0, [%o0]ASI_DTLB_DEMAP
	stxa	%g0, [%o0]ASI_ITLB_DEMAP
	stxa	%o2, [%o4]ASI_DMMU		/* restore old ctxnum */
	flush	%o3
	wrpr	%g0, 0, %tl

	retl
	wrpr	%g0, %o5, %pstate		/* enable interrupts */
	SET_SIZE(vtag_flushpage)

#endif	/* lint */
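/*
 * Illustrative sketch (not part of the build): the user-demap path in
 * vtag_flushpage() above composes two values. Using hypothetical
 * ldxa()/stxa() helpers and C names for the fields involved:
 *
 *	uint64_t ctx = cnum | ((uint64_t)cext << CTXREG_EXT_SHIFT);
 *	uint64_t demap_va = vaddr | DEMAP_PRIMARY | DEMAP_PAGE_TYPE;
 *	uint64_t old = ldxa(ASI_DMMU, MMU_PCONTEXT);	// save old ctxnum
 *	stxa(ASI_DMMU, MMU_PCONTEXT, ctx);		// wr new ctxnum
 *	stxa(ASI_DTLB_DEMAP, demap_va, 0);		// demap D-TLB
 *	stxa(ASI_ITLB_DEMAP, demap_va, 0);		// demap I-TLB
 *	stxa(ASI_DMMU, MMU_PCONTEXT, old);		// restore old ctxnum
 */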

#if defined(lint)

void
vtag_flushall(void)
{}

#else	/* lint */

	ENTRY_NP2(vtag_flushall, demap_all)
	/*
	 * flush the tlb
	 */
	sethi	%hi(FLUSH_ADDR), %o3
	set	DEMAP_ALL_TYPE, %g1
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	flush	%o3
	retl
	nop
	SET_SIZE(demap_all)
	SET_SIZE(vtag_flushall)

#endif	/* lint */


#if defined(lint)

/* ARGSUSED */
void
vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
{}

#else	/* lint */

	ENTRY_NP(vtag_flushpage_tl1)
	/*
	 * x-trap to flush page from tlb and tsb
	 *
	 *	%g1 = vaddr, zero-extended on 32-bit kernel
	 *	%g2 = sfmmup
	 *
	 * assumes TSBE_TAG = 0
	 */
	srln	%g1, MMU_PAGESHIFT, %g1

	sethi	%hi(ksfmmup), %g3
	ldx	[%g3 + %lo(ksfmmup)], %g3
	cmp	%g3, %g2
	bne,pt	%xcc, 1f			! if not kernel as, go to 1
	slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */

	/* We need to demap in the kernel context */
	or	DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	retry
1:
	/* We need to demap in a user context */
	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1

	SFMMU_CPU_CNUM(%g2, %g6, %g3)		! %g6 = sfmmu cnum on this CPU

	ldub	[%g2 + SFMMU_CEXT], %g4		! %g4 = sfmmup->cext
	sll	%g4, CTXREG_EXT_SHIFT, %g4
	or	%g6, %g4, %g6			! %g6 = pgsz | cnum

	set	MMU_PCONTEXT, %g4
	ldxa	[%g4]ASI_DMMU, %g5		/* rd old ctxnum */
	stxa	%g6, [%g4]ASI_DMMU		/* wr new ctxnum */
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	stxa	%g5, [%g4]ASI_DMMU		/* restore old ctxnum */
	retry
	SET_SIZE(vtag_flushpage_tl1)

#endif	/* lint */


#if defined(lint)

/* ARGSUSED */
void
vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
{}

#else	/* lint */

	ENTRY_NP(vtag_flush_pgcnt_tl1)
	/*
	 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
	 *
	 *	%g1 = vaddr, zero-extended on 32-bit kernel
	 *	%g2 = <sfmmup58|pgcnt6>
	 *
	 * NOTE: this handler relies on the fact that no
	 *	interrupts or traps can occur during the loop
	 *	issuing the TLB_DEMAP operations. It is assumed
	 *	that interrupts are disabled and this code is
	 *	fetching from the kernel locked text address.
	 *
	 * assumes TSBE_TAG = 0
	 */
	set	SFMMU_PGCNT_MASK, %g4
	and	%g4, %g2, %g3			/* g3 = pgcnt - 1 */
	add	%g3, 1, %g3			/* g3 = pgcnt */

	andn	%g2, SFMMU_PGCNT_MASK, %g2	/* g2 = sfmmup */
	srln	%g1, MMU_PAGESHIFT, %g1

	sethi	%hi(ksfmmup), %g4
	ldx	[%g4 + %lo(ksfmmup)], %g4
	cmp	%g4, %g2
	bne,pn	%xcc, 1f			/* if not kernel as, go to 1 */
	slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */

	/* We need to demap in the kernel context */
	or	DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
	sethi	%hi(FLUSH_ADDR), %g5
4:
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	flush	%g5				! flush required by immu

	deccc	%g3				/* decr pgcnt */
	bnz,pt	%icc,4b
	add	%g1, %g2, %g1			/* next page */
	retry
1:
	/*
	 * We need to demap in a user context
	 *
	 *	g2 = sfmmup
	 *	g3 = pgcnt
	 */
	SFMMU_CPU_CNUM(%g2, %g5, %g6)		! %g5 = sfmmu cnum on this CPU

	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1

	ldub	[%g2 + SFMMU_CEXT], %g4		! %g4 = sfmmup->cext
	sll	%g4, CTXREG_EXT_SHIFT, %g4
	or	%g5, %g4, %g5

	set	MMU_PCONTEXT, %g4
	ldxa	[%g4]ASI_DMMU, %g6		/* rd old ctxnum */
	stxa	%g5, [%g4]ASI_DMMU		/* wr new ctxnum */

	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
	sethi	%hi(FLUSH_ADDR), %g5
3:
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	flush	%g5				! flush required by immu

	deccc	%g3				/* decr pgcnt */
	bnz,pt	%icc,3b
	add	%g1, %g2, %g1			/* next page */

	stxa	%g6, [%g4]ASI_DMMU		/* restore old ctxnum */
	retry
	SET_SIZE(vtag_flush_pgcnt_tl1)

#endif	/* lint */
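/*
 * Illustrative sketch (not part of the build): the single x-trap
 * argument %g2 of vtag_flush_pgcnt_tl1() above packs the sfmmu pointer
 * and the page count into one 64-bit word, relying on the low bits of
 * the aligned sfmmup pointer being zero:
 *
 *	uint64_t arg = (uint64_t)sfmmup | (pgcnt - 1);	// <sfmmup58|pgcnt6>
 *	uint64_t cnt = (arg & SFMMU_PGCNT_MASK) + 1;	// up to 64 pages
 *	sfmmu_t *sfp = (sfmmu_t *)(arg & ~SFMMU_PGCNT_MASK);
 */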

#if defined(lint)

/*ARGSUSED*/
void
vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
{}

#else	/* lint */

	ENTRY_NP(vtag_flushall_tl1)
	/*
	 * x-trap to flush tlb
	 */
	set	DEMAP_ALL_TYPE, %g4
	stxa	%g0, [%g4]ASI_DTLB_DEMAP
	stxa	%g0, [%g4]ASI_ITLB_DEMAP
	retry
	SET_SIZE(vtag_flushall_tl1)

#endif	/* lint */


/*
 * VAC (virtual address conflict) does not apply to OPL.
 * VAC resolution is managed by the Olympus processor hardware.
 * As a result, all OPL VAC flushing routines are no-ops.
 */

#if defined(lint)

/* ARGSUSED */
void
vac_flushpage(pfn_t pfnum, int vcolor)
{}

#else	/* lint */

	ENTRY(vac_flushpage)
	retl
	nop
	SET_SIZE(vac_flushpage)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
{}

#else	/* lint */

	ENTRY_NP(vac_flushpage_tl1)
	retry
	SET_SIZE(vac_flushpage_tl1)

#endif	/* lint */


#if defined(lint)

/* ARGSUSED */
void
vac_flushcolor(int vcolor, pfn_t pfnum)
{}

#else	/* lint */

	ENTRY(vac_flushcolor)
	retl
	nop
	SET_SIZE(vac_flushcolor)

#endif	/* lint */


#if defined(lint)

/* ARGSUSED */
void
vac_flushcolor_tl1(uint64_t vcolor, uint64_t pfnum)
{}

#else	/* lint */

	ENTRY(vac_flushcolor_tl1)
	retry
	SET_SIZE(vac_flushcolor_tl1)

#endif	/* lint */

#if defined(lint)

int
idsr_busy(void)
{
	return (0);
}

#else	/* lint */

/*
 * Determine whether or not the IDSR is busy.
 * Entry: no arguments
 * Returns: 1 if busy, 0 otherwise
 */
	ENTRY(idsr_busy)
	ldxa	[%g0]ASI_INTR_DISPATCH_STATUS, %g1
	clr	%o0
	btst	IDSR_BUSY, %g1
	bz,a,pt	%xcc, 1f
	mov	1, %o0
1:
	retl
	nop
	SET_SIZE(idsr_busy)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{}

/* ARGSUSED */
void
init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{}

#else	/* lint */

	.global _dispatch_status_busy
_dispatch_status_busy:
	.asciz	"ASI_INTR_DISPATCH_STATUS error: busy"
	.align	4

/*
 * Set up interrupt dispatch data registers
 * Entry:
 *	%o0 - function or inumber to call
 *	%o1, %o2 - arguments (2 uint64_t's)
 */
	.seg "text"

	ENTRY(init_mondo)
#ifdef DEBUG
	!
	! IDSR should not be busy at the moment
	!
	ldxa	[%g0]ASI_INTR_DISPATCH_STATUS, %g1
	btst	IDSR_BUSY, %g1
	bz,pt	%xcc, 1f
	nop
	sethi	%hi(_dispatch_status_busy), %o0
	call	panic
	or	%o0, %lo(_dispatch_status_busy), %o0
#endif /* DEBUG */

	ALTENTRY(init_mondo_nocheck)
	!
	! interrupt vector dispatch data reg 0
	!
1:
	mov	IDDR_0, %g1
	mov	IDDR_1, %g2
	mov	IDDR_2, %g3
	stxa	%o0, [%g1]ASI_INTR_DISPATCH

	!
	! interrupt vector dispatch data reg 1
	!
	stxa	%o1, [%g2]ASI_INTR_DISPATCH

	!
	! interrupt vector dispatch data reg 2
	!
	stxa	%o2, [%g3]ASI_INTR_DISPATCH

	membar	#Sync
	retl
	nop
	SET_SIZE(init_mondo_nocheck)
	SET_SIZE(init_mondo)

#endif	/* lint */


#if defined(lint)

/* ARGSUSED */
void
shipit(int upaid, int bn)
{ return; }

#else	/* lint */

/*
 * Ship mondo to aid using busy/nack pair bn
 */
	ENTRY_NP(shipit)
	sll	%o0, IDCR_PID_SHIFT, %g1	! IDCR<23:14> = agent id
	sll	%o1, IDCR_BN_SHIFT, %g2		! IDCR<28:24> = b/n pair
	or	%g1, IDCR_OFFSET, %g1		! IDCR<13:0> = 0x70
	or	%g1, %g2, %g1
	stxa	%g0, [%g1]ASI_INTR_DISPATCH	! interrupt vector dispatch
	membar	#Sync
	retl
	nop
	SET_SIZE(shipit)

#endif	/* lint */
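/*
 * Illustrative sketch (not part of the build): the dispatch address
 * written by shipit() above is composed from the target agent id and
 * the busy/nack pair (hypothetical stxa() helper):
 *
 *	uint64_t idcr = ((uint64_t)upaid << IDCR_PID_SHIFT) |
 *	    ((uint64_t)bn << IDCR_BN_SHIFT) | IDCR_OFFSET;
 *	stxa(ASI_INTR_DISPATCH, idcr, 0);	// send the mondo
 */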

#if defined(lint)

/* ARGSUSED */
void
flush_instr_mem(caddr_t vaddr, size_t len)
{}

#else	/* lint */

/*
 * flush_instr_mem:
 *	Flush 1 page of the I-$ starting at vaddr
 *	%o0 vaddr
 *	%o1 bytes to be flushed
 *
 * SPARC64-VI maintains consistency of the on-chip Instruction Cache with
 * the stores from all processors, so a FLUSH instruction is only needed
 * to ensure the pipeline is consistent. This means a single flush is
 * sufficient at the end of a sequence of stores that updates the
 * instruction stream to ensure correct operation.
 */

	ENTRY(flush_instr_mem)
	flush	%o0			! address irrelevant
	retl
	nop
	SET_SIZE(flush_instr_mem)

#endif	/* lint */


/*
 * flush_ecache:
 *	%o0 - 64 bit physical address
 *	%o1 - ecache size
 *	%o2 - ecache linesize
 */
#if defined(lint)

/*ARGSUSED*/
void
flush_ecache(uint64_t physaddr, size_t ecache_size, size_t ecache_linesize)
{}

#else /* !lint */

	ENTRY(flush_ecache)

	/*
	 * Flush the entire Ecache.
	 */
	ECACHE_FLUSHALL(%o1, %o2, %o0, %o4)
	retl
	nop
	SET_SIZE(flush_ecache)

#endif /* lint */
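/*
 * Illustrative note (not part of the build): unlike the classic
 * displacement-flush loop on earlier cpus, ECACHE_FLUSHALL() used by
 * flush_ecache() above is a single store through the L2 control
 * interface, so the size/linesize/address arguments are not needed
 * (hypothetical stxa() helper):
 *
 *	stxa(ASI_L2_CTRL, ASI_L2_CTRL_RW_ADDR, ASI_L2_CTRL_U2_FLUSH);
 */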

#if defined(lint)

/*ARGSUSED*/
void
kdi_flush_idcache(int dcache_size, int dcache_lsize, int icache_size,
    int icache_lsize)
{
}

#else	/* lint */

	/*
	 * I/D cache flushing is not needed for OPL processors
	 */
	ENTRY(kdi_flush_idcache)
	retl
	nop
	SET_SIZE(kdi_flush_idcache)

#endif	/* lint */

#ifdef	TRAPTRACE
/*
 * Simplified trap trace macro for OPL. Adapted from us3.
 */
#define	OPL_TRAPTRACE(ptr, scr1, scr2, label)		\
	CPU_INDEX(scr1, ptr);				\
	sll	scr1, TRAPTR_SIZE_SHIFT, scr1;		\
	set	trap_trace_ctl, ptr;			\
	add	ptr, scr1, scr1;			\
	ld	[scr1 + TRAPTR_LIMIT], ptr;		\
	tst	ptr;					\
	be,pn	%icc, label/**/1;			\
	ldx	[scr1 + TRAPTR_PBASE], ptr;		\
	ld	[scr1 + TRAPTR_OFFSET], scr1;		\
	add	ptr, scr1, ptr;				\
	rd	%asi, scr2;				\
	wr	%g0, TRAPTR_ASI, %asi;			\
	rd	STICK, scr1;				\
	stxa	scr1, [ptr + TRAP_ENT_TICK]%asi;	\
	rdpr	%tl, scr1;				\
	stha	scr1, [ptr + TRAP_ENT_TL]%asi;		\
	rdpr	%tt, scr1;				\
	stha	scr1, [ptr + TRAP_ENT_TT]%asi;		\
	rdpr	%tpc, scr1;				\
	stna	scr1, [ptr + TRAP_ENT_TPC]%asi;		\
	rdpr	%tstate, scr1;				\
	stxa	scr1, [ptr + TRAP_ENT_TSTATE]%asi;	\
	stna	%sp, [ptr + TRAP_ENT_SP]%asi;		\
	stna	%g0, [ptr + TRAP_ENT_TR]%asi;		\
	stna	%g0, [ptr + TRAP_ENT_F1]%asi;		\
	stna	%g0, [ptr + TRAP_ENT_F2]%asi;		\
	stna	%g0, [ptr + TRAP_ENT_F3]%asi;		\
	stna	%g0, [ptr + TRAP_ENT_F4]%asi;		\
	wr	%g0, scr2, %asi;			\
	CPU_INDEX(ptr, scr1);				\
	sll	ptr, TRAPTR_SIZE_SHIFT, ptr;		\
	set	trap_trace_ctl, scr1;			\
	add	scr1, ptr, ptr;				\
	ld	[ptr + TRAPTR_OFFSET], scr1;		\
	ld	[ptr + TRAPTR_LIMIT], scr2;		\
	st	scr1, [ptr + TRAPTR_LAST_OFFSET];	\
	add	scr1, TRAP_ENT_SIZE, scr1;		\
	sub	scr2, TRAP_ENT_SIZE, scr2;		\
	cmp	scr1, scr2;				\
	movge	%icc, 0, scr1;				\
	st	scr1, [ptr + TRAPTR_OFFSET];		\
label/**/1:
#endif	/* TRAPTRACE */


/*
 * Macros facilitating error handling.
 */

/*
 * Save alternative global registers reg1, reg2, reg3
 * to scratchpad registers 1, 2, 3 respectively.
 */
#define	OPL_SAVE_GLOBAL(reg1, reg2, reg3)	\
	stxa	reg1, [%g0]ASI_SCRATCHPAD	;\
	mov	OPL_SCRATCHPAD_SAVE_AG2, reg1	;\
	stxa	reg2, [reg1]ASI_SCRATCHPAD	;\
	mov	OPL_SCRATCHPAD_SAVE_AG3, reg1	;\
	stxa	reg3, [reg1]ASI_SCRATCHPAD

/*
 * Restore alternative global registers reg1, reg2, reg3
 * from scratchpad registers 1, 2, 3 respectively.
 */
#define	OPL_RESTORE_GLOBAL(reg1, reg2, reg3)	\
	mov	OPL_SCRATCHPAD_SAVE_AG3, reg1	;\
	ldxa	[reg1]ASI_SCRATCHPAD, reg3	;\
	mov	OPL_SCRATCHPAD_SAVE_AG2, reg1	;\
	ldxa	[reg1]ASI_SCRATCHPAD, reg2	;\
	ldxa	[%g0]ASI_SCRATCHPAD, reg1

/*
 * Logs value `val' into the member `offset' of a structure
 * at physical address `pa'
 */
#define	LOG_REG(pa, offset, val)		\
	add	pa, offset, pa			;\
	stxa	val, [pa]ASI_MEM

#define	FLUSH_ALL_TLB(tmp1)			\
	set	DEMAP_ALL_TYPE, tmp1		;\
	stxa	%g0, [tmp1]ASI_ITLB_DEMAP	;\
	stxa	%g0, [tmp1]ASI_DTLB_DEMAP	;\
	sethi	%hi(FLUSH_ADDR), tmp1		;\
	flush	tmp1

/*
 * Extracts the Physaddr to Logging Buffer field of the OPL_SCRATCHPAD_ERRLOG
 * scratch register by zeroing all other fields. Result is in pa.
 */
#define	LOG_ADDR(pa)							\
	mov	OPL_SCRATCHPAD_ERRLOG, pa				;\
	ldxa	[pa]ASI_SCRATCHPAD, pa					;\
	sllx	pa, 64-ERRLOG_REG_EIDR_SHIFT, pa			;\
	srlx	pa, 64-ERRLOG_REG_EIDR_SHIFT+ERRLOG_REG_ERR_SHIFT, pa	;\
	sllx	pa, ERRLOG_REG_ERR_SHIFT, pa
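/*
 * Illustrative sketch (not part of the build): LOG_ADDR() above
 * isolates the physical log-buffer address field of the errlog
 * scratchpad register with a shift pair instead of a mask
 * (hypothetical ldxa() helper):
 *
 *	pa = ldxa(ASI_SCRATCHPAD, OPL_SCRATCHPAD_ERRLOG);
 *	pa <<= 64 - ERRLOG_REG_EIDR_SHIFT;	// drop the EIDR bits
 *	pa >>= 64 - ERRLOG_REG_EIDR_SHIFT + ERRLOG_REG_ERR_SHIFT;
 *	pa <<= ERRLOG_REG_ERR_SHIFT;		// drop the low error bits
 */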

/*
 * Advance the per-cpu error log buffer pointer to the next
 * ERRLOG_SZ entry, wrapping around at the ERRLOG_BUFSZ boundary.
 * The args logpa, bufmask, tmp are scratch registers for this
 * macro; their incoming values are ignored.
 *
 * Algorithm:
 * 1. logpa = contents of errorlog scratchpad register
 * 2. bufmask = ERRLOG_BUFSZ - 1
 * 3. tmp = logpa & ~(bufmask) (tmp is now logbase)
 * 4. logpa += ERRLOG_SZ
 * 5. logpa = logpa & bufmask (get new offset to logbase)
 * 6. logpa = tmp | logpa
 * 7. write logpa back into errorlog scratchpad register
 *
 * new logpa = (logpa & ~bufmask) | ((logpa + ERRLOG_SZ) & bufmask)
 *
 */
#define	UPDATE_LOGADD(logpa, bufmask, tmp)	\
	set	OPL_SCRATCHPAD_ERRLOG, tmp	;\
	ldxa	[tmp]ASI_SCRATCHPAD, logpa	;\
	set	(ERRLOG_BUFSZ-1), bufmask	;\
	andn	logpa, bufmask, tmp		;\
	add	logpa, ERRLOG_SZ, logpa		;\
	and	logpa, bufmask, logpa		;\
	or	tmp, logpa, logpa		;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp	;\
	stxa	logpa, [tmp]ASI_SCRATCHPAD
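/*
 * Illustrative sketch (not part of the build): UPDATE_LOGADD() above,
 * written as plain C (hypothetical ldxa()/stxa() helpers):
 *
 *	uint64_t log = ldxa(ASI_SCRATCHPAD, OPL_SCRATCHPAD_ERRLOG);
 *	uint64_t base = log & ~(uint64_t)(ERRLOG_BUFSZ - 1);
 *	uint64_t off = (log + ERRLOG_SZ) & (ERRLOG_BUFSZ - 1);
 *	stxa(ASI_SCRATCHPAD, OPL_SCRATCHPAD_ERRLOG, base | off);
 */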

/* Log error status registers into the log buffer */
#define	LOG_SYNC_REG(sfsr, sfar, tmp)		\
	LOG_ADDR(tmp)				;\
	LOG_REG(tmp, LOG_SFSR_OFF, sfsr)	;\
	LOG_ADDR(tmp)				;\
	mov	tmp, sfsr			;\
	LOG_REG(tmp, LOG_SFAR_OFF, sfar)	;\
	rd	STICK, sfar			;\
	mov	sfsr, tmp			;\
	LOG_REG(tmp, LOG_STICK_OFF, sfar)	;\
	rdpr	%tl, tmp			;\
	sllx	tmp, 32, sfar			;\
	rdpr	%tt, tmp			;\
	or	sfar, tmp, sfar			;\
	mov	sfsr, tmp			;\
	LOG_REG(tmp, LOG_TL_OFF, sfar)		;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp	;\
	ldxa	[tmp]ASI_SCRATCHPAD, sfar	;\
	mov	sfsr, tmp			;\
	LOG_REG(tmp, LOG_ASI3_OFF, sfar)	;\
	rdpr	%tpc, sfar			;\
	mov	sfsr, tmp			;\
	LOG_REG(tmp, LOG_TPC_OFF, sfar)		;\
	UPDATE_LOGADD(sfsr, sfar, tmp)

#define	LOG_UGER_REG(uger, tmp, tmp2)		\
	LOG_ADDR(tmp)				;\
	mov	tmp, tmp2			;\
	LOG_REG(tmp2, LOG_UGER_OFF, uger)	;\
	mov	tmp, uger			;\
	rd	STICK, tmp2			;\
	LOG_REG(tmp, LOG_STICK_OFF, tmp2)	;\
	rdpr	%tl, tmp			;\
	sllx	tmp, 32, tmp2			;\
	rdpr	%tt, tmp			;\
	or	tmp2, tmp, tmp2			;\
	mov	uger, tmp			;\
	LOG_REG(tmp, LOG_TL_OFF, tmp2)		;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp2	;\
	ldxa	[tmp2]ASI_SCRATCHPAD, tmp2	;\
	mov	uger, tmp			;\
	LOG_REG(tmp, LOG_ASI3_OFF, tmp2)	;\
	rdpr	%tstate, tmp2			;\
	mov	uger, tmp			;\
	LOG_REG(tmp, LOG_TSTATE_OFF, tmp2)	;\
	rdpr	%tpc, tmp2			;\
	mov	uger, tmp			;\
	LOG_REG(tmp, LOG_TPC_OFF, tmp2)		;\
	UPDATE_LOGADD(uger, tmp, tmp2)

/*
 * Scrub the STICK_COMPARE register to clear the error by updating
 * it to a reasonable value for interrupt generation.
 * Ensure that we observe the CPU_ENABLE flag so that we
 * don't accidentally enable the TICK interrupt in STICK_COMPARE,
 * i.e. no clock interrupt will be generated if the CPU_ENABLE flag
 * is off.
 */
#define	UPDATE_STICK_COMPARE(tmp1, tmp2)	\
	CPU_ADDR(tmp1, tmp2)			;\
	lduh	[tmp1 + CPU_FLAGS], tmp2	;\
	andcc	tmp2, CPU_ENABLE, %g0		;\
	set	OPL_UGER_STICK_DIFF, tmp2	;\
	rd	STICK, tmp1			;\
	add	tmp1, tmp2, tmp1		;\
	mov	1, tmp2				;\
	sllx	tmp2, TICKINT_DIS_SHFT, tmp2	;\
	or	tmp1, tmp2, tmp2		;\
	movnz	%xcc, tmp1, tmp2		;\
	wr	tmp2, %g0, STICK_COMPARE
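/*
 * Illustrative sketch (not part of the build): the movnz in
 * UPDATE_STICK_COMPARE() above selects between an armed and a
 * disabled compare value based on the CPU_ENABLE flag:
 *
 *	uint64_t armed = rd_stick() + OPL_UGER_STICK_DIFF;
 *	uint64_t off = armed | (1ULL << TICKINT_DIS_SHFT);
 *	wr_stick_compare((cpu->cpu_flags & CPU_ENABLE) ? armed : off);
 *
 * (rd_stick()/wr_stick_compare() are hypothetical helpers.)
 */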

/*
 * Reset registers that may be corrupted by IAUG_CRE error.
 * To update interrupt handling related registers force the
 * clock interrupt.
 */
#define	IAG_CRE(tmp1, tmp2)				\
	set	OPL_SCRATCHPAD_ERRLOG, tmp1		;\
	ldxa	[tmp1]ASI_SCRATCHPAD, tmp1		;\
	srlx	tmp1, ERRLOG_REG_EIDR_SHIFT, tmp1	;\
	set	ERRLOG_REG_EIDR_MASK, tmp2		;\
	and	tmp1, tmp2, tmp1			;\
	stxa	tmp1, [%g0]ASI_EIDR			;\
	wr	%g0, 0, SOFTINT				;\
	sethi	%hi(hres_last_tick), tmp1		;\
	ldx	[tmp1 + %lo(hres_last_tick)], tmp1	;\
	set	OPL_UGER_STICK_DIFF, tmp2		;\
	add	tmp1, tmp2, tmp1			;\
	wr	tmp1, %g0, STICK			;\
	UPDATE_STICK_COMPARE(tmp1, tmp2)


#define	CLEAR_FPREGS(tmp)			\
	wr	%g0, FPRS_FEF, %fprs		;\
	wr	%g0, %g0, %gsr			;\
	sethi	%hi(opl_clr_freg), tmp		;\
	or	tmp, %lo(opl_clr_freg), tmp	;\
	ldx	[tmp], %fsr			;\
	fzero	%d0				;\
	fzero	%d2				;\
	fzero	%d4				;\
	fzero	%d6				;\
	fzero	%d8				;\
	fzero	%d10				;\
	fzero	%d12				;\
	fzero	%d14				;\
	fzero	%d16				;\
	fzero	%d18				;\
	fzero	%d20				;\
	fzero	%d22				;\
	fzero	%d24				;\
	fzero	%d26				;\
	fzero	%d28				;\
	fzero	%d30				;\
	fzero	%d32				;\
	fzero	%d34				;\
	fzero	%d36				;\
	fzero	%d38				;\
	fzero	%d40				;\
	fzero	%d42				;\
	fzero	%d44				;\
	fzero	%d46				;\
	fzero	%d48				;\
	fzero	%d50				;\
	fzero	%d52				;\
	fzero	%d54				;\
	fzero	%d56				;\
	fzero	%d58				;\
	fzero	%d60				;\
	fzero	%d62				;\
	wr	%g0, %g0, %fprs

#define	CLEAR_GLOBALS()				\
	mov	%g0, %g1			;\
	mov	%g0, %g2			;\
	mov	%g0, %g3			;\
	mov	%g0, %g4			;\
	mov	%g0, %g5			;\
	mov	%g0, %g6			;\
	mov	%g0, %g7

/*
 * We do not clear the alternative globals here because they
 * are scratch registers: no code reads from them without first
 * writing to them. Since every read is preceded by a write, an
 * extra write to clear the alternative globals is unnecessary.
 */
#define	CLEAR_GEN_REGS(tmp1, label)		\
	set	TSTATE_KERN, tmp1		;\
	wrpr	%g0, tmp1, %tstate		;\
	mov	%g0, %y				;\
	mov	%g0, %asi			;\
	mov	%g0, %ccr			;\
	mov	%g0, %l0			;\
	mov	%g0, %l1			;\
	mov	%g0, %l2			;\
	mov	%g0, %l3			;\
	mov	%g0, %l4			;\
	mov	%g0, %l5			;\
	mov	%g0, %l6			;\
	mov	%g0, %l7			;\
	mov	%g0, %i0			;\
	mov	%g0, %i1			;\
	mov	%g0, %i2			;\
	mov	%g0, %i3			;\
	mov	%g0, %i4			;\
	mov	%g0, %i5			;\
	mov	%g0, %i6			;\
	mov	%g0, %i7			;\
	mov	%g0, %o1			;\
	mov	%g0, %o2			;\
	mov	%g0, %o3			;\
	mov	%g0, %o4			;\
	mov	%g0, %o5			;\
	mov	%g0, %o6			;\
	mov	%g0, %o7			;\
	mov	%g0, %o0			;\
	mov	%g0, %g4			;\
	mov	%g0, %g5			;\
	mov	%g0, %g6			;\
	mov	%g0, %g7			;\
	rdpr	%tl, tmp1			;\
	cmp	tmp1, 1				;\
	be,pt	%xcc, label/**/1		;\
	rdpr	%pstate, tmp1			;\
	wrpr	tmp1, PSTATE_AG|PSTATE_IG, %pstate	;\
	CLEAR_GLOBALS()				;\
	rdpr	%pstate, tmp1			;\
	wrpr	tmp1, PSTATE_IG|PSTATE_MG, %pstate	;\
	CLEAR_GLOBALS()				;\
	rdpr	%pstate, tmp1			;\
	wrpr	tmp1, PSTATE_MG|PSTATE_AG, %pstate	;\
	ba,pt	%xcc, label/**/2		;\
	nop					;\
label/**/1:					;\
	wrpr	tmp1, PSTATE_AG, %pstate	;\
	CLEAR_GLOBALS()				;\
	rdpr	%pstate, tmp1			;\
	wrpr	tmp1, PSTATE_AG, %pstate	;\
label/**/2:
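/*
 * Illustrative note (not part of the build): the two-operand form of
 * wrpr used in CLEAR_GEN_REGS() above writes (rs1 XOR operand) to
 * %pstate, so each
 *	wrpr	tmp1, PSTATE_x|PSTATE_y, %pstate
 * flips one globals-select bit off and the next one on, stepping
 * through the interrupt and MMU global sets so that CLEAR_GLOBALS()
 * can be applied to each set in turn before returning to the
 * original one.
 */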

/*
 * Reset all window related registers
 */
#define	RESET_WINREG(tmp)			\
	sethi	%hi(nwin_minus_one), tmp	;\
	ld	[tmp + %lo(nwin_minus_one)], tmp	;\
	wrpr	%g0, tmp, %cwp			;\
	wrpr	%g0, tmp, %cleanwin		;\
	sub	tmp, 1, tmp			;\
	wrpr	%g0, tmp, %cansave		;\
	wrpr	%g0, %g0, %canrestore		;\
	wrpr	%g0, %g0, %otherwin		;\
	wrpr	%g0, PIL_MAX, %pil		;\
	wrpr	%g0, WSTATE_KERN, %wstate


#define	RESET_PREV_TSTATE(tmp1, tmp2, label)	\
	rdpr	%tl, tmp1			;\
	subcc	tmp1, 1, tmp1			;\
	bz,pt	%xcc, label/**/1		;\
	nop					;\
	wrpr	tmp1, %g0, %tl			;\
	set	TSTATE_KERN, tmp2		;\
	wrpr	tmp2, %g0, %tstate		;\
	wrpr	%g0, %g0, %tpc			;\
	wrpr	%g0, %g0, %tnpc			;\
	add	tmp1, 1, tmp1			;\
	wrpr	tmp1, %g0, %tl			;\
label/**/1:


/*
 * %pstate, %pc, %npc are propagated to %tstate, %tpc, %tnpc,
 * and we reset these registers here.
 */
#define	RESET_CUR_TSTATE(tmp)			\
	set	TSTATE_KERN, tmp		;\
	wrpr	%g0, tmp, %tstate		;\
	wrpr	%g0, 0, %tpc			;\
	wrpr	%g0, 0, %tnpc			;\
	RESET_WINREG(tmp)

/*
 * In case of urgent errors some MMU registers may be
 * corrupted, so we set here some reasonable values for
 * them. Note that resetting MMU registers also resets the context
 * info; we will need to reset the window registers to prevent
 * spill/fill that depends on context info for correct behaviour.
 * Note that the TLBs must be flushed before programming the context
 * registers.
 */

#if !defined(lint)
#define	RESET_MMU_REGS(tmp1, tmp2, tmp3)	\
	FLUSH_ALL_TLB(tmp1)			;\
	set	MMU_PCONTEXT, tmp1		;\
	sethi	%hi(kcontextreg), tmp2		;\
	ldx	[tmp2 + %lo(kcontextreg)], tmp2	;\
	stxa	tmp2, [tmp1]ASI_DMMU		;\
	set	MMU_SCONTEXT, tmp1		;\
	stxa	tmp2, [tmp1]ASI_DMMU		;\
	sethi	%hi(ktsb_base), tmp1		;\
	ldx	[tmp1 + %lo(ktsb_base)], tmp2	;\
	mov	MMU_TSB, tmp3			;\
	stxa	tmp2, [tmp3]ASI_IMMU		;\
	stxa	tmp2, [tmp3]ASI_DMMU		;\
	membar	#Sync				;\
	RESET_WINREG(tmp1)

#define	RESET_TSB_TAGPTR(tmp)			\
	set	MMU_TAG_ACCESS, tmp		;\
	stxa	%g0, [tmp]ASI_IMMU		;\
	stxa	%g0, [tmp]ASI_DMMU		;\
	membar	#Sync
#endif /* lint */

/*
 * In case of errors in the MMU_TSB_PREFETCH registers we have to
 * reset them. We can use "0" as the reset value: this way we set
 * the "V" bit of the registers to 0, which will disable the prefetch,
 * so the values of the other fields are irrelevant.
 */
#if !defined(lint)
#define	RESET_TSB_PREFETCH(tmp)			\
	set	VA_UTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_UTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_KTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_KTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_UTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH	;\
	set	VA_UTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH	;\
	set	VA_KTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH	;\
	set	VA_KTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH
#endif /* lint */

/*
 * In case of errors in the MMU_SHARED_CONTEXT register we have to
 * reset its value. We can use "0" as the reset value: it will put
 * 0 in the IV field, disabling the shared context support and
 * making the values of all the other fields of the register
 * irrelevant.
 */
#if !defined(lint)
#define	RESET_SHARED_CTXT(tmp)			\
	set	MMU_SHARED_CONTEXT, tmp		;\
	stxa	%g0, [tmp]ASI_DMMU
#endif /* lint */

/*
 * RESET_TO_PRIV()
 *
 * In many cases, we need to force the thread into privileged mode,
 * because privileged mode is the only mode in which the system can
 * continue to work when the user-mode state is indeterminable due to
 * register corruption.
 *
 * - opl_uger_ctxt
 *   If the error is a secondary-TSB-related register parity error,
 *   we have no idea what its value is supposed to be.
 *
 * In the three cases below, %tstate is not accessible until it is
 * overwritten with some value, so we have no clue whether the thread
 * was running in user mode or not:
 * - opl_uger_pstate
 *   If the error is %pstate parity, it propagates to %tstate.
 * - opl_uger_tstate
 *   No need to say the reason.
 * - opl_uger_r
 *   If the error is %ccr or %asi parity, it propagates to %tstate.
 *
 * For the above four cases, user-mode info may not be available for
 * sys_trap() and user_trap() to work consistently, so we have to
 * force the thread into privileged mode.
 *
 * Forcing the thread into privileged mode requires forcing the
 * regular %g7 to be CPU_THREAD, because if the thread was running in
 * user mode, %g7 would only be set in user_trap(). Also, since %sp
 * may be in an inconsistent state, we need to do a stack reset and
 * switch to something we know, i.e. the current thread's kernel stack.
 * We also reset the window registers and MMU registers just to
 * make sure.
 *
 * To set the regular %g7, we need to clear the PSTATE_AG bit and
 * use one local register. Note that we are panicking and will never
 * unwind back, so it is ok to clobber a local.
 *
 * If the thread was running in user mode, the %tpc value itself might
 * be within the range of OBP addresses. %tpc must be forced to zero
 * to prevent sys_trap() from going to prom_trap().
 *
 */
#define	RESET_TO_PRIV(tmp, tmp1, tmp2, local)	\
	RESET_MMU_REGS(tmp, tmp1, tmp2)		;\
	CPU_ADDR(tmp, tmp1)			;\
	ldx	[tmp + CPU_THREAD], local	;\
	ldx	[local + T_STACK], tmp		;\
	sub	tmp, STACK_BIAS, %sp		;\
	rdpr	%pstate, tmp			;\
	wrpr	tmp, PSTATE_AG, %pstate		;\
	mov	local, %g7			;\
	rdpr	%pstate, local			;\
	wrpr	local, PSTATE_AG, %pstate	;\
	wrpr	%g0, 1, %tl			;\
	set	TSTATE_KERN, tmp		;\
	rdpr	%cwp, tmp1			;\
	or	tmp, tmp1, tmp			;\
	wrpr	tmp, %g0, %tstate		;\
	wrpr	%g0, %tpc
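/*
 * Illustrative sketch (not part of the build): the thread and stack
 * reset done by RESET_TO_PRIV() above corresponds roughly to:
 *
 *	kthread_t *t = CPU->cpu_thread;
 *	%g7 (regular set) = t;			// curthread
 *	%sp = t->t_stk - STACK_BIAS;		// known-good kernel stack
 *	%tl = 1;
 *	%tstate = TSTATE_KERN | current %cwp;	// force privileged state
 *	%tpc = 0;				// keep sys_trap() away from
 *						// prom_trap()
 */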

#if defined(lint)

void
ce_err(void)
{}

#else	/* lint */

/*
 * We normally don't expect CE traps since we disable the
 * 0x63 trap reporting at the start of day. There is a
 * small window before we disable them, so let's check for
 * it. Otherwise, panic.
 */

	.align	128
	ENTRY_NP(ce_err)
	mov	AFSR_ECR, %g1
	ldxa	[%g1]ASI_ECR, %g1
	andcc	%g1, ASI_ECR_RTE_UE | ASI_ECR_RTE_CEDG, %g0
	bz,pn	%xcc, 1f
	nop
	retry
1:
	/*
	 * We did disable the 0x63 trap reporting.
	 * This shouldn't happen - panic.
	 */
	set	trap, %g1
	rdpr	%tt, %g3
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	sub	%g0, 1, %g4
	SET_SIZE(ce_err)

#endif	/* lint */


#if defined(lint)

void
ce_err_tl1(void)
{}

#else	/* lint */

/*
 * We don't use trap for CE detection.
 */
	ENTRY_NP(ce_err_tl1)
	set	trap, %g1
	rdpr	%tt, %g3
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	sub	%g0, 1, %g4
	SET_SIZE(ce_err_tl1)

#endif	/* lint */


#if defined(lint)

void
async_err(void)
{}

#else	/* lint */

/*
 * async_err is the default handler for IAE/DAE traps.
 * For OPL, we patch in the right handler at start of day.
 * But if an IAE/DAE trap gets generated before the handler
 * is patched, panic.
 */
	ENTRY_NP(async_err)
	set	trap, %g1
	rdpr	%tt, %g3
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	sub	%g0, 1, %g4
	SET_SIZE(async_err)

#endif	/* lint */
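/*
 * Illustrative note (not part of the build): the panic stubs above all
 * use the common sun4u sys_trap() register convention, roughly:
 *
 *	%g1 = TL=0 handler to run (here the generic trap() routine)
 *	%g2, %g3 = handler arguments (here %g3 = trap type from %tt)
 *	%g4 = PIL to run at (-1 asks sys_trap() to choose one)
 *
 * i.e. conceptually sys_trap(handler, arg1, arg2, pil).
 */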

#if defined(lint)
void
opl_sync_trap(void)
{}
#else	/* lint */

	.seg	".data"
	.global	opl_clr_freg
	.global opl_cpu0_err_log

	.align	16
opl_clr_freg:
	.word	0
	.align	16

	.align	MMU_PAGESIZE
opl_cpu0_err_log:
	.skip	MMU_PAGESIZE

/*
 * Common synchronous error trap handler (tt=0xA, 0x32)
 * All TL=0 and TL>0 0xA and 0x32 traps vector to this handler.
 * The error handling can be best summarized as follows:
 * 0. Do TRAPTRACE if enabled.
 * 1. Save globals %g1, %g2 & %g3 onto the scratchpad regs.
 * 2. The SFSR register is read and verified as valid by checking
 *    that the SFSR.FV bit is set. If SFSR.FV is not set, the
 *    error cases cannot be decoded/determined, and the SFPAR
 *    register that contains the physical faultaddr is also
 *    not valid. Also, SFPAR is only valid for UE/TO/BERR error
 *    cases. Assuming the SFSR.FV is valid:
 *    - BERR(bus error)/TO(timeout)/UE case
 *      If any of these error cases are detected, read the SFPAR
 *      to get the faultaddress. Generate ereport.
 *    - TLB Parity case (only recoverable case)
 *      For DAE, read SFAR for the faultaddress. For IAE,
 *      use %tpc for faultaddress (SFAR is not valid in IAE)
 *      Flush all the tlbs.
 *      Subtract one from the recoverable error count stored in
 *      the error log scratch register. If the threshold limit
 *      is reached (zero) - generate ereport. Else
 *      restore globals and retry (no ereport is generated).
 *    - TLB Multiple hits
 *      For DAE, read SFAR for the faultaddress. For IAE,
 *      use %tpc for faultaddress (SFAR is not valid in IAE).
 *      Flush all tlbs and generate ereport.
 * 3. TL=0 and TL>0 considerations
 *    - Since both TL=0 & TL>1 traps are made to vector into
 *      the same handler, the underlying assumption/design here is
 *      that any nested error condition (if it happens) occurs only
 *      in the handler and the system is assumed to eventually
 *      Red-mode. With this philosophy in mind, the recoverable
 *      TLB Parity error case never checks the TL level before it
 *      retries. Note that this is ok for the TL>1 case (assuming we
 *      don't have a nested error) since we always save the globals
 *      %g1, %g2 & %g3 whenever we enter this trap handler.
 *    - Additional TL=0 vs TL>1 handling includes:
 *      - For a UE error occurring under TL>1, special handling
 *        is added to prevent the unlikely chance of a cpu-lockup
 *        when a UE was originally detected in the user stack and
 *        the spill trap handler taken from sys_trap() happened
 *        to reference the same UE location. Under the above
 *        condition (TL>1 and UE error), paranoid code is added
 *        to reset window regs so that spill traps can't happen
 *        during the unwind back to TL=0 handling.
 *        Note that we can do that because we are not returning
 *        back.
 * 4. Ereport generation.
 *    - Ereport generation is performed when we unwind to the TL=0
 *      handling code via sys_trap(). on_trap()/lofault protection
 *      will apply there.
 *
 */
	ENTRY_NP(opl_sync_trap)
#ifdef	TRAPTRACE
	OPL_TRAPTRACE(%g1, %g2, %g3, opl_sync_trap_lb)
	rdpr	%tt, %g1
#endif	/* TRAPTRACE */
	cmp	%g1, T_INSTR_ERROR
	bne,pt	%xcc, 0f
	mov	MMU_SFSR, %g3
	ldxa	[%g3]ASI_IMMU, %g1	! IAE trap case tt = 0xa
	andcc	%g1, SFSR_FV, %g0
	bz,a,pn %xcc, 2f		! Branch if SFSR is invalid and
	rdpr	%tpc, %g2		! use %tpc for faultaddr instead

	sethi	%hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
	andcc	%g1, %g3, %g0		! Check for UE/BERR/TO errors
	bz,a,pt	%xcc, 1f		! Branch if not UE/BERR/TO and
	rdpr	%tpc, %g2		! use %tpc as faultaddr
	set	OPL_MMU_SFPAR, %g3	! In the UE/BERR/TO cases, use
	ba,pt	%xcc, 2f		! SFPAR as faultaddr
	ldxa	[%g3]ASI_IMMU, %g2
0:
	ldxa	[%g3]ASI_DMMU, %g1	! DAE trap case tt = 0x32
	andcc	%g1, SFSR_FV, %g0
	bnz,pt	%xcc, 7f		! branch if SFSR.FV is valid
	mov	MMU_SFAR, %g2		! set %g2 to use SFAR
	ba,pt	%xcc, 2f		! SFSR.FV is not valid, read SFAR
	ldxa	[%g2]ASI_DMMU, %g2	! for faultaddr
7:
	sethi	%hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
	andcc	%g1, %g3, %g0			! Check UE/BERR/TO for valid SFPAR
	movnz	%xcc, OPL_MMU_SFPAR, %g2	! Use SFPAR instead of SFAR for
	ldxa	[%g2]ASI_DMMU, %g2		! faultaddr
1:
	sethi	%hi(SFSR_TLB_PRT), %g3
	andcc	%g1, %g3, %g0
	bz,pt	%xcc, 8f		! branch for TLB multi-hit check
	nop
	/*
	 * This is the TLB parity error case and it is the
	 * only retryable error case.
	 * Only %g1, %g2 and %g3 are allowed
	 */
	FLUSH_ALL_TLB(%g3)
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g3	! Read errlog scratchreg
	and	%g3, ERRLOG_REG_NUMERR_MASK, %g3 ! Extract the error count
	subcc	%g3, 1, %g0		! Subtract one from the count
	bz,pn	%xcc, 2f		! too many TLB parity errs in a certain
	nop				! period, branch to generate ereport
	LOG_SYNC_REG(%g1, %g2, %g3)	! Record into the error log
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g2
	sub	%g2, 1, %g2		! decrement error counter by 1
	stxa	%g2, [%g3]ASI_SCRATCHPAD	! update the errlog scratchreg
	OPL_RESTORE_GLOBAL(%g1, %g2, %g3)
	retry
8:
	sethi	%hi(SFSR_TLB_MUL), %g3
	andcc	%g1, %g3, %g0
	bz,pt	%xcc, 2f		! check for the TLB multi-hit errors
	nop
	FLUSH_ALL_TLB(%g3)
2:
	/*
	 * non-retryable error handling
	 * now we can use other registers since
	 * we will not be returning back
	 */
	mov	%g1, %g5		! %g5 = SFSR
	mov	%g2, %g6		! %g6 = SFPAR or SFAR/tpc
	LOG_SYNC_REG(%g1, %g2, %g3)	! Record into the error log

	/*
	 * Special case for UE on user stack.
	 * There is a possibility that the same error may come back here
	 * by touching the same UE in the spill trap handler taken from
	 * sys_trap(). It ends up with an infinite loop causing a cpu lockup.
	 * Conditions for handling this case are:
	 *	- SFSR_FV is valid and SFSR_UE is set
	 *	- we are at TL > 1
	 * If the above conditions are true, we force %cansave to be a
	 * big number to prevent spill traps in sys_trap(). Note that
	 * we will not be returning back.
	 */
	rdpr	%tt, %g4		! %g4 == ttype
	rdpr	%tl, %g1		! %g1 == tl
	cmp	%g1, 1			! Check if TL == 1
	be,pt	%xcc, 3f		! branch if we came from TL=0
	nop
	andcc	%g5, SFSR_FV, %g0	! see if SFSR.FV is valid
	bz,pn	%xcc, 4f		! branch, checking UE is meaningless
	sethi	%hi(SFSR_UE), %g2
	andcc	%g5, %g2, %g0		! check for UE
	bz,pt	%xcc, 4f		! branch if not UE
	nop
	RESET_WINREG(%g1)		! reset windows to prevent spills
4:
	RESET_USER_RTT_REGS(%g2, %g3, 5f)
5:
	mov	%g5, %g3		! pass SFSR to the 3rd arg
	mov	%g6, %g2		! pass SFAR to the 2nd arg
	set	opl_cpu_isync_tl1_error, %g1
	set	opl_cpu_dsync_tl1_error, %g6
	cmp	%g4, T_INSTR_ERROR
	movne	%icc, %g6, %g1
	ba,pt	%icc, 6f
	nop
3:
	mov	%g5, %g3		! pass SFSR to the 3rd arg
	mov	%g6, %g2		! pass SFAR to the 2nd arg
	set	opl_cpu_isync_tl0_error, %g1
	set	opl_cpu_dsync_tl0_error, %g6
	cmp	%g4, T_INSTR_ERROR
	movne	%icc, %g6, %g1
6:
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	mov	PIL_15, %g4
	SET_SIZE(opl_sync_trap)
#endif	/* lint */
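/*
 * Illustrative sketch (not part of the build): the retry-threshold
 * test in the TLB parity path of opl_sync_trap() above, as pseudocode
 * (hypothetical ldxa()/stxa() helpers, placeholder routines):
 *
 *	uint64_t errlog = ldxa(ASI_SCRATCHPAD, OPL_SCRATCHPAD_ERRLOG);
 *	if ((errlog & ERRLOG_REG_NUMERR_MASK) == 1) {
 *		generate_ereport();		// budget exhausted
 *	} else {
 *		stxa(ASI_SCRATCHPAD, OPL_SCRATCHPAD_ERRLOG, errlog - 1);
 *		retry();			// recovered, no ereport
 *	}
 */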

#if defined(lint)
void
opl_uger_trap(void)
{}
#else	/* lint */
/*
 * Common Urgent error trap handler (tt=0x40)
 * All TL=0 and TL>0 0x40 traps vector to this handler.
 * The error handling can be best summarized as follows:
 * 1. Read the Urgent error status register (UGERSR)
 *    Faultaddress is N/A here and it is not collected.
 * 2. Check to see if we have a multiple errors case
 *    If so, we enable the WEAK_ED (weak error detection) bit
 *    to prevent any potential error storms and branch directly
 *    to generate ereport. (we don't decode/handle individual
 *    error cases when we get a multiple error situation)
 * 3. Now look for the recoverable error cases which include
 *    IUG_DTLB, IUG_ITLB or COREERR errors. If any of the
 *    recoverable errors are detected, do the following:
 *    - Flush all tlbs.
 *    - Verify that we came from TL=0, if not, generate
 *      ereport. Note that the reason we don't recover
 *      at TL>0 is because the AGs might be corrupted or
 *      inconsistent. We can't save/restore them into
 *      the scratchpad regs like we did for opl_sync_trap().
 *    - Check the INSTEND[5:4] bits in the UGERSR. If the
 *      value is 0x3 (11b), this error is not recoverable.
 *      Generate ereport.
 *    - Subtract one from the recoverable error count stored in
 *      the error log scratch register. If the threshold limit
 *      is reached (zero) - generate ereport.
 *    - If the count is within the limit, update the count
 *      in the error log register (subtract one). Log the error
 *      info in the log buffer. Capture traptrace if enabled.
 *      Retry (no ereport generated)
 * 4. The rest of the error cases are unrecoverable and will
 *    be handled accordingly (flushing regs, etc. as required).
 *    For details on these error cases (UGER_CRE, UGER_CTXT, etc..)
 *    consult the OPL cpu/mem philosophy doc.
 *    Ereport will be generated for these errors.
 * 5. Ereport generation.
 *    - Ereport generation for an urgent error trap always
 *      results in a panic when we unwind to the TL=0 handling
 *      code via sys_trap(). on_trap()/lofault protection do
 *      not apply there.
 */
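/*
 * Illustrative sketch (not part of the build) of the triage performed
 * by the handler below, as pseudocode with hypothetical helpers:
 *
 *	uint64_t uger = ldxa(ASI_AFSR, ASI_UGERSR);
 *	if (uger & UGESR_MULTI) {
 *		enable_weak_ed();		// damp error storms
 *		panic_via_sys_trap();
 *	} else if ((uger & UGESR_CAN_RECOVER) && tl == 1 &&
 *	    ((uger >> 4) & 3) != 3 && errlog_count_remaining()) {
 *		flush_tlbs_log_and_retry();	// recoverable
 *	} else {
 *		reset_damaged_state();		// per-case resets below
 *		panic_via_sys_trap();
 *	}
 */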
	ENTRY_NP(opl_uger_trap)
	set	ASI_UGERSR, %g2
	ldxa	[%g2]ASI_AFSR, %g1		! Read the UGERSR reg

	set	UGESR_MULTI, %g2
	andcc	%g1, %g2, %g0			! Check for Multi-errs
	bz,pt	%xcc, opl_uger_is_recover	! branch if not Multi-errs
	nop
	set	AFSR_ECR, %g2
	ldxa	[%g2]ASI_AFSR, %g3		! Enable Weak error
	or	%g3, ASI_ECR_WEAK_ED, %g3	! detect mode to prevent
	stxa	%g3, [%g2]ASI_AFSR		! potential error storms
	ba	%xcc, opl_uger_panic1
	nop

opl_uger_is_recover:
	set	UGESR_CAN_RECOVER, %g2		! Check for recoverable
	andcc	%g1, %g2, %g0			! errors i.e.IUG_DTLB,
	bz,pt	%xcc, opl_uger_cre		! IUG_ITLB or COREERR
	nop

	/*
	 * Fall thru to handle recoverable case
	 * Need to do the following additional checks to determine
	 * if this is indeed recoverable.
	 * 1. Error trap came from TL=0 and
	 * 2. INSTEND[5:4] bits in UGERSR is not 0x3
	 * 3. Recoverable error count limit not reached
	 *
	 */
	FLUSH_ALL_TLB(%g3)
	rdpr	%tl, %g3		! Read TL
	cmp	%g3, 1			! Check if we came from TL=0
	bne,pt	%xcc, opl_uger_panic	! branch if came from TL>0
	nop
	srlx	%g1, 4, %g2		! shift INSTEND[5:4] -> [1:0]
	and	%g2, 3, %g2		! extract the shifted [1:0] bits
	cmp	%g2, 3			! check if INSTEND is recoverable
	be,pt	%xcc, opl_uger_panic	! panic if ([1:0] = 11b)
	nop
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g2	! Read errlog scratch reg
	and	%g2, ERRLOG_REG_NUMERR_MASK, %g3 ! Extract error count and
	subcc	%g3, 1, %g3			! subtract one from it
	bz,pt	%xcc, opl_uger_panic	! If count reached zero, too many
	nop				! errors, branch to generate ereport
	sub	%g2, 1, %g2			! Subtract one from the count
	set	OPL_SCRATCHPAD_ERRLOG, %g3	! and write back the updated
	stxa	%g2, [%g3]ASI_SCRATCHPAD	! count into the errlog reg
	LOG_UGER_REG(%g1, %g2, %g3)		! Log the error info
#ifdef	TRAPTRACE
	OPL_TRAPTRACE(%g1, %g2, %g3, opl_uger_trap_lb)
#endif	/* TRAPTRACE */
	retry					! retry - no ereport

	/*
	 * Process the rest of the unrecoverable error cases
	 * All error cases below ultimately branch to either
	 * opl_uger_panic or opl_uger_panic1.
	 * opl_uger_panic1 is the same as opl_uger_panic except
	 * for the additional execution of the RESET_TO_PRIV()
	 * macro that does a heavy handed reset. Read the
	 * comments for RESET_TO_PRIV() macro for more info.
	 */
opl_uger_cre:
	set	UGESR_IAUG_CRE, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_ctxt
	nop
	IAG_CRE(%g2, %g3)
	set	AFSR_ECR, %g2
	ldxa	[%g2]ASI_AFSR, %g3
	or	%g3, ASI_ECR_WEAK_ED, %g3
	stxa	%g3, [%g2]ASI_AFSR
	ba	%xcc, opl_uger_panic
	nop

opl_uger_ctxt:
	set	UGESR_IAUG_TSBCTXT, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_tsbp
	nop
	GET_CPU_IMPL(%g2)
	cmp	%g2, JUPITER_IMPL
	bne	%xcc, 1f
	nop
	RESET_SHARED_CTXT(%g2)
1:
	RESET_MMU_REGS(%g2, %g3, %g4)
	ba	%xcc, opl_uger_panic
	nop

opl_uger_tsbp:
	set	UGESR_IUG_TSBP, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_pstate
	nop
	GET_CPU_IMPL(%g2)
	cmp	%g2, JUPITER_IMPL
	bne	%xcc, 1f
	nop
	RESET_TSB_PREFETCH(%g2)
1:
	RESET_TSB_TAGPTR(%g2)

	/*
	 * IUG_TSBP error may corrupt MMU registers
	 * Reset them here.
	 */
	RESET_MMU_REGS(%g2, %g3, %g4)
	ba	%xcc, opl_uger_panic
	nop

opl_uger_pstate:
	set	UGESR_IUG_PSTATE, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_tstate
	nop
	RESET_CUR_TSTATE(%g2)
	ba	%xcc, opl_uger_panic1
	nop

opl_uger_tstate:
	set	UGESR_IUG_TSTATE, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_f
	nop
	RESET_PREV_TSTATE(%g2, %g3, opl_uger_tstate_1)
	ba	%xcc, opl_uger_panic1
	nop

opl_uger_f:
	set	UGESR_IUG_F, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_r
	nop
	CLEAR_FPREGS(%g2)
	ba	%xcc, opl_uger_panic
	nop

opl_uger_r:
	set	UGESR_IUG_R, %g2
	andcc	%g1, %g2, %g0
	bz,pt	%xcc, opl_uger_panic1
	nop
	CLEAR_GEN_REGS(%g2, opl_uger_r_1)
	ba	%xcc, opl_uger_panic1
	nop

opl_uger_panic:
	mov	%g1, %g2			! %g2 = arg #1
	LOG_UGER_REG(%g1, %g3, %g4)
	ba	%xcc, opl_uger_panic_cmn
	nop

opl_uger_panic1:
	mov	%g1, %g2			! %g2 = arg #1
	LOG_UGER_REG(%g1, %g3, %g4)
	RESET_TO_PRIV(%g1, %g3, %g4, %l0)

	/*
	 * Set up the argument for sys_trap.
	 * %g2 = arg #1 already set above
	 */
opl_uger_panic_cmn:
	RESET_USER_RTT_REGS(%g4, %g5, 1f)
1:
	rdpr	%tl, %g3			! arg #2
	set	opl_cpu_urgent_error, %g1	! pc
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	mov	PIL_15, %g4
	SET_SIZE(opl_uger_trap)
#endif	/* lint */

#if defined(lint)
void
opl_ta3_trap(void)
{}
void
opl_cleanw_subr(void)
{}
#else	/* lint */
/*
 * OPL ta3 support (note, please, that the win_reg
 * save area for each cpu is 2^7 bytes)
 */

#define	RESTORE_WREGS(tmp1, tmp2)			\
	CPU_INDEX(tmp1, tmp2)				;\
	sethi	%hi(opl_ta3_save), tmp2			;\
	ldx	[tmp2 + %lo(opl_ta3_save)], tmp2	;\
	sllx	tmp1, 7, tmp1				;\
	add	tmp2, tmp1, tmp2			;\
	ldx	[tmp2 + 0], %l0				;\
	ldx	[tmp2 + 8], %l1				;\
	ldx	[tmp2 + 16], %l2			;\
	ldx	[tmp2 + 24], %l3			;\
	ldx	[tmp2 + 32], %l4			;\
	ldx	[tmp2 + 40], %l5			;\
	ldx	[tmp2 + 48], %l6			;\
	ldx	[tmp2 + 56], %l7			;\
	ldx	[tmp2 + 64], %i0			;\
	ldx	[tmp2 + 72], %i1			;\
	ldx	[tmp2 + 80], %i2			;\
	ldx	[tmp2 + 88], %i3			;\
	ldx	[tmp2 + 96], %i4			;\
	ldx	[tmp2 + 104], %i5			;\
	ldx	[tmp2 + 112], %i6			;\
	ldx	[tmp2 + 120], %i7

#define	SAVE_WREGS(tmp1, tmp2)				\
	CPU_INDEX(tmp1, tmp2)				;\
	sethi	%hi(opl_ta3_save), tmp2			;\
	ldx	[tmp2 + %lo(opl_ta3_save)], tmp2	;\
	sllx	tmp1, 7, tmp1				;\
	add	tmp2, tmp1, tmp2			;\
	stx	%l0, [tmp2 + 0]				;\
	stx	%l1, [tmp2 + 8]				;\
	stx	%l2, [tmp2 + 16]			;\
	stx	%l3, [tmp2 + 24]			;\
	stx	%l4, [tmp2 + 32]			;\
	stx	%l5, [tmp2 + 40]			;\
	stx	%l6, [tmp2 + 48]			;\
	stx	%l7, [tmp2 + 56]			;\
	stx	%i0, [tmp2 + 64]			;\
	stx	%i1, [tmp2 + 72]			;\
	stx	%i2, [tmp2 + 80]			;\
	stx	%i3, [tmp2 + 88]			;\
	stx	%i4, [tmp2 + 96]			;\
	stx	%i5, [tmp2 + 104]			;\
	stx	%i6, [tmp2 + 112]			;\
	stx	%i7, [tmp2 + 120]
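/*
 * Illustrative sketch (not part of the build): each strand gets a
 * 128-byte (2^7) slot in the opl_ta3_save area, one stx/ldx per
 * window register:
 *
 *	caddr_t base = opl_ta3_save;		// per-strand save buffer
 *	uint64_t *slot = (uint64_t *)(base + (cpu_index << 7));
 *	// slot[0..7] hold %l0-%l7, slot[8..15] hold %i0-%i7,
 *	// 16 registers * 8 bytes = 128 bytes per strand.
 */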

/*
 * The purpose of this function is to make sure that the restore
 * instruction after the flushw does not cause a fill trap. The sun4u
 * fill trap handler cannot handle a tlb fault of an unmapped stack
 * except at the restore instruction at user_rtt. On OPL systems the
 * stack can get unmapped between the flushw and restore instructions
 * since multiple strands share the tlb.
 */
	ENTRY_NP(opl_ta3_trap)
	set	trap, %g1
	mov	T_FLUSHW, %g3
	sub	%g0, 1, %g4
	rdpr	%cwp, %g5
	SAVE_WREGS(%g2, %g6)
	save
	flushw
	rdpr	%cwp, %g6
	wrpr	%g5, %cwp
	RESTORE_WREGS(%g2, %g5)
	wrpr	%g6, %cwp
	restored
	restore

	ba,a	fast_trap_done
	SET_SIZE(opl_ta3_trap)

	ENTRY_NP(opl_cleanw_subr)
	set	trap, %g1
	mov	T_FLUSHW, %g3
	sub	%g0, 1, %g4
	rdpr	%cwp, %g5
	SAVE_WREGS(%g2, %g6)
	save
	flushw
	rdpr	%cwp, %g6
	wrpr	%g5, %cwp
	RESTORE_WREGS(%g2, %g5)
	wrpr	%g6, %cwp
	restored
	restore
	jmp	%g7
	nop
	SET_SIZE(opl_cleanw_subr)
#endif	/* lint */

#if defined(lint)

void
opl_serr_instr(void)
{}

#else	/* lint */
/*
 * The actual trap handler for tt=0x0a, and tt=0x32
 */
	ENTRY_NP(opl_serr_instr)
	OPL_SAVE_GLOBAL(%g1,%g2,%g3)
	sethi	%hi(opl_sync_trap), %g3
	jmp	%g3 + %lo(opl_sync_trap)
	rdpr	%tt, %g1
	.align	32
	SET_SIZE(opl_serr_instr)

#endif	/* lint */

#if defined(lint)

void
opl_ugerr_instr(void)
{}

#else	/* lint */
/*
 * The actual trap handler for tt=0x40
 */
	ENTRY_NP(opl_ugerr_instr)
	sethi	%hi(opl_uger_trap), %g3
	jmp	%g3 + %lo(opl_uger_trap)
	nop
	.align	32
	SET_SIZE(opl_ugerr_instr)

#endif	/* lint */

#if defined(lint)

void
opl_ta3_instr(void)
{}

#else	/* lint */
/*
 * The actual trap handler for tt=0x103 (flushw)
 */
	ENTRY_NP(opl_ta3_instr)
	sethi	%hi(opl_ta3_trap), %g3
	jmp	%g3 + %lo(opl_ta3_trap)
	nop
	.align	32
	SET_SIZE(opl_ta3_instr)

#endif	/* lint */

#if defined(lint)

void
opl_ta4_instr(void)
{}

#else	/* lint */
/*
 * The patch for the .clean_windows code
 */
	ENTRY_NP(opl_ta4_instr)
	sethi	%hi(opl_cleanw_subr), %g3
	add	%g3, %lo(opl_cleanw_subr), %g3
	jmpl	%g3, %g7
	add	%g7, 8, %g7
	nop
	nop
	nop
	SET_SIZE(opl_ta4_instr)

#endif	/* lint */

#if defined(lint)
/*
 * Get timestamp (stick).
 */
/* ARGSUSED */
void
stick_timestamp(int64_t *ts)
{
}

#else	/* lint */

	ENTRY_NP(stick_timestamp)
	rd	STICK, %g1	! read stick reg
	sllx	%g1, 1, %g1
	srlx	%g1, 1, %g1	! clear npt bit

	retl
	stx	%g1, [%o0]	! store the timestamp
	SET_SIZE(stick_timestamp)

#endif	/* lint */
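/*
 * Illustrative note (not part of the build): the sllx/srlx pair in
 * stick_timestamp() above clears bit 63 (the non-privileged-trap bit)
 * without needing a mask register:
 *
 *	*ts = (int64_t)(((uint64_t)stick << 1) >> 1);	// drop NPT bit
 */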

#if defined(lint)
/*
 * Set STICK adjusted by skew.
 */
/* ARGSUSED */
void
stick_adj(int64_t skew)
{
}

#else	/* lint */

	ENTRY_NP(stick_adj)
	rdpr	%pstate, %g1		! save processor state
	andn	%g1, PSTATE_IE, %g3
	ba	1f			! cache align stick adj
	wrpr	%g0, %g3, %pstate	! turn off interrupts

	.align	16
1:	nop

	rd	STICK, %g4		! read stick reg
	add	%g4, %o0, %o1		! adjust stick with skew
	wr	%o1, %g0, STICK		! write stick reg

	retl
	wrpr	%g1, %pstate		! restore processor state
	SET_SIZE(stick_adj)

#endif	/* lint */

#if defined(lint)
/*
 * Debugger-specific stick retrieval
 */
/*ARGSUSED*/
int
kdi_get_stick(uint64_t *stickp)
{
	return (0);
}

#else	/* lint */

	ENTRY_NP(kdi_get_stick)
	rd	STICK, %g1
	stx	%g1, [%o0]
	retl
	mov	%g0, %o0
	SET_SIZE(kdi_get_stick)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
int
dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
{ return (0); }

#else

	ENTRY(dtrace_blksuword32)
	save	%sp, -SA(MINFRAME + 4), %sp

	rdpr	%pstate, %l1
	andn	%l1, PSTATE_IE, %l2		! disable interrupts to
	wrpr	%g0, %l2, %pstate		! protect our FPU diddling

	rd	%fprs, %l0
	andcc	%l0, FPRS_FEF, %g0
	bz,a,pt	%xcc, 1f			! if the fpu is disabled
	wr	%g0, FPRS_FEF, %fprs		! ... enable the fpu

	st	%f0, [%fp + STACK_BIAS - 4]	! save %f0 to the stack
1:
	set	0f, %l5
	/*
	 * We're about to write a block full of either total garbage
	 * (not kernel data, don't worry) or user floating-point data
	 * (so it only _looks_ like garbage).
	 */
	ld	[%i1], %f0			! modify the block
	membar	#Sync
	stn	%l5, [THREAD_REG + T_LOFAULT]	! set up the lofault handler
	stda	%d0, [%i0]ASI_BLK_COMMIT_S	! store the modified block
	membar	#Sync
	flush	%i0				! flush instruction pipeline
	stn	%g0, [THREAD_REG + T_LOFAULT]	! remove the lofault handler

	bz,a,pt	%xcc, 1f
	wr	%g0, %l0, %fprs			! restore %fprs

	ld	[%fp + STACK_BIAS - 4], %f0	! restore %f0
1:

	wrpr	%g0, %l1, %pstate		! restore interrupts

	ret
	restore	%g0, %g0, %o0

0:
	membar	#Sync
	stn	%g0, [THREAD_REG + T_LOFAULT]	! remove the lofault handler

	bz,a,pt	%xcc, 1f
	wr	%g0, %l0, %fprs			! restore %fprs

	ld	[%fp + STACK_BIAS - 4], %f0	! restore %f0
1:

	wrpr	%g0, %l1, %pstate		! restore interrupts

	/*
	 * If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
	 * which deals with watchpoints. Otherwise, just return -1.
	 */
	brnz,pt	%i2, 1f
	nop
	ret
	restore	%g0, -1, %o0
1:
	call	dtrace_blksuword32_err
	restore

	SET_SIZE(dtrace_blksuword32)
#endif /* lint */

#if defined(lint)
/*ARGSUSED*/
void
ras_cntr_reset(void *arg)
{
}
#else
	ENTRY_NP(ras_cntr_reset)
	set	OPL_SCRATCHPAD_ERRLOG, %o1
	ldxa	[%o1]ASI_SCRATCHPAD, %o0
	or	%o0, ERRLOG_REG_NUMERR_MASK, %o0
	retl
	stxa	%o0, [%o1]ASI_SCRATCHPAD
	SET_SIZE(ras_cntr_reset)
#endif /* lint */

#if defined(lint)
/* ARGSUSED */
void
opl_error_setup(uint64_t cpu_err_log_pa)
{
}

#else	/* lint */
	ENTRY_NP(opl_error_setup)
	/*
	 * Initialize the error log scratchpad register
	 */
	ldxa	[%g0]ASI_EIDR, %o2
	sethi	%hi(ERRLOG_REG_EIDR_MASK), %o1
	or	%o1, %lo(ERRLOG_REG_EIDR_MASK), %o1
	and	%o2, %o1, %o3
	sllx	%o3, ERRLOG_REG_EIDR_SHIFT, %o2
	or	%o2, %o0, %o3
	or	%o3, ERRLOG_REG_NUMERR_MASK, %o0
	set	OPL_SCRATCHPAD_ERRLOG, %o1
	stxa	%o0, [%o1]ASI_SCRATCHPAD
	/*
	 * Disable all restrainable error traps
	 */
	mov	AFSR_ECR, %o1
	ldxa	[%o1]ASI_AFSR, %o0
	andn	%o0, ASI_ECR_RTE_UE|ASI_ECR_RTE_CEDG, %o0
	retl
	stxa	%o0, [%o1]ASI_AFSR
	SET_SIZE(opl_error_setup)
#endif /* lint */
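/*
 * Illustrative sketch (not part of the build): opl_error_setup() above
 * packs the errlog scratchpad register as
 *
 *	errlog = ((eidr & ERRLOG_REG_EIDR_MASK) << ERRLOG_REG_EIDR_SHIFT) |
 *	    cpu_err_log_pa | ERRLOG_REG_NUMERR_MASK;
 *
 * i.e. the CPU id in the high bits, the per-cpu log buffer PA in the
 * middle, and the recoverable-error budget initialized to its maximum
 * in the low bits (the same field that ras_cntr_reset() refills).
 */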

#if defined(lint)
/* ARGSUSED */
void
opl_mpg_enable(void)
{
}
#else	/* lint */
	ENTRY_NP(opl_mpg_enable)
	/*
	 * Enable MMU translating multiple page sizes for
	 * sITLB and sDTLB.
	 */
	mov	LSU_MCNTL, %o0
	ldxa	[%o0] ASI_MCNTL, %o1
	or	%o1, MCNTL_MPG_SITLB | MCNTL_MPG_SDTLB, %o1
	retl
	stxa	%o1, [%o0] ASI_MCNTL
	SET_SIZE(opl_mpg_enable)
#endif	/* lint */

#if defined(lint)
/*
 * This function is called for each (enabled) CPU. We use it to
 * initialize error handling related registers.
 */
/*ARGSUSED*/
void
cpu_feature_init(void)
{}
#else	/* lint */
	ENTRY(cpu_feature_init)
	!
	! get the device_id and store the device_id
	! in the appropriate cpunodes structure
	! given the cpu's index
	!
	CPU_INDEX(%o0, %o1)
	mulx	%o0, CPU_NODE_SIZE, %o0
	set	cpunodes + DEVICE_ID, %o1
	ldxa	[%g0] ASI_DEVICE_SERIAL_ID, %o2
	stx	%o2, [%o0 + %o1]
	!
	! initialize CPU registers
	!
	ba	opl_cpu_reg_init
	nop
	SET_SIZE(cpu_feature_init)
#endif	/* lint */

#if defined(lint)

void
cpu_clearticknpt(void)
{}

#else	/* lint */
	/*
	 * Clear the NPT (non-privileged trap) bit in the %tick/%stick
	 * registers. In an effort to make the change in the
	 * tick/stick counter as consistent as possible, we disable
	 * all interrupts while we're changing the registers. We also
	 * ensure that the read and write instructions are in the same
	 * line in the instruction cache.
	 */
	ENTRY_NP(cpu_clearticknpt)
	rdpr	%pstate, %g1		/* save processor state */
	andn	%g1, PSTATE_IE, %g3	/* turn off */
	wrpr	%g0, %g3, %pstate	/* interrupts */
	rdpr	%tick, %g2		/* get tick register */
	brgez,pn %g2, 1f		/* if NPT bit off, we're done */
	mov	1, %g3			/* create mask */
	sllx	%g3, 63, %g3		/* for NPT bit */
	ba,a,pt	%xcc, 2f
	.align	8			/* Ensure rd/wr in same i$ line */
2:
	rdpr	%tick, %g2		/* get tick register */
	wrpr	%g3, %g2, %tick		/* write tick register, */
					/* clearing NPT bit */
1:
	rd	STICK, %g2		/* get stick register */
	brgez,pn %g2, 3f		/* if NPT bit off, we're done */
	mov	1, %g3			/* create mask */
	sllx	%g3, 63, %g3		/* for NPT bit */
	ba,a,pt	%xcc, 4f
	.align	8			/* Ensure rd/wr in same i$ line */
4:
	rd	STICK, %g2		/* get stick register */
	wr	%g3, %g2, STICK		/* write stick register, */
					/* clearing NPT bit */
3:
	jmp	%g4 + 4
	wrpr	%g0, %g1, %pstate	/* restore processor state */

	SET_SIZE(cpu_clearticknpt)

#endif	/* lint */

#if defined(lint)

void
cpu_halt_cpu(void)
{}

void
cpu_smt_pause(void)
{}

#else	/* lint */

	/*
	 * Halt the current strand with the suspend instruction.
	 * The compiler/asm currently does not support this suspend
	 * instruction mnemonic, use byte code for now.
	 */
	ENTRY_NP(cpu_halt_cpu)
	.word	0x81b01040
	retl
	nop
	SET_SIZE(cpu_halt_cpu)

	/*
	 * Pause the current strand with the sleep instruction.
	 * The compiler/asm currently does not support this sleep
	 * instruction mnemonic, use byte code for now.
	 */
	ENTRY_NP(cpu_smt_pause)
	.word	0x81b01060
	retl
	nop
	SET_SIZE(cpu_smt_pause)

#endif	/* lint */