/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Assembly code support for the Cheetah+ module
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if !defined(lint)
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/machtrap.h>
#include <sys/privregs.h>
#include <sys/trap.h>
#include <sys/cheetahregs.h>
#include <sys/us3_module.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>
#include <sys/clock.h>
#include <sys/cheetahasm.h>
#include <sys/cmpregs.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */


#if !defined(lint)

/* BEGIN CSTYLED */

/*
 * Cheetah+ version to reflush an Ecache line by index.
 *
 * By default we assume the Ecache is 2-way so we flush both
 * ways.  Even if the cache is direct-mapped no harm will come
 * from performing the flush twice, apart from perhaps a performance
 * penalty.
 *
 * XXX - scr2 not used.
 */
#define	ECACHE_REFLUSH_LINE(ec_set_size, index, scr2)			\
	ldxa	[index]ASI_EC_DIAG, %g0;				\
	ldxa	[index + ec_set_size]ASI_EC_DIAG, %g0;

/*
 * Cheetah+ version of ecache_flush_line.  Uses Cheetah+ Ecache
 * Displacement Flush feature.
 */
#define	ECACHE_FLUSH_LINE(physaddr, ec_set_size, scr1, scr2)		\
	sub	ec_set_size, 1, scr1;					\
	and	physaddr, scr1, scr1;					\
	set	CHP_ECACHE_IDX_DISP_FLUSH, scr2;			\
	or	scr2, scr1, scr1;					\
	ECACHE_REFLUSH_LINE(ec_set_size, scr1, scr2)

/* END CSTYLED */
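
/*
 * Illustrative sketch (not built): the displacement-flush address that
 * ECACHE_FLUSH_LINE computes, expressed as C pseudocode.  The helper
 * name below is hypothetical.
 *
 *	uint64_t
 *	chp_disp_flush_addr(uint64_t physaddr, uint64_t ec_set_size)
 *	{
 *		uint64_t index = physaddr & (ec_set_size - 1);
 *		return (CHP_ECACHE_IDX_DISP_FLUSH | index);
 *	}
 *
 * A diagnostic load (ldxa from ASI_EC_DIAG) at that address, and at the
 * same address plus ec_set_size for the second way, displaces the line
 * from the Ecache.
 */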

/*
 * Panther version to reflush a line from both the L2 cache and L3
 * cache by the respective indexes.  Flushes all ways of the line from
 * each cache.  Note that a short countdown loop separates the L2
 * flush loop from the L3 flush loop.
 *
 * l2_index	Index into the L2$ of the line to be flushed.  This
 *		register will not be modified by this routine.
 * l3_index	Index into the L3$ of the line to be flushed.  This
 *		register will not be modified by this routine.
 * scr2		scratch register.
 * scr3		scratch register.
 *
 */
#define	PN_ECACHE_REFLUSH_LINE(l2_index, l3_index, scr2, scr3)		\
	set	PN_L2_MAX_SET, scr2;					\
	set	PN_L2_SET_SIZE, scr3;					\
1:									\
	ldxa	[l2_index + scr2]ASI_L2_TAG, %g0;			\
	cmp	scr2, %g0;						\
	bg,a	1b;							\
	sub	scr2, scr3, scr2;					\
	mov	6, scr2;						\
7:									\
	cmp	scr2, %g0;						\
	bg,a	7b;							\
	sub	scr2, 1, scr2;						\
	set	PN_L3_MAX_SET, scr2;					\
	set	PN_L3_SET_SIZE, scr3;					\
2:									\
	ldxa	[l3_index + scr2]ASI_EC_DIAG, %g0;			\
	cmp	scr2, %g0;						\
	bg,a	2b;							\
	sub	scr2, scr3, scr2;

/*
 * Panther version of ecache_flush_line.  Flushes the line corresponding
 * to physaddr from both the L2 cache and the L3 cache.
 *
 * physaddr	Input: Physical address to flush.
 *		Output: Physical address to flush (preserved).
 * l2_idx_out	Input: scratch register.
 *		Output: Index into the L2$ of the line to be flushed.
 * l3_idx_out	Input: scratch register.
 *		Output: Index into the L3$ of the line to be flushed.
 * scr3		scratch register.
 * scr4		scratch register.
 *
 */
#define	PN_ECACHE_FLUSH_LINE(physaddr, l2_idx_out, l3_idx_out, scr3, scr4)	\
	set	PN_L3_SET_SIZE, l2_idx_out;				\
	sub	l2_idx_out, 1, l2_idx_out;				\
	and	physaddr, l2_idx_out, l3_idx_out;			\
	set	PN_L3_IDX_DISP_FLUSH, l2_idx_out;			\
	or	l2_idx_out, l3_idx_out, l3_idx_out;			\
	set	PN_L2_SET_SIZE, l2_idx_out;				\
	sub	l2_idx_out, 1, l2_idx_out;				\
	and	physaddr, l2_idx_out, l2_idx_out;			\
	set	PN_L2_IDX_DISP_FLUSH, scr3;				\
	or	l2_idx_out, scr3, l2_idx_out;				\
	PN_ECACHE_REFLUSH_LINE(l2_idx_out, l3_idx_out, scr3, scr4)

#endif	/* !lint */
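
/*
 * Illustrative sketch (not built): the way-walk that
 * PN_ECACHE_REFLUSH_LINE performs on each cache, as C pseudocode.
 * diag_load() stands in for the ldxa from ASI_L2_TAG/ASI_EC_DIAG and
 * is hypothetical.
 *
 *	for (off = PN_L2_MAX_SET; off >= 0; off -= PN_L2_SET_SIZE)
 *		diag_load(l2_index + off);	// L2: every way
 *	for (i = 6; i > 0; i--)
 *		;				// short delay
 *	for (off = PN_L3_MAX_SET; off >= 0; off -= PN_L3_SET_SIZE)
 *		diag_load(l3_index + off);	// L3: every way
 */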

/*
 * Fast ECC error at TL>0 handler
 * We get here via trap 70 at TL>0->Software trap 0 at TL>0.  We enter
 * this routine with %g1 and %g2 already saved in %tpc, %tnpc and %tstate.
 * For a complete description of the Fast ECC at TL>0 handling see the
 * comment block "Cheetah/Cheetah+ Fast ECC at TL>0 trap strategy" in
 * us3_common_asm.s
 */
#if defined(lint)

void
fast_ecc_tl1_err(void)
{}

#else	/* lint */

	.section ".text"
	.align	64
	ENTRY_NP(fast_ecc_tl1_err)

	/*
	 * This macro turns off the D$/I$ if they are on and saves their
	 * original state in ch_err_tl1_tmp, saves all the %g registers in the
	 * ch_err_tl1_data structure, updates the ch_err_tl1_flags and saves
	 * the %tpc in ch_err_tl1_tpc.  At the end of this macro, %g1 will
	 * point to the ch_err_tl1_data structure and the original D$/I$ state
	 * will be saved in ch_err_tl1_tmp.  All %g registers except for %g1
	 * will be available.
	 */
	CH_ERR_TL1_FECC_ENTER;

	/*
	 * Get the diagnostic logout data.  %g4 must be initialized to
	 * current CEEN state, %g5 must point to logout structure in
	 * ch_err_tl1_data_t.  %g3 will contain the nesting count upon
	 * return.
	 */
	ldxa	[%g0]ASI_ESTATE_ERR, %g4
	and	%g4, EN_REG_CEEN, %g4
	add	%g1, CH_ERR_TL1_LOGOUT, %g5
	DO_TL1_CPU_LOGOUT(%g3, %g2, %g4, %g5, %g6, %g3, %g4)

	/*
	 * If the logout nesting count is exceeded, we're probably
	 * not making any progress, try to panic instead.
	 */
	cmp	%g3, CLO_NESTING_MAX
	bge	fecc_tl1_err
	nop

	/*
	 * Save the current CEEN and NCEEN state in %g7 and turn them off
	 * before flushing the Ecache.
	 */
	ldxa	[%g0]ASI_ESTATE_ERR, %g7
	andn	%g7, EN_REG_CEEN | EN_REG_NCEEN, %g5
	stxa	%g5, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/*
	 * Flush the Ecache, using the largest possible cache size with the
	 * smallest possible line size since we can't get the actual sizes
	 * from the cpu_node due to DTLB misses.
	 */
	PN_L2_FLUSHALL(%g3, %g4, %g5)

	set	CH_ECACHE_MAX_SIZE, %g4
	set	CH_ECACHE_MIN_LSIZE, %g5

	GET_CPU_IMPL(%g6)
	cmp	%g6, PANTHER_IMPL
	bne	%xcc, 2f
	nop
	set	PN_L3_SIZE, %g4
2:
	mov	%g6, %g3
	CHP_ECACHE_FLUSHALL(%g4, %g5, %g3)

	/*
	 * Restore CEEN and NCEEN to the previous state.
	 */
	stxa	%g7, [%g0]ASI_ESTATE_ERR
	membar	#Sync
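
	/*
	 * Illustrative sketch (not built): the error-enable
	 * save/mask/restore idiom used around the flush above, as C
	 * pseudocode.  rd_estate_err()/wr_estate_err() stand in for the
	 * ldxa/stxa to ASI_ESTATE_ERR and are hypothetical.
	 *
	 *	uint64_t saved = rd_estate_err();
	 *	wr_estate_err(saved & ~(EN_REG_CEEN | EN_REG_NCEEN));
	 *	membar_sync();
	 *	// ... flush the Ecache ...
	 *	wr_estate_err(saved);
	 *	membar_sync();
	 */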

	/*
	 * If we turned off the D$, then flush it and turn it back on.
	 */
	ldxa	[%g1 + CH_ERR_TL1_TMP]%asi, %g3
	andcc	%g3, CH_ERR_TSTATE_DC_ON, %g0
	bz	%xcc, 3f
	nop

	/*
	 * Flush the D$.
	 */
	ASM_LD(%g4, dcache_size)
	ASM_LD(%g5, dcache_linesize)
	CH_DCACHE_FLUSHALL(%g4, %g5, %g6)

	/*
	 * Turn the D$ back on.
	 */
	ldxa	[%g0]ASI_DCU, %g3
	or	%g3, DCU_DC, %g3
	stxa	%g3, [%g0]ASI_DCU
	membar	#Sync
3:
	/*
	 * If we turned off the I$, then flush it and turn it back on.
	 */
	ldxa	[%g1 + CH_ERR_TL1_TMP]%asi, %g3
	andcc	%g3, CH_ERR_TSTATE_IC_ON, %g0
	bz	%xcc, 4f
	nop

	/*
	 * Flush the I$.  Panther has different I$ parameters, and we
	 * can't access the logout I$ params without possibly generating
	 * a MMU miss.
	 */
	GET_CPU_IMPL(%g6)
	set	PN_ICACHE_SIZE, %g3
	set	CH_ICACHE_SIZE, %g4
	mov	CH_ICACHE_LSIZE, %g5
	cmp	%g6, PANTHER_IMPL
	movz	%xcc, %g3, %g4
	movz	%xcc, PN_ICACHE_LSIZE, %g5
	CH_ICACHE_FLUSHALL(%g4, %g5, %g6, %g3)

	/*
	 * Turn the I$ back on.  Changing DCU_IC requires flush.
	 */
	ldxa	[%g0]ASI_DCU, %g3
	or	%g3, DCU_IC, %g3
	stxa	%g3, [%g0]ASI_DCU
	flush	%g0
4:

#ifdef TRAPTRACE
	/*
	 * Get current trap trace entry physical pointer.
	 */
	CPU_INDEX(%g6, %g5)
	sll	%g6, TRAPTR_SIZE_SHIFT, %g6
	set	trap_trace_ctl, %g5
	add	%g6, %g5, %g6
	ld	[%g6 + TRAPTR_LIMIT], %g5
	tst	%g5
	be	%icc, skip_traptrace
	nop
	ldx	[%g6 + TRAPTR_PBASE], %g5
	ld	[%g6 + TRAPTR_OFFSET], %g4
	add	%g5, %g4, %g5

	/*
	 * Create trap trace entry.
	 */
	rd	%asi, %g7
	wr	%g0, TRAPTR_ASI, %asi
	rd	STICK, %g4
	stxa	%g4, [%g5 + TRAP_ENT_TICK]%asi
	rdpr	%tl, %g4
	stha	%g4, [%g5 + TRAP_ENT_TL]%asi
	rdpr	%tt, %g4
	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g4
	stna	%g4, [%g5 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g4
	stxa	%g4, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
	wr	%g0, %g7, %asi
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFAR]%asi, %g3
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g4
	wr	%g0, TRAPTR_ASI, %asi
	stna	%g3, [%g5 + TRAP_ENT_F1]%asi
	stna	%g4, [%g5 + TRAP_ENT_F2]%asi
	wr	%g0, %g7, %asi
	ldxa	[%g1 + CH_ERR_TL1_AFAR]%asi, %g3
	ldxa	[%g1 + CH_ERR_TL1_AFSR]%asi, %g4
	wr	%g0, TRAPTR_ASI, %asi
	stna	%g3, [%g5 + TRAP_ENT_F3]%asi
	stna	%g4, [%g5 + TRAP_ENT_F4]%asi
	wr	%g0, %g7, %asi

	/*
	 * Advance trap trace pointer.
	 */
	ld	[%g6 + TRAPTR_OFFSET], %g5
	ld	[%g6 + TRAPTR_LIMIT], %g4
	st	%g5, [%g6 + TRAPTR_LAST_OFFSET]
	add	%g5, TRAP_ENT_SIZE, %g5
	sub	%g4, TRAP_ENT_SIZE, %g4
	cmp	%g5, %g4
	movge	%icc, 0, %g5
	st	%g5, [%g6 + TRAPTR_OFFSET]
skip_traptrace:
#endif	/* TRAPTRACE */

	/*
	 * If nesting count is not zero, skip all the AFSR/AFAR
	 * handling and just do the necessary cache-flushing.
	 */
	ldxa	[%g1 + CH_ERR_TL1_NEST_CNT]%asi, %g2
	brnz	%g2, 6f
	nop

	/*
	 * If a UCU or L3_UCU followed by a WDU has occurred go ahead
	 * and panic since a UE will occur (on the retry) before the
	 * UCU and WDU messages are enqueued.
	 */
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g3
	set	1, %g4
	sllx	%g4, C_AFSR_UCU_SHIFT, %g4
	btst	%g4, %g3			! UCU in original shadow AFSR?
	bnz	%xcc, 5f
	mov	1, %g4
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR_EXT]%asi, %g3
	sllx	%g4, C_AFSR_L3_UCU_SHIFT, %g4
	btst	%g4, %g3		! L3_UCU in original shadow AFSR_EXT?
	bz	%xcc, 6f
	nop
5:
	ldxa	[%g1 + CH_ERR_TL1_AFSR]%asi, %g4	! original AFSR
	ldxa	[%g0]ASI_AFSR, %g3		! current AFSR
	or	%g3, %g4, %g3		! %g3 = original + current AFSR
	set	1, %g4
	sllx	%g4, C_AFSR_WDU_SHIFT, %g4
	btst	%g4, %g3		! WDU in original or current AFSR?
	bnz	%xcc, fecc_tl1_err
	nop

6:
	/*
	 * We fall into this macro if we've successfully logged the error in
	 * the ch_err_tl1_data structure and want the PIL15 softint to pick
	 * it up and log it.  %g1 must point to the ch_err_tl1_data structure.
	 * Restores the %g registers and issues retry.
	 */
	CH_ERR_TL1_EXIT;

	/*
	 * Establish panic exit label.
	 */
	CH_ERR_TL1_PANIC_EXIT(fecc_tl1_err);

	SET_SIZE(fast_ecc_tl1_err)

#endif	/* lint */
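
/*
 * Illustrative sketch (not built): the UCU/L3_UCU + WDU panic decision
 * above, as C pseudocode over the saved and live status registers.
 * rd_afsr() stands in for the ldxa from ASI_AFSR and is hypothetical.
 *
 *	uint64_t ucu = 1ULL << C_AFSR_UCU_SHIFT;
 *	uint64_t l3_ucu = 1ULL << C_AFSR_L3_UCU_SHIFT;
 *	uint64_t wdu = 1ULL << C_AFSR_WDU_SHIFT;
 *
 *	if ((sdw_afsr & ucu) || (sdw_afsr_ext & l3_ucu)) {
 *		if ((saved_afsr | rd_afsr()) & wdu)
 *			goto panic;	// fecc_tl1_err
 *	}
 */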

#if defined(lint)
/*
 * scrubphys - Pass in the aligned physical memory address
 * that you want to scrub, along with the ecache set size.
 *
 *	1) Displacement flush the E$ line corresponding to %addr.
 *	   The first ldxa guarantees that the %addr is no longer in
 *	   M, O, or E (goes to I or S, if instruction fetch also happens).
 *	2) "Write" the data using a CAS %addr,%g0,%g0.
 *	   The casxa guarantees a transition from I to M or S to M.
 *	3) Displacement flush the E$ line corresponding to %addr.
 *	   The second ldxa pushes the M line out of the ecache, into the
 *	   writeback buffers, on the way to memory.
 *	4) The "membar #Sync" pushes the cache line out of the writeback
 *	   buffers onto the bus, on the way to dram finally.
 *
 * This is a modified version of the algorithm suggested by Gary Lauterbach.
 * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
 * as modified, but then we found out that for spitfire, if it misses in the
 * E$ it will probably install as an M, but if it hits in the E$, then it
 * will stay E, if the store doesn't happen.  So the first displacement flush
 * should ensure that the CAS will miss in the E$.  Arrgh.
 */
/* ARGSUSED */
void
scrubphys(uint64_t paddr, int ecache_set_size)
{}

#else	/* lint */
	ENTRY(scrubphys)
	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate	! clear IE, AM bits

	GET_CPU_IMPL(%o5)	! Panther Ecache is flushed differently
	cmp	%o5, PANTHER_IMPL
	bne	scrubphys_1
	nop
	PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o5)
	casxa	[%o0]ASI_MEM, %g0, %g0
	PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
	b	scrubphys_2
	nop
scrubphys_1:
	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
	casxa	[%o0]ASI_MEM, %g0, %g0
	ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
scrubphys_2:
	wrpr	%g0, %o4, %pstate	! restore earlier pstate register value

	retl
	membar	#Sync			! move the data out of the load buffer
	SET_SIZE(scrubphys)

#endif	/* lint */
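
/*
 * Illustrative sketch (not built): the four scrubphys steps as C
 * pseudocode.  flush_line()/cas64() stand in for the displacement
 * flush macros and the casxa, and are hypothetical.
 *
 *	flush_line(paddr);		// 1) line leaves M/O/E
 *	(void) cas64(paddr, 0, 0);	// 2) I->M (or S->M) transition
 *	flush_line(paddr);		// 3) push the M line into the
 *					//    writeback buffers
 *	membar_sync();			// 4) drain writeback buffers to
 *					//    memory
 */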

#if defined(lint)
/*
 * clearphys - Pass in the physical memory address of the checkblock
 * that you want to push out, cleared with a recognizable pattern,
 * from the ecache.
 *
 * To ensure that the ecc gets recalculated after the bad data is cleared,
 * we must write out enough data to fill the w$ line (64 bytes).  So we read
 * in an entire ecache subblock's worth of data, and write it back out.
 * Then we overwrite the 16 bytes of bad data with the pattern.
 */
/* ARGSUSED */
void
clearphys(uint64_t paddr, int ecache_set_size, int ecache_linesize)
{
}

#else	/* lint */
	ENTRY(clearphys)
	/* turn off IE, AM bits */
	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate

	/* turn off NCEEN */
	ldxa	[%g0]ASI_ESTATE_ERR, %o5
	andn	%o5, EN_REG_NCEEN, %o3
	stxa	%o3, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/* align address passed with 64 bytes subblock size */
	mov	CH_ECACHE_SUBBLK_SIZE, %o2
	andn	%o0, (CH_ECACHE_SUBBLK_SIZE - 1), %g1

	/* move the good data into the W$ */
clearphys_1:
	subcc	%o2, 8, %o2
	ldxa	[%g1 + %o2]ASI_MEM, %g2
	bge	clearphys_1
	stxa	%g2, [%g1 + %o2]ASI_MEM

	/* now overwrite the bad data */
	setx	0xbadecc00badecc01, %g1, %g2
	stxa	%g2, [%o0]ASI_MEM
	mov	8, %g1
	stxa	%g2, [%o0 + %g1]ASI_MEM

	GET_CPU_IMPL(%o3)	! Panther Ecache is flushed differently
	cmp	%o3, PANTHER_IMPL
	bne	clearphys_2
	nop
	PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %g1)
	casxa	[%o0]ASI_MEM, %g0, %g0
	PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
	b	clearphys_3
	nop
clearphys_2:
	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
	casxa	[%o0]ASI_MEM, %g0, %g0
	ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
clearphys_3:
	/* clear the AFSR */
	ldxa	[%g0]ASI_AFSR, %o1
	stxa	%o1, [%g0]ASI_AFSR
	membar	#Sync

	/* turn NCEEN back on */
	stxa	%o5, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/* return and re-enable IE and AM */
	retl
	wrpr	%g0, %o4, %pstate
	SET_SIZE(clearphys)

#endif	/* lint */
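
/*
 * Illustrative sketch (not built): the clearphys data movement as C
 * pseudocode.  ld64_phys()/st64_phys() stand in for the ASI_MEM
 * accesses and are hypothetical.
 *
 *	uint64_t base = paddr & ~(uint64_t)(CH_ECACHE_SUBBLK_SIZE - 1);
 *	for (off = CH_ECACHE_SUBBLK_SIZE - 8; off >= 0; off -= 8)
 *		st64_phys(base + off, ld64_phys(base + off));
 *	st64_phys(paddr, 0xbadecc00badecc01ULL);	// overwrite the
 *	st64_phys(paddr + 8, 0xbadecc00badecc01ULL);	// 16 bad bytes
 *	// then displacement flush the line, as in scrubphys
 */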

#if defined(lint)
/*
 * Cheetah+ Ecache displacement flush the specified line from the E$
 *
 * For Panther, this means flushing the specified line from both the
 * L2 cache and L3 cache.
 *
 * Register usage:
 *	%o0 - 64 bit physical address for flushing
 *	%o1 - Ecache set size
 */
/*ARGSUSED*/
void
ecache_flush_line(uint64_t flushaddr, int ec_set_size)
{
}
#else	/* lint */
	ENTRY(ecache_flush_line)

	GET_CPU_IMPL(%o3)	! Panther Ecache is flushed differently
	cmp	%o3, PANTHER_IMPL
	bne	ecache_flush_line_1
	nop

	PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o4)
	b	ecache_flush_line_2
	nop
ecache_flush_line_1:
	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
ecache_flush_line_2:
	retl
	nop
	SET_SIZE(ecache_flush_line)
#endif	/* lint */

#if defined(lint)
void
set_afsr_ext(uint64_t afsr_ext)
{
	afsr_ext = afsr_ext;
}
#else	/* lint */

	ENTRY(set_afsr_ext)
	set	ASI_AFSR_EXT_VA, %o1
	stxa	%o0, [%o1]ASI_AFSR	! afsr_ext reg
	membar	#Sync
	retl
	nop
	SET_SIZE(set_afsr_ext)

#endif	/* lint */

#if defined(lint)
/*
 * The CPU jumps here from the MMU exception handler if an ITLB parity
 * error is detected and we are running on Panther.
 *
 * In this routine we collect diagnostic information and write it to our
 * logout structure (if possible) and clear all ITLB entries that may have
 * caused our parity trap.
 * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
 * and log any error messages.  As for parameters to cpu_tlb_parity_error, we
 * send two:
 *
 * %g2	- Contains the VA whose lookup in the ITLB caused the parity error
 * %g3	- Contains the tlo_info field of the pn_tlb_logout logout struct,
 *	  regardless of whether or not we actually used the logout struct.
 *
 * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
 * parameters to the data contained in the logout structure in order to
 * determine whether the logout information is valid for this particular
 * error or not.
 */
void
itlb_parity_trap(void)
{}

#else	/* lint */

	ENTRY_NP(itlb_parity_trap)
	/*
	 * Collect important information about the trap which will be
	 * used as a parameter to the TL0 handler.
	 */
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g2			! VA that caused the IMMU trap
	ldxa	[MMU_TAG_ACCESS_EXT]%asi, %g3	! read the trap VA page size
	set	PN_ITLB_PGSZ_MASK, %g4
	and	%g3, %g4, %g3
	ldxa	[MMU_TAG_ACCESS]%asi, %g4
	set	TAGREAD_CTX_MASK, %g5
	and	%g4, %g5, %g4
	or	%g4, %g3, %g3			! 'or' in the trap context and
	mov	1, %g4				! add the IMMU flag to complete
	sllx	%g4, PN_TLO_INFO_IMMU_SHIFT, %g4
	or	%g4, %g3, %g3			! the tlo_info field for logout
	stxa	%g0, [MMU_SFSR]%asi		! clear the SFSR
	membar	#Sync

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Next, we calculate the TLB index value for the failing VA.
	 */
	mov	%g2, %g4			! We need the ITLB index
	set	PN_ITLB_PGSZ_MASK, %g5
	and	%g3, %g5, %g5
	srlx	%g5, PN_ITLB_PGSZ_SHIFT, %g5
	PN_GET_TLB_INDEX(%g4, %g5)		! %g4 has the index
	sllx	%g4, PN_TLB_ACC_IDX_SHIFT, %g4	! shift the index into place
	set	PN_ITLB_T512, %g5
	or	%g4, %g5, %g4			! and add in the TLB ID

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the TLB access index value for the
	 *          VA/PgSz in question
	 *
	 * Check to see if the logout structure is available.
	 */
	set	CHPR_TLB_LOGOUT, %g6
	GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, itlb_parity_trap_1)
	set	LOGOUT_INVALID_U32, %g6
	sllx	%g6, 32, %g6			! if our logout structure is
	set	LOGOUT_INVALID_L32, %g5		! unavailable or if it is
	or	%g5, %g6, %g5			! already being used, then we
	ldx	[%g1 + PN_TLO_ADDR], %g6	! don't collect any diagnostic
	cmp	%g6, %g5			! information before clearing
	bne	itlb_parity_trap_1		! and logging the error.
	nop

	/*
	 * Record the logout information.  %g4 contains our index + TLB ID
	 * for use in ASI_ITLB_ACCESS and ASI_ITLB_TAGREAD.  %g1 contains
	 * the pointer to our logout struct.
	 */
	stx	%g3, [%g1 + PN_TLO_INFO]
	stx	%g2, [%g1 + PN_TLO_ADDR]
	stx	%g2, [%g1 + PN_TLO_PC]		! %tpc == fault addr for IMMU

	add	%g1, PN_TLO_ITLB_TTE, %g1	! move up the pointer

	ldxa	[%g4]ASI_ITLB_ACCESS, %g5	! read the data
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! store it away
	ldxa	[%g4]ASI_ITLB_TAGREAD, %g5	! read the tag
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! store it away

	set	PN_TLB_ACC_WAY_BIT, %g6	! same thing again for way 1
	or	%g4, %g6, %g4
	add	%g1, CH_TLO_TTE_SIZE, %g1	! move up the pointer

	ldxa	[%g4]ASI_ITLB_ACCESS, %g5	! read the data
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! store it away
	ldxa	[%g4]ASI_ITLB_TAGREAD, %g5	! read the tag
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! store it away

	andn	%g4, %g6, %g4			! back to way 0

itlb_parity_trap_1:
	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the TLB access index value for the
	 *          VA/PgSz in question
	 *
	 * Here we will clear the errors from the TLB.
	 */
	set	MMU_TAG_ACCESS, %g5		! We write a TTE tag value of
	stxa	%g0, [%g5]ASI_IMMU		! 0 as it will be invalid.
	stxa	%g0, [%g4]ASI_ITLB_ACCESS	! Write the data and tag
	membar	#Sync

	set	PN_TLB_ACC_WAY_BIT, %g6	! same thing again for way 1
	or	%g4, %g6, %g4

	stxa	%g0, [%g4]ASI_ITLB_ACCESS	! Write same data and tag
	membar	#Sync

	sethi	%hi(FLUSH_ADDR), %g6		! PRM says we need to issue a
	flush	%g6				! flush after writing MMU regs

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
	 * already at PIL 15.
	 */
	set	cpu_tlb_parity_error, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	movl	%icc, PIL_14, %g4
	ba	sys_trap
	nop
	SET_SIZE(itlb_parity_trap)

#endif	/* lint */
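
/*
 * Illustrative sketch (not built): how the handler above assembles the
 * tlo_info parameter and the diagnostic-access index, as C pseudocode.
 * tag_ext/tag stand in for the MMU tag-access reads, and
 * pn_tlb_index() for PN_GET_TLB_INDEX; all are hypothetical names.
 *
 *	tlo_info = (tag_ext & PN_ITLB_PGSZ_MASK) |
 *	    (tag & TAGREAD_CTX_MASK) |
 *	    (1ULL << PN_TLO_INFO_IMMU_SHIFT);
 *	pgsz = (tlo_info & PN_ITLB_PGSZ_MASK) >> PN_ITLB_PGSZ_SHIFT;
 *	index = (pn_tlb_index(va, pgsz) << PN_TLB_ACC_IDX_SHIFT) |
 *	    PN_ITLB_T512;
 */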

#if defined(lint)
/*
 * The CPU jumps here from the MMU exception handler if a DTLB parity
 * error is detected and we are running on Panther.
 *
 * In this routine we collect diagnostic information and write it to our
 * logout structure (if possible) and clear all DTLB entries that may have
 * caused our parity trap.
 * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
 * and log any error messages.  As for parameters to cpu_tlb_parity_error, we
 * send two:
 *
 * %g2	- Contains the VA whose lookup in the DTLB caused the parity error
 * %g3	- Contains the tlo_info field of the pn_tlb_logout logout struct,
 *	  regardless of whether or not we actually used the logout struct.
 *
 * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
 * parameters to the data contained in the logout structure in order to
 * determine whether the logout information is valid for this particular
 * error or not.
 */
void
dtlb_parity_trap(void)
{}

#else	/* lint */

	ENTRY_NP(dtlb_parity_trap)
	/*
	 * Collect important information about the trap which will be
	 * used as a parameter to the TL0 handler.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[MMU_SFAR]%asi, %g2		! VA that caused the DMMU trap
	ldxa	[MMU_TAG_ACCESS_EXT]%asi, %g3	! read the trap VA page sizes
	set	PN_DTLB_PGSZ_MASK, %g4
	and	%g3, %g4, %g3
	ldxa	[MMU_TAG_ACCESS]%asi, %g4
	set	TAGREAD_CTX_MASK, %g5		! 'or' in the trap context
	and	%g4, %g5, %g4			! to complete the tlo_info
	or	%g4, %g3, %g3			! field for logout
	stxa	%g0, [MMU_SFSR]%asi		! clear the SFSR
	membar	#Sync

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Calculate the TLB index values for the failing VA.  Since the T512
	 * TLBs can be configured for different page sizes, we need to find
	 * the index into each one separately.
	 */
	mov	%g2, %g4			! First we get the DTLB_0 index
	set	PN_DTLB_PGSZ0_MASK, %g5
	and	%g3, %g5, %g5
	srlx	%g5, PN_DTLB_PGSZ0_SHIFT, %g5
	PN_GET_TLB_INDEX(%g4, %g5)		! %g4 has the DTLB_0 index
	sllx	%g4, PN_TLB_ACC_IDX_SHIFT, %g4	! shift the index into place
	set	PN_DTLB_T512_0, %g5
	or	%g4, %g5, %g4			! and add in the TLB ID

	mov	%g2, %g7			! Next we get the DTLB_1 index
	set	PN_DTLB_PGSZ1_MASK, %g5
	and	%g3, %g5, %g5
	srlx	%g5, PN_DTLB_PGSZ1_SHIFT, %g5
	PN_GET_TLB_INDEX(%g7, %g5)		! %g7 has the DTLB_1 index
	sllx	%g7, PN_TLB_ACC_IDX_SHIFT, %g7	! shift the index into place
	set	PN_DTLB_T512_1, %g5
	or	%g7, %g5, %g7			! and add in the TLB ID

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the T512_0 access index value for the
	 *          VA/PgSz in question
	 *    %g7 - contains the T512_1 access index value for the
	 *          VA/PgSz in question
	 *
	 * If this trap happened at TL>0, then we don't want to mess
	 * with the normal logout struct since that could cause a TLB
	 * miss.
	 */
	rdpr	%tl, %g6			! read current trap level
	cmp	%g6, 1				! skip over the tl>1 code
	ble	dtlb_parity_trap_1		! if TL <= 1.
	nop

	/*
	 * If we are here, then the trap happened at TL>1.  Simply
	 * update our tlo_info field and then skip to the TLB flush
	 * code.
	 */
	mov	1, %g6
	sllx	%g6, PN_TLO_INFO_TL1_SHIFT, %g6
	or	%g6, %g3, %g3
	ba	dtlb_parity_trap_2
	nop

dtlb_parity_trap_1:
	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the T512_0 access index value for the
	 *          VA/PgSz in question
	 *    %g7 - contains the T512_1 access index value for the
	 *          VA/PgSz in question
	 *
	 * Check to see if the logout structure is available.
	 */
	set	CHPR_TLB_LOGOUT, %g6
	GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, dtlb_parity_trap_2)
	set	LOGOUT_INVALID_U32, %g6
	sllx	%g6, 32, %g6			! if our logout structure is
	set	LOGOUT_INVALID_L32, %g5		! unavailable or if it is
	or	%g5, %g6, %g5			! already being used, then we
	ldx	[%g1 + PN_TLO_ADDR], %g6	! don't collect any diagnostic
	cmp	%g6, %g5			! information before clearing
	bne	dtlb_parity_trap_2		! and logging the error.
	nop

	/*
	 * Record the logout information.  %g4 contains our DTLB_0
	 * index + TLB ID and %g7 contains our DTLB_1 index + TLB ID
	 * both of which will be used for ASI_DTLB_ACCESS and
	 * ASI_DTLB_TAGREAD.  %g1 contains the pointer to our logout
	 * struct.
	 */
	stx	%g3, [%g1 + PN_TLO_INFO]
	stx	%g2, [%g1 + PN_TLO_ADDR]
	rdpr	%tpc, %g5
	stx	%g5, [%g1 + PN_TLO_PC]

	add	%g1, PN_TLO_DTLB_TTE, %g1	! move up the pointer

	ldxa	[%g4]ASI_DTLB_ACCESS, %g5	! read the data from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! way 0 and store it away
	ldxa	[%g4]ASI_DTLB_TAGREAD, %g5	! read the tag from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! way 0 and store it away

	ldxa	[%g7]ASI_DTLB_ACCESS, %g5	! now repeat for DTLB_1 way 0
	stx	%g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
	ldxa	[%g7]ASI_DTLB_TAGREAD, %g5
	stx	%g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]

	set	PN_TLB_ACC_WAY_BIT, %g6	! same thing again for way 1
	or	%g4, %g6, %g4			! of each TLB.
	or	%g7, %g6, %g7
	add	%g1, CH_TLO_TTE_SIZE, %g1	! move up the pointer

	ldxa	[%g4]ASI_DTLB_ACCESS, %g5	! read the data from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_DATA]	! way 1 and store it away
	ldxa	[%g4]ASI_DTLB_TAGREAD, %g5	! read the tag from DTLB_0
	stx	%g5, [%g1 + CH_TLO_TTE_TAG]	! way 1 and store it away

	ldxa	[%g7]ASI_DTLB_ACCESS, %g5	! now repeat for DTLB_1 way 1
	stx	%g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
	ldxa	[%g7]ASI_DTLB_TAGREAD, %g5
	stx	%g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]

	andn	%g4, %g6, %g4			! back to way 0
	andn	%g7, %g6, %g7			! back to way 0

dtlb_parity_trap_2:
	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *    %g4 - contains the T512_0 access index value for the
	 *          VA/PgSz in question
	 *    %g7 - contains the T512_1 access index value for the
	 *          VA/PgSz in question
	 *
	 * Here we will clear the errors from the DTLB.
	 */
	set	MMU_TAG_ACCESS, %g5		! We write a TTE tag value of
	stxa	%g0, [%g5]ASI_DMMU		! 0 as it will be invalid.
	stxa	%g0, [%g4]ASI_DTLB_ACCESS	! Write the data and tag.
	stxa	%g0, [%g7]ASI_DTLB_ACCESS	! Now repeat for DTLB_1 way 0
	membar	#Sync

	set	PN_TLB_ACC_WAY_BIT, %g6	! same thing again for way 1
	or	%g4, %g6, %g4
	or	%g7, %g6, %g7

	stxa	%g0, [%g4]ASI_DTLB_ACCESS	! Write same data and tag.
	stxa	%g0, [%g7]ASI_DTLB_ACCESS	! Now repeat for DTLB_1 way 1
	membar	#Sync

	sethi	%hi(FLUSH_ADDR), %g6		! PRM says we need to issue a
	flush	%g6				! flush after writing MMU regs

	/*
	 * at this point:
	 *    %g2 - contains the VA whose lookup caused the trap
	 *    %g3 - contains the tlo_info field
	 *
	 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
	 * already at PIL 15.  We do this even for TL>1 traps since
	 * those will lead to a system panic.
	 */
	set	cpu_tlb_parity_error, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	movl	%icc, PIL_14, %g4
	ba	sys_trap
	nop
	SET_SIZE(dtlb_parity_trap)

#endif	/* lint */
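
/*
 * Illustrative sketch (not built): the layout of the four TTE records
 * captured above, as implied by the offsets used.  tte[] is a
 * hypothetical array of CH_TLO_TTE_SIZE-byte data/tag records starting
 * at PN_TLO_DTLB_TTE.
 *
 *	tte[0] = T512_0 way 0		tte[1] = T512_0 way 1
 *	tte[2] = T512_1 way 0		tte[3] = T512_1 way 1
 */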

#if defined(lint)
/*
 * Calculates the Panther TLB index based on a virtual address and page size
 *
 * Register usage:
 *	%o0 - virtual address whose index we want
 *	%o1 - Page Size of the TLB in question as encoded in the
 *	      ASI_[D|I]MMU_TAG_ACCESS_EXT register.
 */
uint64_t
pn_get_tlb_index(uint64_t va, uint64_t pg_sz)
{
	return ((va + pg_sz) - (va + pg_sz));
}
#else	/* lint */
	ENTRY(pn_get_tlb_index)

	PN_GET_TLB_INDEX(%o0, %o1)

	retl
	nop
	SET_SIZE(pn_get_tlb_index)
#endif	/* lint */


#if defined(lint)
/*
 * For Panther CPUs we need to flush the IPB after any I$ or D$
 * parity errors are detected.
 */
void
flush_ipb(void)
{ return; }

#else	/* lint */

	ENTRY(flush_ipb)
	clr	%o0

flush_ipb_1:
	stxa	%g0, [%o0]ASI_IPB_TAG
	membar	#Sync
	cmp	%o0, PN_IPB_TAG_ADDR_MAX
	blt	flush_ipb_1
	add	%o0, PN_IPB_TAG_ADDR_LINESIZE, %o0

	sethi	%hi(FLUSH_ADDR), %o0
	flush	%o0
	retl
	nop
	SET_SIZE(flush_ipb)

#endif	/* lint */
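
/*
 * Illustrative sketch (not built): the IPB tag walk performed by
 * flush_ipb, as C pseudocode.  st_ipb_tag() stands in for the stxa to
 * ASI_IPB_TAG and is hypothetical.
 *
 *	for (addr = 0; addr <= PN_IPB_TAG_ADDR_MAX;
 *	    addr += PN_IPB_TAG_ADDR_LINESIZE)
 *		st_ipb_tag(addr, 0);	// invalidate one IPB tag
 */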