/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Assembly code support for the Cheetah module
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if !defined(lint)
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/machtrap.h>
#include <sys/privregs.h>
#include <sys/asm_linkage.h>
#include <sys/trap.h>
#include <sys/cheetahregs.h>
#include <sys/us3_module.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>
#include <sys/clock.h>
#include <sys/cheetahasm.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */

#if !defined(lint)

/* BEGIN CSTYLED */

/*
 * Cheetah version to flush an Ecache line by index (aliased address)
 */
#define	ECACHE_REFLUSH_LINE(ecache_size, alias_address, scr2)		\
	ldxa	[alias_address]ASI_MEM, %g0

#define	ECACHE_FLUSH_LINE(physaddr, ecache_size, scr1, scr2)		\
	xor	physaddr, ecache_size, scr1;				\
	add	ecache_size, ecache_size, scr2;				\
	sub	scr2, 1, scr2;						\
	and	scr1, scr2, scr1;					\
	ASM_LDX(scr2, ecache_flushaddr);				\
	add	scr1, scr2, scr1;					\
	ECACHE_REFLUSH_LINE(ecache_size, scr1, scr2)

/* END CSTYLED */

#endif	/* !lint */
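
/*
 * For reference only: a rough C sketch of the alias address that
 * ECACHE_FLUSH_LINE computes before the displacement load.  The helper
 * name and signature are illustrative, not actual kernel interfaces;
 * ecache_flushaddr is assumed to be visible here as a 64-bit variable.
 *
 *	extern uint64_t ecache_flushaddr;
 *
 *	static uint64_t
 *	ecache_flush_alias(uint64_t physaddr, uint64_t ec_set_size)
 *	{
 *		uint64_t index;
 *
 *		index = (physaddr ^ ec_set_size) & (2 * ec_set_size - 1);
 *		return (ecache_flushaddr + index);
 *	}
 *
 * A single ldxa from that address through ASI_MEM is intended to displace
 * the E$ line that physaddr indexes to.
 */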


/*
 * Fast ECC error at TL>0 handler
 * We get here via trap 70 at TL>0->Software trap 0 at TL>0.  We enter
 * this routine with %g1 and %g2 already saved in %tpc, %tnpc and %tstate.
 * For a complete description of the Fast ECC at TL>0 handling see the
 * comment block "Cheetah/Cheetah+ Fast ECC at TL>0 trap strategy" in
 * us3_common_asm.s
 */
#if defined(lint)

void
fast_ecc_tl1_err(void)
{}

#else	/* lint */

	.section ".text"
	.align	64
	ENTRY_NP(fast_ecc_tl1_err)

	/*
	 * This macro turns off the D$/I$ if they are on and saves their
	 * original state in ch_err_tl1_tmp, saves all the %g registers in the
	 * ch_err_tl1_data structure, updates the ch_err_tl1_flags and saves
	 * the %tpc in ch_err_tl1_tpc.  At the end of this macro, %g1 will
	 * point to the ch_err_tl1_data structure and the original D$/I$ state
	 * will be saved in ch_err_tl1_tmp.  All %g registers except for %g1
	 * will be available.
	 */
	CH_ERR_TL1_FECC_ENTER;

	/*
	 * Get the diagnostic logout data.  %g4 must be initialized to
	 * current CEEN state, %g5 must point to logout structure in
	 * ch_err_tl1_data_t.  %g3 will contain the nesting count upon
	 * return.
	 */
	ldxa	[%g0]ASI_ESTATE_ERR, %g4
	and	%g4, EN_REG_CEEN, %g4
	add	%g1, CH_ERR_TL1_LOGOUT, %g5
	DO_TL1_CPU_LOGOUT(%g3, %g2, %g4, %g5, %g6, %g3, %g4)

	/*
	 * If the logout nesting count is exceeded, we're probably
	 * not making any progress, try to panic instead.
	 */
	cmp	%g3, CLO_NESTING_MAX
	bge	fecc_tl1_err
	nop

	/*
	 * Save the current CEEN and NCEEN state in %g7 and turn them off
	 * before flushing the Ecache.
	 */
	ldxa	[%g0]ASI_ESTATE_ERR, %g7
	andn	%g7, EN_REG_CEEN | EN_REG_NCEEN, %g5
	stxa	%g5, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/*
	 * Flush the Ecache, using the largest possible cache size with the
	 * smallest possible line size since we can't get the actual sizes
	 * from the cpu_node due to DTLB misses.
	 */
	set	CH_ECACHE_8M_SIZE, %g4
	set	CH_ECACHE_MIN_LSIZE, %g5

	/*
	 * Use a different flush address to avoid recursion if the error
	 * exists in ecache_flushaddr.
	 */
	ASM_LDX(%g6, ecache_tl1_flushaddr)
	cmp	%g6, -1		! check if address is valid
	be	%xcc, fecc_tl1_err
	nop
	CH_ECACHE_FLUSHALL(%g4, %g5, %g6)

	/*
	 * Restore CEEN and NCEEN to the previous state.
	 */
	stxa	%g7, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/*
	 * If we turned off the D$, then flush it and turn it back on.
	 */
	ldxa	[%g1 + CH_ERR_TL1_TMP]%asi, %g3
	andcc	%g3, CH_ERR_TSTATE_DC_ON, %g0
	bz	%xcc, 3f
	nop

	/*
	 * Flush the D$.
	 */
	ASM_LD(%g4, dcache_size)
	ASM_LD(%g5, dcache_linesize)
	CH_DCACHE_FLUSHALL(%g4, %g5, %g6)

	/*
	 * Turn the D$ back on.
	 */
	ldxa	[%g0]ASI_DCU, %g3
	or	%g3, DCU_DC, %g3
	stxa	%g3, [%g0]ASI_DCU
	membar	#Sync
3:
	/*
	 * If we turned off the I$, then flush it and turn it back on.
	 */
	ldxa	[%g1 + CH_ERR_TL1_TMP]%asi, %g3
	andcc	%g3, CH_ERR_TSTATE_IC_ON, %g0
	bz	%xcc, 4f
	nop

	/*
	 * Flush the I$.
	 */
	ASM_LD(%g4, icache_size)
	ASM_LD(%g5, icache_linesize)
	CH_ICACHE_FLUSHALL(%g4, %g5, %g6, %g3)

	/*
	 * Turn the I$ back on.  Changing DCU_IC requires flush.
	 */
	ldxa	[%g0]ASI_DCU, %g3
	or	%g3, DCU_IC, %g3
	stxa	%g3, [%g0]ASI_DCU
	flush	%g0
4:

#ifdef TRAPTRACE
	/*
	 * Get current trap trace entry physical pointer.
	 */
	CPU_INDEX(%g6, %g5)
	sll	%g6, TRAPTR_SIZE_SHIFT, %g6
	set	trap_trace_ctl, %g5
	add	%g6, %g5, %g6
	ld	[%g6 + TRAPTR_LIMIT], %g5
	tst	%g5
	be	%icc, skip_traptrace
	nop
	ldx	[%g6 + TRAPTR_PBASE], %g5
	ld	[%g6 + TRAPTR_OFFSET], %g4
	add	%g5, %g4, %g5

	/*
	 * Create trap trace entry.
	 */
	rd	%asi, %g7
	wr	%g0, TRAPTR_ASI, %asi
	rd	STICK, %g4
	stxa	%g4, [%g5 + TRAP_ENT_TICK]%asi
	rdpr	%tl, %g4
	stha	%g4, [%g5 + TRAP_ENT_TL]%asi
	rdpr	%tt, %g4
	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g4
	stna	%g4, [%g5 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g4
	stxa	%g4, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
	wr	%g0, %g7, %asi
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFAR]%asi, %g3
	ldxa	[%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g4
	wr	%g0, TRAPTR_ASI, %asi
	stna	%g3, [%g5 + TRAP_ENT_F1]%asi
	stna	%g4, [%g5 + TRAP_ENT_F2]%asi
	wr	%g0, %g7, %asi
	ldxa	[%g1 + CH_ERR_TL1_AFAR]%asi, %g3
	ldxa	[%g1 + CH_ERR_TL1_AFSR]%asi, %g4
	wr	%g0, TRAPTR_ASI, %asi
	stna	%g3, [%g5 + TRAP_ENT_F3]%asi
	stna	%g4, [%g5 + TRAP_ENT_F4]%asi
	wr	%g0, %g7, %asi

	/*
	 * Advance trap trace pointer.
	 */
	ld	[%g6 + TRAPTR_OFFSET], %g5
	ld	[%g6 + TRAPTR_LIMIT], %g4
	st	%g5, [%g6 + TRAPTR_LAST_OFFSET]
	add	%g5, TRAP_ENT_SIZE, %g5
	sub	%g4, TRAP_ENT_SIZE, %g4
	cmp	%g5, %g4
	movge	%icc, 0, %g5
	st	%g5, [%g6 + TRAPTR_OFFSET]
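
	/*
	 * The pointer advance above implements a simple ring buffer.
	 * Roughly, in C (using illustrative names for the per-CPU fields
	 * behind the TRAPTR_LAST_OFFSET/TRAPTR_OFFSET/TRAPTR_LIMIT
	 * offsets; this is a sketch, not actual kernel code):
	 *
	 *	last_offset = offset;
	 *	offset += TRAP_ENT_SIZE;
	 *	if (offset >= limit - TRAP_ENT_SIZE)
	 *		offset = 0;
	 */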
skip_traptrace:
#endif	/* TRAPTRACE */

	/*
	 * If nesting count is not zero, skip all the AFSR/AFAR
	 * handling and just do the necessary cache-flushing.
	 */
	ldxa	[%g1 + CH_ERR_TL1_NEST_CNT]%asi, %g2
	brnz	%g2, 6f
	nop

	/*
	 * If a UCU followed by a WDU has occurred go ahead and panic
	 * since a UE will occur (on the retry) before the UCU and WDU
	 * messages are enqueued.
	 */
	ldxa	[%g1 + CH_ERR_TL1_AFSR]%asi, %g3
	set	1, %g4
	sllx	%g4, C_AFSR_UCU_SHIFT, %g4
	btst	%g4, %g3		! UCU in original AFSR?
	bz	%xcc, 6f
	nop
	ldxa	[%g0]ASI_AFSR, %g4	! current AFSR
	or	%g3, %g4, %g3		! %g3 = original + current AFSR
	set	1, %g4
	sllx	%g4, C_AFSR_WDU_SHIFT, %g4
	btst	%g4, %g3		! WDU in original or current AFSR?
	bnz	%xcc, fecc_tl1_err
	nop

6:
	/*
	 * We fall into this macro if we've successfully logged the error in
	 * the ch_err_tl1_data structure and want the PIL15 softint to pick
	 * it up and log it.  %g1 must point to the ch_err_tl1_data structure.
	 * Restores the %g registers and issues retry.
	 */
	CH_ERR_TL1_EXIT;

	/*
	 * Establish panic exit label.
	 */
	CH_ERR_TL1_PANIC_EXIT(fecc_tl1_err);

	SET_SIZE(fast_ecc_tl1_err)

#endif	/* lint */


#if defined(lint)
/*
 * scrubphys - Pass in the aligned physical memory address
 * that you want to scrub, along with the ecache set size.
 *
 *	1)  Displacement flush the E$ line corresponding to %addr.
 *	    The first ldxa guarantees that the %addr is no longer in
 *	    M, O, or E (goes to I or S if instruction fetch also happens).
 *	2)  "Write" the data using a CAS %addr,%g0,%g0.
 *	    The casxa guarantees a transition from I to M or S to M.
 *	3)  Displacement flush the E$ line corresponding to %addr.
 *	    The second ldxa pushes the M line out of the ecache, into the
 *	    writeback buffers, on the way to memory.
 *	4)  The "membar #Sync" pushes the cache line out of the writeback
 *	    buffers onto the bus, on the way to dram finally.
 *
 * This is a modified version of the algorithm suggested by Gary Lauterbach.
 * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
 * as modified, but then we found out that for spitfire, if it misses in the
 * E$ it will probably install as an M, but if it hits in the E$, then it
 * will stay E, if the store doesn't happen.  So the first displacement flush
 * should ensure that the CAS will miss in the E$.  Arrgh.
 */
/* ARGSUSED */
void
scrubphys(uint64_t paddr, int ecache_set_size)
{}

#else	/* lint */
	ENTRY(scrubphys)
	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate	! clear IE, AM bits

	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
	casxa	[%o0]ASI_MEM, %g0, %g0
	ECACHE_REFLUSH_LINE(%o1, %o2, %o3)

	wrpr	%g0, %o4, %pstate	! restore earlier pstate register value

	retl
	membar	#Sync			! move the data out of the load buffer
	SET_SIZE(scrubphys)

#endif	/* lint */


#if defined(lint)
/*
 * clearphys - Pass in the physical memory address of the checkblock
 * that you want to push out, cleared with a recognizable pattern,
 * from the ecache.
 *
 * To ensure that the ecc gets recalculated after the bad data is cleared,
 * we must write out enough data to fill the w$ line (64 bytes).  So we read
 * in an entire ecache subblock's worth of data, and write it back out.
 * Then we overwrite the 16 bytes of bad data with the pattern.
 */
/* ARGSUSED */
void
clearphys(uint64_t paddr, int ecache_set_size, int ecache_linesize)
{
}

#else	/* lint */
	ENTRY(clearphys)
	/* turn off IE, AM bits */
	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate

	/* turn off NCEEN */
	ldxa	[%g0]ASI_ESTATE_ERR, %o5
	andn	%o5, EN_REG_NCEEN, %o3
	stxa	%o3, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/* align address passed with 64 bytes subblock size */
	mov	CH_ECACHE_SUBBLK_SIZE, %o2
	andn	%o0, (CH_ECACHE_SUBBLK_SIZE - 1), %g1

	/* move the good data into the W$ */
1:
	subcc	%o2, 8, %o2
	ldxa	[%g1 + %o2]ASI_MEM, %g2
	bge	1b
	stxa	%g2, [%g1 + %o2]ASI_MEM

	/* now overwrite the bad data */
	setx	0xbadecc00badecc01, %g1, %g2
	stxa	%g2, [%o0]ASI_MEM
	mov	8, %g1
	stxa	%g2, [%o0 + %g1]ASI_MEM

	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
	casxa	[%o0]ASI_MEM, %g0, %g0
	ECACHE_REFLUSH_LINE(%o1, %o2, %o3)

	/* clear the AFSR */
	ldxa	[%g0]ASI_AFSR, %o1
	stxa	%o1, [%g0]ASI_AFSR
	membar	#Sync

	/* turn NCEEN back on */
	stxa	%o5, [%g0]ASI_ESTATE_ERR
	membar	#Sync

	/* return and re-enable IE and AM */
	retl
	wrpr	%g0, %o4, %pstate
	SET_SIZE(clearphys)

#endif	/* lint */
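
/*
 * For illustration only: the refill-and-overwrite performed by clearphys
 * above, sketched in C.  physical_read64()/physical_write64() are
 * hypothetical helpers standing in for the ldxa/stxa ASI_MEM accesses,
 * and the sketch shows the intent of the loop rather than an exact
 * transliteration of its delay-slot structure.
 *
 *	uint64_t base = paddr & ~(uint64_t)(CH_ECACHE_SUBBLK_SIZE - 1);
 *	int i;
 *
 *	for (i = CH_ECACHE_SUBBLK_SIZE - 8; i >= 0; i -= 8)
 *		physical_write64(base + i, physical_read64(base + i));
 *	physical_write64(paddr, 0xbadecc00badecc01ULL);
 *	physical_write64(paddr + 8, 0xbadecc00badecc01ULL);
 */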


#if defined(lint)
/*
 * Cheetah Ecache displacement flush the specified line from the E$
 *
 * Register usage:
 *	%o0 - 64 bit physical address for flushing
 *	%o1 - Ecache set size
 */
/*ARGSUSED*/
void
ecache_flush_line(uint64_t flushaddr, int ec_set_size)
{
}
#else	/* lint */
	ENTRY(ecache_flush_line)

	ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)

	retl
	nop
	SET_SIZE(ecache_flush_line)
#endif	/* lint */


#if defined(lint)
/*
 * This routine will not be called in Cheetah systems.
 */
void
flush_ipb(void)
{ return; }

#else	/* lint */

	ENTRY(flush_ipb)
	retl
	nop
	SET_SIZE(flush_ipb)

#endif	/* lint */
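
/*
 * For reference, an illustrative C-level view of the entry points in this
 * file.  The prototypes mirror the lint stubs above; example_scrub() and
 * its 8-byte alignment of paddr are hypothetical usage, not actual kernel
 * code.
 *
 *	extern void scrubphys(uint64_t paddr, int ecache_set_size);
 *	extern void clearphys(uint64_t paddr, int ecache_set_size,
 *	    int ecache_linesize);
 *	extern void ecache_flush_line(uint64_t flushaddr, int ec_set_size);
 *	extern void flush_ipb(void);
 *
 *	static void
 *	example_scrub(uint64_t paddr, int ecache_set_size)
 *	{
 *		scrubphys(paddr & ~(uint64_t)7, ecache_set_size);
 *	}
 */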