/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Assembly code support for the Cheetah module
 */

#pragma ident   "%Z%%M% %I% %E% SMI"

#if !defined(lint)
#include "assym.h"
#endif  /* lint */

#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/machtrap.h>
#include <sys/privregs.h>
#include <sys/asm_linkage.h>
#include <sys/trap.h>
#include <sys/cheetahregs.h>
#include <sys/us3_module.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>
#include <sys/clock.h>
#include <sys/cheetahasm.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif  /* TRAPTRACE */

#if !defined(lint)

/* BEGIN CSTYLED */

/*
 * Cheetah version to flush an Ecache line by index (aliased address)
 */
#define ECACHE_REFLUSH_LINE(ecache_size, alias_address, scr2)          \
        ldxa    [alias_address]ASI_MEM, %g0

#define ECACHE_FLUSH_LINE(physaddr, ecache_size, scr1, scr2)           \
        xor     physaddr, ecache_size, scr1;                           \
        add     ecache_size, ecache_size, scr2;                        \
        sub     scr2, 1, scr2;                                         \
        and     scr1, scr2, scr1;                                      \
        ASM_LDX(scr2, ecache_flushaddr);                               \
        add     scr1, scr2, scr1;                                      \
        ECACHE_REFLUSH_LINE(ecache_size, scr1, scr2)

/* END CSTYLED */

#endif  /* !lint */


/*
 * Fast ECC error at TL>0 handler
 * We get here via trap 70 at TL>0->Software trap 0 at TL>0.  We enter
 * this routine with %g1 and %g2 already saved in %tpc, %tnpc and %tstate.
 * For a complete description of the Fast ECC at TL>0 handling see the
 * comment block "Cheetah/Cheetah+ Fast ECC at TL>0 trap strategy" in
 * us3_common_asm.s
 */
#if defined(lint)

void
fast_ecc_tl1_err(void)
{}

#else   /* lint */

        .section ".text"
        .align  64
        ENTRY_NP(fast_ecc_tl1_err)

        /*
         * This macro turns off the D$/I$ if they are on and saves their
         * original state in ch_err_tl1_tmp, saves all the %g registers in the
         * ch_err_tl1_data structure, updates the ch_err_tl1_flags and saves
         * the %tpc in ch_err_tl1_tpc.  At the end of this macro, %g1 will
         * point to the ch_err_tl1_data structure and the original D$/I$ state
         * will be saved in ch_err_tl1_tmp.  All %g registers except for %g1
         * will be available.
         */
        CH_ERR_TL1_FECC_ENTER;

        /*
         * Get the diagnostic logout data.  %g4 must be initialized to
         * current CEEN state, %g5 must point to logout structure in
         * ch_err_tl1_data_t.  %g3 will contain the nesting count upon
         * return.
         */
        ldxa    [%g0]ASI_ESTATE_ERR, %g4
        and     %g4, EN_REG_CEEN, %g4
        add     %g1, CH_ERR_TL1_LOGOUT, %g5
        DO_TL1_CPU_LOGOUT(%g3, %g2, %g4, %g5, %g6, %g3, %g4)

        /*
         * If the logout nesting count is exceeded, we're probably
         * not making any progress, try to panic instead.
         */
        cmp     %g3, CLO_NESTING_MAX
        bge     fecc_tl1_err
        nop

        /*
         * Save the current CEEN and NCEEN state in %g7 and turn them off
         * before flushing the Ecache.
         */
        ldxa    [%g0]ASI_ESTATE_ERR, %g7
        andn    %g7, EN_REG_CEEN | EN_REG_NCEEN, %g5
        stxa    %g5, [%g0]ASI_ESTATE_ERR
        membar  #Sync

        /*
         * Flush the Ecache, using the largest possible cache size with the
         * smallest possible line size since we can't get the actual sizes
         * from the cpu_node due to DTLB misses.
         */
        set     CH_ECACHE_8M_SIZE, %g4
        set     CH_ECACHE_MIN_LSIZE, %g5

        /*
         * Use a different flush address to avoid recursion if the error
         * exists in ecache_flushaddr.
         */
        ASM_LDX(%g6, ecache_tl1_flushaddr)
        cmp     %g6, -1                 ! check if address is valid
        be      %xcc, fecc_tl1_err
        nop
        CH_ECACHE_FLUSHALL(%g4, %g5, %g6)

        /*
         * Restore CEEN and NCEEN to the previous state.
         */
        stxa    %g7, [%g0]ASI_ESTATE_ERR
        membar  #Sync

        /*
         * If we turned off the D$, then flush it and turn it back on.
         */
        ldxa    [%g1 + CH_ERR_TL1_TMP]%asi, %g3
        andcc   %g3, CH_ERR_TSTATE_DC_ON, %g0
        bz      %xcc, 3f
        nop

        /*
         * Flush the D$.
         */
        ASM_LD(%g4, dcache_size)
        ASM_LD(%g5, dcache_linesize)
        CH_DCACHE_FLUSHALL(%g4, %g5, %g6)

        /*
         * Turn the D$ back on.
         */
        ldxa    [%g0]ASI_DCU, %g3
        or      %g3, DCU_DC, %g3
        stxa    %g3, [%g0]ASI_DCU
        membar  #Sync
3:
        /*
         * If we turned off the I$, then flush it and turn it back on.
         */
        ldxa    [%g1 + CH_ERR_TL1_TMP]%asi, %g3
        andcc   %g3, CH_ERR_TSTATE_IC_ON, %g0
        bz      %xcc, 4f
        nop

        /*
         * Flush the I$.
         */
        ASM_LD(%g4, icache_size)
        ASM_LD(%g5, icache_linesize)
        CH_ICACHE_FLUSHALL(%g4, %g5, %g6, %g3)

        /*
         * Turn the I$ back on.  Changing DCU_IC requires flush.
         */
        ldxa    [%g0]ASI_DCU, %g3
        or      %g3, DCU_IC, %g3
        stxa    %g3, [%g0]ASI_DCU
        flush   %g0
4:

#ifdef TRAPTRACE
        /*
         * Get current trap trace entry physical pointer.
         */
        CPU_INDEX(%g6, %g5)
        sll     %g6, TRAPTR_SIZE_SHIFT, %g6
        set     trap_trace_ctl, %g5
        add     %g6, %g5, %g6
        ld      [%g6 + TRAPTR_LIMIT], %g5
        tst     %g5
        be      %icc, skip_traptrace
        nop
        ldx     [%g6 + TRAPTR_PBASE], %g5
        ld      [%g6 + TRAPTR_OFFSET], %g4
        add     %g5, %g4, %g5

        /*
         * Create trap trace entry.
         */
        rd      %asi, %g7
        wr      %g0, TRAPTR_ASI, %asi
        rd      STICK, %g4
        stxa    %g4, [%g5 + TRAP_ENT_TICK]%asi
        rdpr    %tl, %g4
        stha    %g4, [%g5 + TRAP_ENT_TL]%asi
        rdpr    %tt, %g4
        stha    %g4, [%g5 + TRAP_ENT_TT]%asi
        rdpr    %tpc, %g4
        stna    %g4, [%g5 + TRAP_ENT_TPC]%asi
        rdpr    %tstate, %g4
        stxa    %g4, [%g5 + TRAP_ENT_TSTATE]%asi
        stna    %sp, [%g5 + TRAP_ENT_SP]%asi
        stna    %g0, [%g5 + TRAP_ENT_TR]%asi
        wr      %g0, %g7, %asi
        ldxa    [%g1 + CH_ERR_TL1_SDW_AFAR]%asi, %g3
        ldxa    [%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g4
        wr      %g0, TRAPTR_ASI, %asi
        stna    %g3, [%g5 + TRAP_ENT_F1]%asi
        stna    %g4, [%g5 + TRAP_ENT_F2]%asi
        wr      %g0, %g7, %asi
        ldxa    [%g1 + CH_ERR_TL1_AFAR]%asi, %g3
        ldxa    [%g1 + CH_ERR_TL1_AFSR]%asi, %g4
        wr      %g0, TRAPTR_ASI, %asi
        stna    %g3, [%g5 + TRAP_ENT_F3]%asi
        stna    %g4, [%g5 + TRAP_ENT_F4]%asi
        wr      %g0, %g7, %asi

        /*
         * Advance trap trace pointer.
         */
        ld      [%g6 + TRAPTR_OFFSET], %g5
        ld      [%g6 + TRAPTR_LIMIT], %g4
        st      %g5, [%g6 + TRAPTR_LAST_OFFSET]
        add     %g5, TRAP_ENT_SIZE, %g5
        sub     %g4, TRAP_ENT_SIZE, %g4
        cmp     %g5, %g4
        movge   %icc, 0, %g5
        st      %g5, [%g6 + TRAPTR_OFFSET]
skip_traptrace:
#endif  /* TRAPTRACE */

        /*
         * If nesting count is not zero, skip all the AFSR/AFAR
         * handling and just do the necessary cache-flushing.
         */
        ldxa    [%g1 + CH_ERR_TL1_NEST_CNT]%asi, %g2
        brnz    %g2, 6f
        nop

        /*
         * If a UCU followed by a WDU has occurred go ahead and panic
         * since a UE will occur (on the retry) before the UCU and WDU
         * messages are enqueued.
         */
        ldxa    [%g1 + CH_ERR_TL1_AFSR]%asi, %g3
        set     1, %g4
        sllx    %g4, C_AFSR_UCU_SHIFT, %g4
        btst    %g4, %g3                ! UCU in original AFSR?
        bz      %xcc, 6f
        nop
        ldxa    [%g0]ASI_AFSR, %g4      ! current AFSR
        or      %g3, %g4, %g3           ! %g3 = original + current AFSR
        set     1, %g4
        sllx    %g4, C_AFSR_WDU_SHIFT, %g4
        btst    %g4, %g3                ! WDU in original or current AFSR?
        bnz     %xcc, fecc_tl1_err
        nop

6:
        /*
         * We fall into this macro if we've successfully logged the error in
         * the ch_err_tl1_data structure and want the PIL15 softint to pick
         * it up and log it.  %g1 must point to the ch_err_tl1_data structure.
         * Restores the %g registers and issues retry.
         */
        CH_ERR_TL1_EXIT;

        /*
         * Establish panic exit label.
         */
        CH_ERR_TL1_PANIC_EXIT(fecc_tl1_err);

        SET_SIZE(fast_ecc_tl1_err)

#endif  /* lint */


#if defined(lint)
/*
 * scrubphys - Pass in the aligned physical memory address
 * that you want to scrub, along with the ecache set size.
 *
 *      1) Displacement flush the E$ line corresponding to %addr.
 *         The first ldxa guarantees that the %addr is no longer in
 *         M, O, or E (goes to I or S if instruction fetch also happens).
 *      2) "Write" the data using a CAS %addr,%g0,%g0.
 *         The casxa guarantees a transition from I to M or S to M.
 *      3) Displacement flush the E$ line corresponding to %addr.
 *         The second ldxa pushes the M line out of the ecache, into the
 *         writeback buffers, on the way to memory.
 *      4) The "membar #Sync" pushes the cache line out of the writeback
 *         buffers onto the bus, on the way to dram finally.
 *
 * This is a modified version of the algorithm suggested by Gary Lauterbach.
 * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
 * as modified, but then we found out that for spitfire, if it misses in the
 * E$ it will probably install as an M, but if it hits in the E$, then it
 * will stay E, if the store doesn't happen.  So the first displacement flush
 * should ensure that the CAS will miss in the E$.  Arrgh.
 */
/* ARGSUSED */
void
scrubphys(uint64_t paddr, int ecache_set_size)
{}

#else   /* lint */
        ENTRY(scrubphys)
        rdpr    %pstate, %o4
        andn    %o4, PSTATE_IE | PSTATE_AM, %o5
        wrpr    %o5, %g0, %pstate       ! clear IE, AM bits

        ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
        casxa   [%o0]ASI_MEM, %g0, %g0
        ECACHE_REFLUSH_LINE(%o1, %o2, %o3)

        wrpr    %g0, %o4, %pstate       ! restore earlier pstate register value

        retl
        membar  #Sync                   ! move the data out of the load buffer
        SET_SIZE(scrubphys)

#endif  /* lint */


#if defined(lint)
/*
 * clearphys - Pass in the aligned physical memory address
 * that you want to push out from the ecache as an
 * ecache_linesize byte block of zeros.
 */
/* ARGSUSED */
void
clearphys(uint64_t paddr, int ecache_set_size, int ecache_linesize)
{
}

#else   /* lint */
        ENTRY(clearphys)
        /* turn off IE, AM bits */
        rdpr    %pstate, %o4
        andn    %o4, PSTATE_IE | PSTATE_AM, %o5
        wrpr    %o5, %g0, %pstate

        /* turn off NCEEN */
        ldxa    [%g0]ASI_ESTATE_ERR, %o5
        andn    %o5, EN_REG_NCEEN, %o3
        stxa    %o3, [%g0]ASI_ESTATE_ERR
        membar  #Sync

        /* zero the E$ line */
1:
        subcc   %o2, 8, %o2
        bge     1b
        stxa    %g0, [%o0 + %o2]ASI_MEM

        ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
        casxa   [%o0]ASI_MEM, %g0, %g0
        ECACHE_REFLUSH_LINE(%o1, %o2, %o3)

        /* clear the AFSR */
        ldxa    [%g0]ASI_AFSR, %o1
        stxa    %o1, [%g0]ASI_AFSR
        membar  #Sync

        /* turn NCEEN back on */
        stxa    %o5, [%g0]ASI_ESTATE_ERR
        membar  #Sync

        /* return and re-enable IE and AM */
        retl
        wrpr    %g0, %o4, %pstate
        SET_SIZE(clearphys)

#endif  /* lint */


#if defined(lint)
/*
 * Cheetah Ecache displacement flush the specified line from the E$
 *
 * Register usage:
 *      %o0 - 64 bit physical address for flushing
 *      %o1 - Ecache set size
 */
/*ARGSUSED*/
void
ecache_flush_line(uint64_t flushaddr, int ec_set_size)
{
}
#else   /* lint */
        ENTRY(ecache_flush_line)

        ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)

        retl
        nop
        SET_SIZE(ecache_flush_line)
#endif  /* lint */


#if defined(lint)
/*
 * This routine will not be called in Cheetah systems.
 */
void
flush_ipb(void)
{ return; }

#else   /* lint */

        ENTRY(flush_ipb)
        retl
        nop
        SET_SIZE(flush_ipb)

#endif  /* lint */