/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ident  "%Z%%M% %I% %E% SMI"

#if !defined(lint)
#include "assym.h"
#endif

/*
 * General assembly language routines.
 * This file is intended to contain routines that are specific to the
 * cpu architecture.
 */

/*
 * WARNING: If you add a fast trap handler which can be invoked by a
 * non-privileged user, you may have to use the FAST_TRAP_DONE macro
 * instead of the "done" instruction to return to user mode. See the
 * comments for the "fast_trap_done" entry point for more information.
 */
#define FAST_TRAP_DONE  \
        ba,a    fast_trap_done

/*
 * Override GET_NATIVE_TIME for the cpu module code. This is not
 * guaranteed to be exactly one instruction, so be careful about using
 * the macro in delay slots.
 *
 * Do not use any instruction that modifies condition codes, as the
 * caller may depend on these remaining unchanged across the macro.
 */

#define GET_NATIVE_TIME(out, scr1, scr2) \
        rd      STICK, out

#define RD_TICKCMPR(out, scr)           \
        rd      STICK_COMPARE, out

#define WR_TICKCMPR(in, scr1, scr2, label)      \
        wr      in, STICK_COMPARE


#include <sys/clock.h>

#if defined(lint)
#include <sys/types.h>
#include <sys/scb.h>
#include <sys/systm.h>
#include <sys/regset.h>
#include <sys/sunddi.h>
#include <sys/lockstat.h>
#endif  /* lint */


#include <sys/asm_linkage.h>
#include <sys/privregs.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>      /* To get SYSBASE and PAGESIZE */
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/intreg.h>
#include <sys/psr_compat.h>
#include <sys/isa_defs.h>
#include <sys/dditypes.h>
#include <sys/intr.h>
#include <sys/hypervisor_api.h>

#if !defined(lint)
#include "assym.h"
#endif

#define ICACHE_FLUSHSZ  0x20

#if defined(lint)
/*
 * Softint generated when the counter field of the tick register matches
 * the value field of the tick_cmpr register.
 */
/*ARGSUSED*/
void
tickcmpr_set(uint64_t clock_cycles)
{}

#else   /* lint */

        ENTRY_NP(tickcmpr_set)
        ! get 64-bit clock_cycles interval
        mov     %o0, %o2
        mov     8, %o3                  ! A reasonable initial step size
1:
        WR_TICKCMPR(%o2, %o4, %o5, __LINE__)    ! Write to TICK_CMPR

        GET_NATIVE_TIME(%o0, %o4, %o5)  ! Read %tick to confirm the
        sllx    %o0, 1, %o0             ! value we wrote was in the future.
        srlx    %o0, 1, %o0

        cmp     %o2, %o0                ! If the value we wrote was in the
        bg,pt   %xcc, 2f                ! future, then blow out of here.
        sllx    %o3, 1, %o3             ! If not, then double our step size,
        ba,pt   %xcc, 1b                ! and take another lap.
        add     %o0, %o3, %o2
2:
        retl
        nop
        SET_SIZE(tickcmpr_set)

#endif  /* lint */
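/*
 * For reference, the retry loop in tickcmpr_set() above behaves like the
 * following C (an illustrative sketch only, not part of the build;
 * rd_stick() and wr_stick_compare() are hypothetical stand-ins for the
 * privileged register accesses, and the mask mirrors the sllx/srlx pair
 * that strips the NPT bit):
 *
 *      void
 *      tickcmpr_set_sketch(uint64_t target)
 *      {
 *              uint64_t step = 8;              // reasonable initial step
 *              for (;;) {
 *                      wr_stick_compare(target);
 *                      uint64_t now = rd_stick() & ~(1ULL << 63);
 *                      if (target > now)       // match is in the future
 *                              break;
 *                      step <<= 1;             // double the step size
 *                      target = now + step;    // and take another lap
 *              }
 *      }
 */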
#if defined(lint)

void
tickcmpr_disable(void)
{}

#else

        ENTRY_NP(tickcmpr_disable)
        mov     1, %g1
        sllx    %g1, TICKINT_DIS_SHFT, %o0
        WR_TICKCMPR(%o0, %o4, %o5, __LINE__)    ! Write to TICK_CMPR
        retl
        nop
        SET_SIZE(tickcmpr_disable)

#endif

#if defined(lint)

/*
 * tick_write_delta() increments %tick by the specified delta. This should
 * only be called after a CPR event to ensure that gethrtime() continues to
 * increase monotonically. Obviously, writing %tick needs to be done very
 * carefully to avoid introducing unnecessary %tick skew across CPUs. For
 * this reason, we make sure we're i-cache hot before actually writing to
 * %tick.
 *
 * NOTE: No provision for this on sun4v right now.
 */

/*ARGSUSED*/
void
tick_write_delta(uint64_t delta)
{}

#else   /* lint */

        .seg    ".text"
tick_write_delta_panic:
        .asciz  "tick_write_delta: not supported"

        ENTRY_NP(tick_write_delta)
        sethi   %hi(tick_write_delta_panic), %o1
        save    %sp, -SA(MINFRAME), %sp ! get a new window to preserve caller
        call    panic
        or      %i1, %lo(tick_write_delta_panic), %o0
        /*NOTREACHED*/
        retl
        nop
#endif

#if defined(lint)
/*
 * Return 1 if disabled.
 */

int
tickcmpr_disabled(void)
{ return (0); }

#else   /* lint */

        ENTRY_NP(tickcmpr_disabled)
        RD_TICKCMPR(%g1, %o0)
        retl
        srlx    %g1, TICKINT_DIS_SHFT, %o0
        SET_SIZE(tickcmpr_disabled)

#endif  /* lint */

/*
 * Get current tick.
 */
#if defined(lint)

u_longlong_t
gettick(void)
{ return (0); }

#else   /* lint */

        ENTRY(gettick)
        GET_NATIVE_TIME(%o0, %o2, %o3)
        retl
        nop
        SET_SIZE(gettick)

#endif  /* lint */


/*
 * Return the counter portion of the tick register.
 */

#if defined(lint)

uint64_t
gettick_counter(void)
{ return (0); }

#else   /* lint */

        ENTRY_NP(gettick_counter)
        rdpr    %tick, %o0
        sllx    %o0, 1, %o0
        retl
        srlx    %o0, 1, %o0             ! shake off npt bit
        SET_SIZE(gettick_counter)
#endif  /* lint */

/*
 * Provide a C callable interface to the trap that reads the hi-res timer.
 * Returns a 64-bit nanosecond timestamp in %o0 and %o1.
 */

#if defined(lint)

hrtime_t
gethrtime(void)
{
        return ((hrtime_t)0);
}

hrtime_t
gethrtime_unscaled(void)
{
        return ((hrtime_t)0);
}

hrtime_t
gethrtime_max(void)
{
        return ((hrtime_t)0);
}

void
scalehrtime(hrtime_t *hrt)
{
        *hrt = 0;
}

void
gethrestime(timespec_t *tp)
{
        tp->tv_sec = 0;
        tp->tv_nsec = 0;
}

time_t
gethrestime_sec(void)
{
        return (0);
}

void
gethrestime_lasttick(timespec_t *tp)
{
        tp->tv_sec = 0;
        tp->tv_nsec = 0;
}

/*ARGSUSED*/
void
hres_tick(void)
{
}

void
panic_hres_tick(void)
{
}

#else   /* lint */

        ENTRY_NP(gethrtime)
        GET_HRTIME(%g1, %o0, %o1, %o2, %o3, %o4, %o5, %g2)
                                                ! %g1 = hrtime
        retl
        mov     %g1, %o0
        SET_SIZE(gethrtime)

        ENTRY_NP(gethrtime_unscaled)
        GET_NATIVE_TIME(%g1, %o2, %o3)          ! %g1 = native time
        retl
        mov     %g1, %o0
        SET_SIZE(gethrtime_unscaled)
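/*
 * The NATIVE_TIME_TO_NSEC() conversions used below scale a raw %stick
 * count into nanoseconds using the nsec_scale factor defined in this
 * file's data section (nanoseconds-per-tick, scaled up by 2^NSEC_SHIFT).
 * A rough C rendering (illustrative only; the real macros live in
 * <sys/clock.h> and split the multiply so the 64-bit product cannot
 * overflow):
 *
 *      uint64_t
 *      native_time_to_nsec_sketch(uint64_t ticks)
 *      {
 *              // nsec = ticks * nsec_scale / 2^NSEC_SHIFT, computed as
 *              // two 32x32->64 multiplies on the tick halves
 *              uint64_t hi = ticks >> 32, lo = ticks & 0xffffffffULL;
 *              return (((hi * nsec_scale) << (32 - NSEC_SHIFT)) +
 *                  ((lo * nsec_scale) >> NSEC_SHIFT));
 *      }
 */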
        ENTRY_NP(gethrtime_waitfree)
        ALTENTRY(dtrace_gethrtime)
        GET_NATIVE_TIME(%g1, %o2, %o3)          ! %g1 = native time
        NATIVE_TIME_TO_NSEC(%g1, %o2, %o3)
        retl
        mov     %g1, %o0
        SET_SIZE(dtrace_gethrtime)
        SET_SIZE(gethrtime_waitfree)

        ENTRY(gethrtime_max)
        NATIVE_TIME_MAX(%g1)
        NATIVE_TIME_TO_NSEC(%g1, %o0, %o1)

        ! hrtime_t's are signed, max hrtime_t must be positive
        mov     -1, %o2
        brlz,a  %g1, 1f
        srlx    %o2, 1, %g1
1:
        retl
        mov     %g1, %o0
        SET_SIZE(gethrtime_max)

        ENTRY(scalehrtime)
        ldx     [%o0], %o1
        NATIVE_TIME_TO_NSEC(%o1, %o2, %o3)
        retl
        stx     %o1, [%o0]
        SET_SIZE(scalehrtime)

/*
 * Fast trap to return a timestamp, uses trap window, leaves traps
 * disabled. Returns a 64-bit nanosecond timestamp in %o0 and %o1.
 *
 * This is the handler for the ST_GETHRTIME trap.
 */

        ENTRY_NP(get_timestamp)
        GET_HRTIME(%g1, %g2, %g3, %g4, %g5, %o0, %o1, %o2)     ! %g1 = hrtime
        srlx    %g1, 32, %o0                    ! %o0 = hi32(%g1)
        srl     %g1, 0, %o1                     ! %o1 = lo32(%g1)
        FAST_TRAP_DONE
        SET_SIZE(get_timestamp)

/*
 * Macro to convert GET_HRESTIME() bits into a timestamp.
 *
 * We use two separate macros so that the platform-dependent GET_HRESTIME()
 * can be as small as possible; CONV_HRESTIME() implements the generic part.
 */
#define CONV_HRESTIME(hrestsec, hrestnsec, adj, nslt, nano) \
        brz,pt  adj, 3f;                /* no adjustments, it's easy */ \
        add     hrestnsec, nslt, hrestnsec; /* hrest.tv_nsec += nslt */ \
        brlz,pn adj, 2f;                /* if hrestime_adj negative */  \
        srlx    nslt, ADJ_SHIFT, nslt;  /* delay: nslt >>= 4 */         \
        subcc   adj, nslt, %g0;         /* hrestime_adj - nslt/16 */    \
        movg    %xcc, nslt, adj;        /* adj by min(adj, nslt/16) */  \
        ba      3f;                     /* go convert to sec/nsec */    \
        add     hrestnsec, adj, hrestnsec; /* delay: apply adjustment */ \
2:      addcc   adj, nslt, %g0;         /* hrestime_adj + nslt/16 */    \
        bge,a,pt %xcc, 3f;              /* is adj less negative? */     \
        add     hrestnsec, adj, hrestnsec; /* yes: hrest.nsec += adj */ \
        sub     hrestnsec, nslt, hrestnsec; /* no: hrest.nsec -= nslt/16 */ \
3:      cmp     hrestnsec, nano;        /* more than a billion? */      \
        bl,pt   %xcc, 4f;               /* if not, we're done */        \
        nop;                            /* delay: do nothing :( */      \
        add     hrestsec, 1, hrestsec;  /* hrest.tv_sec++; */           \
        sub     hrestnsec, nano, hrestnsec; /* hrest.tv_nsec -= NANOSEC; */ \
        ba,a    3b;                     /* check >= billion again */    \
4:

        ENTRY_NP(gethrestime)
        GET_HRESTIME(%o1, %o2, %o3, %o4, %o5, %g1, %g2, %g3, %g4)
        CONV_HRESTIME(%o1, %o2, %o3, %o4, %o5)
        stn     %o1, [%o0]
        retl
        stn     %o2, [%o0 + CLONGSIZE]
        SET_SIZE(gethrestime)

/*
 * Similar to gethrestime(), but gethrestime_sec() returns current hrestime
 * seconds.
 */
        ENTRY_NP(gethrestime_sec)
        GET_HRESTIME(%o0, %o2, %o3, %o4, %o5, %g1, %g2, %g3, %g4)
        CONV_HRESTIME(%o0, %o2, %o3, %o4, %o5)
        retl                            ! %o0 current hrestime seconds
        nop
        SET_SIZE(gethrestime_sec)
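/*
 * In C terms, CONV_HRESTIME() above amounts to the following
 * (illustrative only; nslt is the native time since the last tick,
 * already scaled to nanoseconds, adj is hrestime_adj, and ADJ_SHIFT
 * caps each adjustment at nslt/16):
 *
 *      hrest.tv_nsec += nslt;
 *      if (adj != 0) {
 *              long lim = nslt >> ADJ_SHIFT;   // max step: nslt/16
 *              if (adj > 0)
 *                      hrest.tv_nsec += (adj < lim) ? adj : lim;
 *              else
 *                      hrest.tv_nsec += (adj > -lim) ? adj : -lim;
 *      }
 *      while (hrest.tv_nsec >= NANOSEC) {
 *              hrest.tv_sec++;
 *              hrest.tv_nsec -= NANOSEC;
 *      }
 */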
/*
 * Returns the hrestime on the last tick. This is simpler than gethrestime()
 * and gethrestime_sec(): no conversion is required. gethrestime_lasttick()
 * follows the same locking algorithm as GET_HRESTIME and GET_HRTIME,
 * outlined in detail in clock.h. (Unlike GET_HRESTIME/GET_HRTIME, we don't
 * rely on load dependencies to effect the membar #LoadLoad, instead declaring
 * it explicitly.)
 */
        ENTRY_NP(gethrestime_lasttick)
        sethi   %hi(hres_lock), %o1
0:
        lduw    [%o1 + %lo(hres_lock)], %o2     ! Load lock value
        membar  #LoadLoad                       ! Load of lock must complete
        andn    %o2, 1, %o2                     ! Mask off lowest bit
        ldn     [%o1 + %lo(hrestime)], %g1      ! Seconds.
        add     %o1, %lo(hrestime), %o4
        ldn     [%o4 + CLONGSIZE], %g2          ! Nanoseconds.
        membar  #LoadLoad                       ! All loads must complete
        lduw    [%o1 + %lo(hres_lock)], %o3     ! Reload lock value
        cmp     %o3, %o2                        ! If lock is locked or has
        bne     0b                              ! changed, retry.
        stn     %g1, [%o0]                      ! Delay: store seconds
        retl
        stn     %g2, [%o0 + CLONGSIZE]          ! Delay: store nanoseconds
        SET_SIZE(gethrestime_lasttick)

/*
 * Fast trap for gettimeofday(). Returns a timestruc_t in %o0 and %o1.
 *
 * This is the handler for the ST_GETHRESTIME trap.
 */

        ENTRY_NP(get_hrestime)
        GET_HRESTIME(%o0, %o1, %g1, %g2, %g3, %g4, %g5, %o2, %o3)
        CONV_HRESTIME(%o0, %o1, %g1, %g2, %g3)
        FAST_TRAP_DONE
        SET_SIZE(get_hrestime)

/*
 * Fast trap to return lwp virtual time, uses trap window, leaves traps
 * disabled. Returns a 64-bit number in %o0:%o1, which is the number
 * of nanoseconds consumed.
 *
 * This is the handler for the ST_GETHRVTIME trap.
 *
 * Register usage:
 *      %o0, %o1 = return lwp virtual time
 *      %o2 = CPU/thread
 *      %o3 = lwp
 *      %g1 = scratch
 *      %g5 = scratch
 */
        ENTRY_NP(get_virtime)
        GET_NATIVE_TIME(%g5, %g1, %g2)  ! %g5 = native time in ticks
        CPU_ADDR(%g2, %g3)              ! CPU struct ptr to %g2
        ldn     [%g2 + CPU_THREAD], %g2 ! thread pointer to %g2
        ldn     [%g2 + T_LWP], %g3      ! lwp pointer to %g3

        /*
         * Subtract start time of current microstate from time
         * of day to get increment for lwp virtual time.
         */
        ldx     [%g3 + LWP_STATE_START], %g1    ! ms_state_start
        sub     %g5, %g1, %g5

        /*
         * Add current value of ms_acct[LMS_USER]
         */
        ldx     [%g3 + LWP_ACCT_USER], %g1      ! ms_acct[LMS_USER]
        add     %g5, %g1, %g5
        NATIVE_TIME_TO_NSEC(%g5, %g1, %o0)

        srl     %g5, 0, %o1             ! %o1 = lo32(%g5)
        srlx    %g5, 32, %o0            ! %o0 = hi32(%g5)

        FAST_TRAP_DONE
        SET_SIZE(get_virtime)
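/*
 * The hres_lock protocol used by gethrestime_lasttick() above (and, on
 * the writer side, by hres_tick() below) can be sketched in C roughly
 * as follows (illustrative only; membar_consumer() stands in for the
 * explicit membar #LoadLoad instructions). hres_lock is odd while the
 * writer holds it and is incremented again on release, so a reader
 * retries whenever the lock was held or changed between its two reads:
 *
 *      do {
 *              lock = hres_lock & ~1;          // round down to "unlocked"
 *              membar_consumer();
 *              ts.tv_sec = hrestime.tv_sec;
 *              ts.tv_nsec = hrestime.tv_nsec;
 *              membar_consumer();
 *      } while (lock != hres_lock);            // retry if locked/changed
 *      *tp = ts;
 */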
        .seg    ".text"
hrtime_base_panic:
        .asciz  "hrtime_base stepping back"


        ENTRY_NP(hres_tick)
        save    %sp, -SA(MINFRAME), %sp ! get a new window

        sethi   %hi(hrestime), %l4
        ldstub  [%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5  ! try locking
7:      tst     %l5
        bz,pt   %xcc, 8f                ! if we got it, drive on
        ld      [%l4 + %lo(nsec_scale)], %l5    ! delay: %l5 = scaling factor
        ldub    [%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
9:      tst     %l5
        bz,a,pn %xcc, 7b
        ldstub  [%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
        ba,pt   %xcc, 9b
        ldub    [%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
8:
        membar  #StoreLoad|#StoreStore

        !
        ! update hres_last_tick. %l5 has the scaling factor (nsec_scale).
        !
        ldx     [%l4 + %lo(hrtime_base)], %g1   ! load current hrtime_base
        GET_NATIVE_TIME(%l0, %l3, %l6)          ! current native time
        stx     %l0, [%l4 + %lo(hres_last_tick)] ! prev = current
        ! convert native time to nsecs
        NATIVE_TIME_TO_NSEC_SCALE(%l0, %l5, %l2, NSEC_SHIFT)

        sub     %l0, %g1, %i1           ! get accurate nsec delta

        ldx     [%l4 + %lo(hrtime_base)], %l1
        cmp     %l1, %l0
        bg,pn   %xcc, 9f
        nop

        stx     %l0, [%l4 + %lo(hrtime_base)]   ! update hrtime_base

        !
        ! apply adjustment, if any
        !
        ldx     [%l4 + %lo(hrestime_adj)], %l0  ! %l0 = hrestime_adj
        brz     %l0, 2f
                                        ! hrestime_adj == 0 ?
                                        ! yes, skip adjustments
        clr     %l5                     ! delay: set adj to zero
        tst     %l0                     ! is hrestime_adj >= 0 ?
        bge,pt  %xcc, 1f                ! yes, go handle positive case
        srl     %i1, ADJ_SHIFT, %l5     ! delay: %l5 = adj

        addcc   %l0, %l5, %g0           ! hrestime_adj < -adj ?
        bl,pt   %xcc, 2f                ! yes, use current adj
        neg     %l5                     ! delay: %l5 = -adj
        ba,pt   %xcc, 2f
        mov     %l0, %l5                ! no, so set adj = hrestime_adj
1:
        subcc   %l0, %l5, %g0           ! hrestime_adj < adj ?
        bl,a,pt %xcc, 2f                ! yes, set adj = hrestime_adj
        mov     %l0, %l5                ! delay: adj = hrestime_adj
2:
        ldx     [%l4 + %lo(timedelta)], %l0     ! %l0 = timedelta
        sub     %l0, %l5, %l0           ! timedelta -= adj

        stx     %l0, [%l4 + %lo(timedelta)]     ! store new timedelta
        stx     %l0, [%l4 + %lo(hrestime_adj)]  ! hrestime_adj = timedelta

        or      %l4, %lo(hrestime), %l2
        ldn     [%l2], %i2              ! %i2:%i3 = hrestime sec:nsec
        ldn     [%l2 + CLONGSIZE], %i3
        add     %i3, %l5, %i3           ! hrestime.nsec += adj
        add     %i3, %i1, %i3           ! hrestime.nsec += nslt

        set     NANOSEC, %l5            ! %l5 = NANOSEC
        cmp     %i3, %l5
        bl,pt   %xcc, 5f                ! if hrestime.tv_nsec < NANOSEC
        sethi   %hi(one_sec), %i1       ! delay
        add     %i2, 0x1, %i2           ! hrestime.tv_sec++
        sub     %i3, %l5, %i3           ! hrestime.tv_nsec - NANOSEC
        mov     0x1, %l5
        st      %l5, [%i1 + %lo(one_sec)]
5:
        stn     %i2, [%l2]
        stn     %i3, [%l2 + CLONGSIZE]  ! store the new hrestime

        membar  #StoreStore

        ld      [%l4 + %lo(hres_lock)], %i1
        inc     %i1                     ! release lock
        st      %i1, [%l4 + %lo(hres_lock)]     ! clear hres_lock

        ret
        restore

9:
        !
        ! release hres_lock
        !
        ld      [%l4 + %lo(hres_lock)], %i1
        inc     %i1
        st      %i1, [%l4 + %lo(hres_lock)]

        sethi   %hi(hrtime_base_panic), %o0
        call    panic
        or      %o0, %lo(hrtime_base_panic), %o0

        SET_SIZE(hres_tick)

#endif  /* lint */

#if !defined(lint) && !defined(__lint)

        .seg    ".text"
kstat_q_panic_msg:
        .asciz  "kstat_q_exit: qlen == 0"

        ENTRY(kstat_q_panic)
        save    %sp, -SA(MINFRAME), %sp
        sethi   %hi(kstat_q_panic_msg), %o0
        call    panic
        or      %o0, %lo(kstat_q_panic_msg), %o0
        /*NOTREACHED*/
        SET_SIZE(kstat_q_panic)

#define BRZPN   brz,pn
#define BRZPT   brz,pt

#define KSTAT_Q_UPDATE(QOP, QBR, QZERO, QRETURN, QTYPE) \
        ld      [%o0 + QTYPE/**/CNT], %o1;      /* %o1 = old qlen */    \
        QOP     %o1, 1, %o2;                    /* %o2 = new qlen */    \
        QBR     %o1, QZERO;                     /* done if qlen == 0 */ \
        st      %o2, [%o0 + QTYPE/**/CNT];      /* delay: save qlen */  \
        ldx     [%o0 + QTYPE/**/LASTUPDATE], %o3;                       \
        ldx     [%o0 + QTYPE/**/TIME], %o4;     /* %o4 = old time */    \
        ldx     [%o0 + QTYPE/**/LENTIME], %o5;  /* %o5 = old lentime */ \
        sub     %g1, %o3, %o2;                  /* %o2 = time delta */  \
        mulx    %o1, %o2, %o3;                  /* %o3 = cur lentime */ \
        add     %o4, %o2, %o4;                  /* %o4 = new time */    \
        add     %o5, %o3, %o5;                  /* %o5 = new lentime */ \
        stx     %o4, [%o0 + QTYPE/**/TIME];     /* save time */         \
        stx     %o5, [%o0 + QTYPE/**/LENTIME];  /* save lentime */      \
QRETURN;                                                                \
        stx     %g1, [%o0 + QTYPE/**/LASTUPDATE]; /* lastupdate = now */

        .align  16
        ENTRY(kstat_waitq_enter)
        GET_NATIVE_TIME(%g1, %g2, %g3)
        KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_W)
        SET_SIZE(kstat_waitq_enter)

        .align  16
        ENTRY(kstat_waitq_exit)
        GET_NATIVE_TIME(%g1, %g2, %g3)
        KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, retl, KSTAT_IO_W)
        SET_SIZE(kstat_waitq_exit)

        .align  16
        ENTRY(kstat_runq_enter)
        GET_NATIVE_TIME(%g1, %g2, %g3)
        KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_R)
        SET_SIZE(kstat_runq_enter)

        .align  16
        ENTRY(kstat_runq_exit)
        GET_NATIVE_TIME(%g1, %g2, %g3)
        KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, retl, KSTAT_IO_R)
        SET_SIZE(kstat_runq_exit)
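/*
 * For reference, each KSTAT_Q_UPDATE() expansion above amounts to the
 * following C, shown for the waitq-enter case (an illustrative sketch
 * only; the q_* names are generic stand-ins for the kstat_io wait/run
 * queue fields, and "now" is the native time GET_NATIVE_TIME loads
 * into %g1):
 *
 *      uint_t qlen = kiop->q_cnt;              // old queue length
 *      kiop->q_cnt = qlen + 1;                 // ("sub" on the exit paths)
 *      if (qlen != 0) {                        // nothing accumulates at zero
 *              hrtime_t delta = now - kiop->q_lastupdate;
 *              kiop->q_time += delta;          // time queue was non-empty
 *              kiop->q_lentime += qlen * delta; // integral of queue length
 *      }
 *      kiop->q_lastupdate = now;               // always updated
 */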
        .align  16
        ENTRY(kstat_waitq_to_runq)
        GET_NATIVE_TIME(%g1, %g2, %g3)
        KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, 1:, KSTAT_IO_W)
        KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_R)
        SET_SIZE(kstat_waitq_to_runq)

        .align  16
        ENTRY(kstat_runq_back_to_waitq)
        GET_NATIVE_TIME(%g1, %g2, %g3)
        KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, 1:, KSTAT_IO_R)
        KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_W)
        SET_SIZE(kstat_runq_back_to_waitq)

#endif  /* lint */

#ifdef lint

int64_t timedelta;
hrtime_t hres_last_tick;
volatile timestruc_t hrestime;
int64_t hrestime_adj;
volatile int hres_lock;
uint_t nsec_scale;
hrtime_t hrtime_base;
int traptrace_use_stick;

#else
        /*
         *  -- WARNING --
         *
         * The following variables MUST be together on a 128-byte boundary.
         * In addition to the primary performance motivation (having them all
         * on the same cache line(s)), code here and in the GET*TIME() macros
         * assumes that they all have the same high 22 address bits (so
         * there's only one sethi).
         */
        .seg    ".data"
        .global timedelta, hres_last_tick, hrestime, hrestime_adj
        .global hres_lock, nsec_scale, hrtime_base, traptrace_use_stick
        .global nsec_shift, adj_shift

        /* XXX - above comment claims 128-bytes is necessary */
        .align  64
timedelta:
        .word   0, 0            /* int64_t */
hres_last_tick:
        .word   0, 0            /* hrtime_t */
hrestime:
        .nword  0, 0            /* 2 longs */
hrestime_adj:
        .word   0, 0            /* int64_t */
hres_lock:
        .word   0
nsec_scale:
        .word   0
hrtime_base:
        .word   0, 0
traptrace_use_stick:
        .word   0
nsec_shift:
        .word   NSEC_SHIFT
adj_shift:
        .word   ADJ_SHIFT

#endif


/*
 * drv_usecwait(clock_t n)      [DDI/DKI - section 9F]
 * usec_delay(int n)            [compatibility - should go one day]
 * Delay by spinning.
 *
 * Delay for n microseconds; values <= 0 delay for 1 usec.
 *
 * With UltraSPARC-III the combination of supporting mixed-speed CPUs
 * and variable clock rate for power management requires that we
 * use %stick to implement this routine.
 */

#if defined(lint)

/*ARGSUSED*/
void
drv_usecwait(clock_t n)
{}

/*ARGSUSED*/
void
usec_delay(int n)
{}

#else   /* lint */

        ENTRY(drv_usecwait)
        ALTENTRY(usec_delay)
        brlez,a,pn %o0, 0f
        mov     1, %o0
0:
        sethi   %hi(sticks_per_usec), %o1
        lduw    [%o1 + %lo(sticks_per_usec)], %o1
        mulx    %o1, %o0, %o1           ! Scale usec to ticks
        inc     %o1                     ! We don't start on a tick edge
        GET_NATIVE_TIME(%o2, %o3, %o4)
        add     %o1, %o2, %o1

1:      cmp     %o1, %o2
        GET_NATIVE_TIME(%o2, %o3, %o4)
        bgeu,pt %xcc, 1b
        nop
        retl
        nop
        SET_SIZE(usec_delay)
        SET_SIZE(drv_usecwait)
#endif  /* lint */
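/*
 * The spin in drv_usecwait() above is equivalent to the following C
 * (illustrative only; rd_stick() is a hypothetical stand-in for
 * GET_NATIVE_TIME):
 *
 *      void
 *      drv_usecwait_sketch(clock_t n)
 *      {
 *              if (n <= 0)
 *                      n = 1;                  // minimum delay is 1 usec
 *              // the +1 is because we don't start on a tick edge
 *              uint64_t end = rd_stick() + n * sticks_per_usec + 1;
 *              while (rd_stick() <= end)       // busy-wait until deadline
 *                      ;
 *      }
 */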
#if defined(lint)

/* ARGSUSED */
void
pil14_interrupt(int level)
{}

#else

/*
 * Level-14 interrupt prologue.
 */
        ENTRY_NP(pil14_interrupt)
        CPU_ADDR(%g1, %g2)
        rdpr    %pil, %g6                       ! %g6 = interrupted PIL
        stn     %g6, [%g1 + CPU_PROFILE_PIL]    ! record interrupted PIL
        rdpr    %tstate, %g6
        rdpr    %tpc, %g5
        btst    TSTATE_PRIV, %g6                ! trap from supervisor mode?
        bnz,a,pt %xcc, 1f
        stn     %g5, [%g1 + CPU_PROFILE_PC]     ! if so, record kernel PC
        stn     %g5, [%g1 + CPU_PROFILE_UPC]    ! if not, record user PC
        ba      pil_interrupt_common            ! must be large-disp branch
        stn     %g0, [%g1 + CPU_PROFILE_PC]     ! zero kernel PC
1:      ba      pil_interrupt_common            ! must be large-disp branch
        stn     %g0, [%g1 + CPU_PROFILE_UPC]    ! zero user PC
        SET_SIZE(pil14_interrupt)

        ENTRY_NP(tick_rtt)
        !
        ! Load TICK_COMPARE into %o5; if bit 63 is set, then TICK_COMPARE is
        ! disabled. If TICK_COMPARE is enabled, we know that we need to
        ! reenqueue the interrupt request structure. We'll then check TICKINT
        ! in SOFTINT; if it's set, then we know that we were in a TICK_COMPARE
        ! interrupt. In this case, TICK_COMPARE may have been rewritten
        ! recently; we'll compare %o5 to the current time to verify that it's
        ! in the future.
        !
        ! Note that %o5 is live until after 1f.
        ! XXX - there is a subroutine call while %o5 is live!
        !
        RD_TICKCMPR(%o5, %g1)
        srlx    %o5, TICKINT_DIS_SHFT, %g1
        brnz,pt %g1, 2f
        nop

        rdpr    %pstate, %g5
        andn    %g5, PSTATE_IE, %g1
        wrpr    %g0, %g1, %pstate       ! Disable vec interrupts

        sethi   %hi(cbe_level14_inum), %o1
        ldx     [%o1 + %lo(cbe_level14_inum)], %o1
        call    intr_enqueue_req        ! preserves %o5 and %g5
        mov     PIL_14, %o0

        ! Check SOFTINT for TICKINT/STICKINT
        rd      SOFTINT, %o4
        set     (TICK_INT_MASK | STICK_INT_MASK), %o0
        andcc   %o4, %o0, %g0
        bz,a,pn %icc, 2f
        wrpr    %g0, %g5, %pstate       ! Enable vec interrupts

        ! clear TICKINT/STICKINT
        wr      %o0, CLEAR_SOFTINT

        !
        ! Now that we've cleared TICKINT, we can reread %tick and confirm
        ! that the value we programmed is still in the future. If it isn't,
        ! we need to reprogram TICK_COMPARE to fire as soon as possible.
        !
        GET_NATIVE_TIME(%o0, %g1, %g2)  ! %o0 = tick
        sllx    %o0, 1, %o0             ! Clear the DIS bit
        srlx    %o0, 1, %o0
        cmp     %o5, %o0                ! In the future?
        bg,a,pt %xcc, 2f                ! Yes, drive on.
        wrpr    %g0, %g5, %pstate       ! delay: enable vec intr

        !
        ! If we're here, then we have programmed TICK_COMPARE with a %tick
        ! which is in the past; we'll now load an initial step size, and loop
        ! until we've managed to program TICK_COMPARE to fire in the future.
        !
        mov     8, %o4                  ! 8 = arbitrary initial step
1:      add     %o0, %o4, %o5           ! Add the step
        WR_TICKCMPR(%o5, %g1, %g2, __LINE__)    ! Write to TICK_CMPR
        GET_NATIVE_TIME(%o0, %g1, %g2)  ! %o0 = tick
        sllx    %o0, 1, %o0             ! Clear the DIS bit
        srlx    %o0, 1, %o0
        cmp     %o5, %o0                ! In the future?
        bg,a,pt %xcc, 2f                ! Yes, drive on.
        wrpr    %g0, %g5, %pstate       ! delay: enable vec intr
        ba      1b                      ! No, try again.
        sllx    %o4, 1, %o4             ! delay: double step size

2:      ba      current_thread_complete
        nop
        SET_SIZE(tick_rtt)

#endif  /* lint */
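/*
 * The control flow of tick_rtt above, in rough C (an illustrative
 * sketch only; the helper names are stand-ins for the register access
 * and softint macros used above, and interrupt re-enabling is folded
 * into a single spot for brevity):
 *
 *      uint64_t cmpr = rd_stick_compare();
 *      if ((cmpr >> TICKINT_DIS_SHFT) == 0) {  // compare is enabled
 *              disable_vec_intr();
 *              intr_enqueue_req(PIL_14, cbe_level14_inum);
 *              if (softint_pending(TICK_INT_MASK | STICK_INT_MASK)) {
 *                      clear_softint(TICK_INT_MASK | STICK_INT_MASK);
 *                      uint64_t now = rd_stick() & ~(1ULL << 63);
 *                      uint64_t step = 8;      // arbitrary initial step
 *                      while (cmpr <= now) {   // target already passed,
 *                              cmpr = now + step;      // so fire ASAP
 *                              wr_stick_compare(cmpr);
 *                              now = rd_stick() & ~(1ULL << 63);
 *                              step <<= 1;     // double step and retry
 *                      }
 *              }
 *              enable_vec_intr();
 *      }
 */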
#if defined(lint)
/*
 * Prefetch a page_t for write or read; this assumes a linear
 * scan of sequential page_t's.
 */
/*ARGSUSED*/
void
prefetch_page_w(void *pp)
{}

/*ARGSUSED*/
void
prefetch_page_r(void *pp)
{}
#else   /* lint */

/* XXXQ These should be inline templates, not functions */
        ENTRY(prefetch_page_w)
        retl
        nop
        SET_SIZE(prefetch_page_w)

        ENTRY(prefetch_page_r)
        retl
        nop
        SET_SIZE(prefetch_page_r)

#endif  /* lint */

#if defined(lint)
/*
 * Prefetch struct smap for write.
 */
/*ARGSUSED*/
void
prefetch_smap_w(void *smp)
{}
#else   /* lint */

/* XXXQ These should be inline templates, not functions */
        ENTRY(prefetch_smap_w)
        retl
        nop
        SET_SIZE(prefetch_smap_w)

#endif  /* lint */

/*
 * Generic sun4v MMU and Cache operations.
 */

#if defined(lint)

/*ARGSUSED*/
void
vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
{}

/*ARGSUSED*/
void
vtag_flushall(void)
{}

/*ARGSUSED*/
void
vtag_unmap_perm_tl1(uint64_t vaddr, uint64_t ctxnum)
{}

/*ARGSUSED*/
void
vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
{}

/*ARGSUSED*/
void
vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
{}

/*ARGSUSED*/
void
vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
{}

/*ARGSUSED*/
void
vac_flushpage(pfn_t pfnum, int vcolor)
{}

/*ARGSUSED*/
void
vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
{}

/*ARGSUSED*/
void
flush_instr_mem(caddr_t vaddr, size_t len)
{}

#else   /* lint */

        ENTRY_NP(vtag_flushpage)
        /*
         * flush page from the tlb
         *
         * %o0 = vaddr
         * %o1 = sfmmup
         */
        SFMMU_CPU_CNUM(%o1, %g1, %g2)   /* %g1 = sfmmu cnum on this CPU */

        mov     %g1, %o1
        mov     MAP_ITLB | MAP_DTLB, %o2
        ta      MMU_UNMAP_ADDR
        brz,pt  %o0, 1f
        nop
        ba      panic_bad_hcall
        mov     MMU_UNMAP_ADDR, %o1
1:
        retl
        nop
        SET_SIZE(vtag_flushpage)

        ENTRY_NP(vtag_flushall)
        mov     %g0, %o0        ! XXX no cpu list yet
        mov     %g0, %o1        ! XXX no cpu list yet
        mov     MAP_ITLB | MAP_DTLB, %o2
        mov     MMU_DEMAP_ALL, %o5
        ta      FAST_TRAP
        brz,pt  %o0, 1f
        nop
        ba      panic_bad_hcall
        mov     MMU_DEMAP_ALL, %o1
1:
        retl
        nop
        SET_SIZE(vtag_flushall)

        ENTRY_NP(vtag_unmap_perm_tl1)
        /*
         * x-trap to unmap perm map entry
         * %g1 = vaddr
         * %g2 = ctxnum (KCONTEXT only)
         */
        mov     %o0, %g3
        mov     %o1, %g4
        mov     %o2, %g5
        mov     %o5, %g6
        mov     %g1, %o0
        mov     %g2, %o1
        mov     MAP_ITLB | MAP_DTLB, %o2
        mov     UNMAP_PERM_ADDR, %o5
        ta      FAST_TRAP
        brz,pt  %o0, 1f
        nop

        mov     PTL1_BAD_HCALL, %g1

        cmp     %o0, H_ENOMAP
        move    %xcc, PTL1_BAD_HCALL_UNMAP_PERM_ENOMAP, %g1

        cmp     %o0, H_EINVAL
        move    %xcc, PTL1_BAD_HCALL_UNMAP_PERM_EINVAL, %g1

        ba,a    ptl1_panic
1:
        mov     %g6, %o5
        mov     %g5, %o2
        mov     %g4, %o1
        mov     %g3, %o0
        retry
        SET_SIZE(vtag_unmap_perm_tl1)

        ENTRY_NP(vtag_flushpage_tl1)
        /*
         * x-trap to flush page from tlb and tsb
         *
         * %g1 = vaddr, zero-extended on 32-bit kernel
         * %g2 = sfmmup
         *
         * assumes TSBE_TAG = 0
         */
        srln    %g1, MMU_PAGESHIFT, %g1
        slln    %g1, MMU_PAGESHIFT, %g1         /* g1 = vaddr */
        mov     %o0, %g3
        mov     %o1, %g4
        mov     %o2, %g5
        mov     %g1, %o0                        /* vaddr */

        SFMMU_CPU_CNUM(%g2, %o1, %g6)   /* %o1 = sfmmu cnum on this CPU */

        mov     MAP_ITLB | MAP_DTLB, %o2
        ta      MMU_UNMAP_ADDR
        brz,pt  %o0, 1f
        nop
        ba      ptl1_panic
        mov     PTL1_BAD_HCALL, %g1
1:
        mov     %g5, %o2
        mov     %g4, %o1
        mov     %g3, %o0
        membar  #Sync
        retry
        SET_SIZE(vtag_flushpage_tl1)
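/*
 * The hypervisor call pattern repeated in the vtag_* routines, in
 * rough C (illustrative only; hv_mmu_unmap_addr() is a hypothetical
 * wrapper for the "ta MMU_UNMAP_ADDR" sequence, which takes vaddr,
 * context number and flags in %o0-%o2 and returns a status in %o0):
 *
 *      uint64_t err = hv_mmu_unmap_addr(vaddr, cnum,
 *          MAP_ITLB | MAP_DTLB);
 *      if (err != H_EOK)
 *              panic_bad_hcall(err, MMU_UNMAP_ADDR);   // no return
 */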
        ENTRY_NP(vtag_flush_pgcnt_tl1)
        /*
         * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
         *
         * %g1 = vaddr, zero-extended on 32-bit kernel
         * %g2 = <sfmmup58|pgcnt6>, (pgcnt - 1) is passed in via pgcnt6 bits.
         *
         * NOTE: this handler relies on the fact that no
         *      interrupts or traps can occur during the loop
         *      issuing the TLB_DEMAP operations. It is assumed
         *      that interrupts are disabled and this code is
         *      fetching from the kernel locked text address.
         *
         * assumes TSBE_TAG = 0
         */
        srln    %g1, MMU_PAGESHIFT, %g1
        slln    %g1, MMU_PAGESHIFT, %g1         /* g1 = vaddr */
        mov     %o0, %g3
        mov     %o1, %g4
        mov     %o2, %g5

        and     %g2, SFMMU_PGCNT_MASK, %g7      /* g7 = pgcnt - 1 */
        add     %g7, 1, %g7                     /* g7 = pgcnt */

        andn    %g2, SFMMU_PGCNT_MASK, %o0      /* %o0 = sfmmup */

        SFMMU_CPU_CNUM(%o0, %g2, %g6)   /* %g2 = sfmmu cnum on this CPU */

        set     MMU_PAGESIZE, %g6               /* g6 = pgsize */

1:
        mov     %g1, %o0                        /* vaddr */
        mov     %g2, %o1                        /* cnum */
        mov     MAP_ITLB | MAP_DTLB, %o2
        ta      MMU_UNMAP_ADDR
        brz,pt  %o0, 2f
        nop
        ba      ptl1_panic
        mov     PTL1_BAD_HCALL, %g1
2:
        deccc   %g7                             /* decr pgcnt */
        bnz,pt  %icc, 1b
        add     %g1, %g6, %g1                   /* go to nextpage */

        mov     %g5, %o2
        mov     %g4, %o1
        mov     %g3, %o0
        membar  #Sync
        retry
        SET_SIZE(vtag_flush_pgcnt_tl1)

        ! Not implemented on US1/US2
        ENTRY_NP(vtag_flushall_tl1)
        mov     %o0, %g3
        mov     %o1, %g4
        mov     %o2, %g5
        mov     %o3, %g6        ! XXXQ not used?
        mov     %o5, %g7
        mov     %g0, %o0        ! XXX no cpu list yet
        mov     %g0, %o1        ! XXX no cpu list yet
        mov     MAP_ITLB | MAP_DTLB, %o2
        mov     MMU_DEMAP_ALL, %o5
        ta      FAST_TRAP
        brz,pt  %o0, 1f
        nop
        ba      ptl1_panic
        mov     PTL1_BAD_HCALL, %g1
1:
        mov     %g7, %o5
        mov     %g6, %o3        ! XXXQ not used?
        mov     %g5, %o2
        mov     %g4, %o1
        mov     %g3, %o0
        retry
        SET_SIZE(vtag_flushall_tl1)

/*
 * flush_instr_mem:
 *      Flush a portion of the I-$ starting at vaddr
 *      %o0 vaddr
 *      %o1 bytes to be flushed
 */

        ENTRY(flush_instr_mem)
        membar  #StoreStore                     ! Ensure the stores
                                                ! are globally visible
1:
        flush   %o0
        subcc   %o1, ICACHE_FLUSHSZ, %o1        ! bytes = bytes-0x20
        bgu,pt  %ncc, 1b
        add     %o0, ICACHE_FLUSHSZ, %o0        ! vaddr = vaddr+0x20

        retl
        nop
        SET_SIZE(flush_instr_mem)

#endif  /* !lint */

/*
 * fp_zero() - clear all fp data registers and the fsr
 */

#if defined(lint) || defined(__lint)

void
fp_zero(void)
{}

#else   /* lint */

.global fp_zero_zero
.align 8
fp_zero_zero:
        .xword  0

        ENTRY_NP(fp_zero)
        sethi   %hi(fp_zero_zero), %o0
        ldd     [%o0 + %lo(fp_zero_zero)], %fsr
        ldd     [%o0 + %lo(fp_zero_zero)], %f0
        fmovd   %f0, %f2
        fmovd   %f0, %f4
        fmovd   %f0, %f6
        fmovd   %f0, %f8
        fmovd   %f0, %f10
        fmovd   %f0, %f12
        fmovd   %f0, %f14
        fmovd   %f0, %f16
        fmovd   %f0, %f18
        fmovd   %f0, %f20
        fmovd   %f0, %f22
        fmovd   %f0, %f24
        fmovd   %f0, %f26
        fmovd   %f0, %f28
        fmovd   %f0, %f30
        fmovd   %f0, %f32
        fmovd   %f0, %f34
        fmovd   %f0, %f36
        fmovd   %f0, %f38
        fmovd   %f0, %f40
        fmovd   %f0, %f42
        fmovd   %f0, %f44
        fmovd   %f0, %f46
        fmovd   %f0, %f48
        fmovd   %f0, %f50
        fmovd   %f0, %f52
        fmovd   %f0, %f54
        fmovd   %f0, %f56
        fmovd   %f0, %f58
        fmovd   %f0, %f60
        retl
        fmovd   %f0, %f62
        SET_SIZE(fp_zero)

#endif  /* lint */
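/*
 * flush_instr_mem() above walks the range in ICACHE_FLUSHSZ (0x20) byte
 * strides. In rough C (illustrative only; flush_line() stands in for
 * the "flush" instruction, and len is assumed to be a positive multiple
 * of ICACHE_FLUSHSZ):
 *
 *      void
 *      flush_instr_mem_sketch(caddr_t vaddr, size_t len)
 *      {
 *              membar_producer();      // make prior stores visible first
 *              do {
 *                      flush_line(vaddr);      // flush one I-$ line
 *                      vaddr += ICACHE_FLUSHSZ;
 *                      len -= ICACHE_FLUSHSZ;
 *              } while (len > 0);
 *      }
 */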