/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Process switching routines.
 */

#if !defined(lint)
#include "assym.h"
#else	/* lint */
#include <sys/thread.h>
#endif	/* lint */

#include <sys/param.h>
#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <sys/pcb.h>
#include <sys/machthread.h>
#include <sys/machclock.h>
#include <sys/privregs.h>
#include <sys/vtrace.h>
#include <vm/hat_sfmmu.h>

/*
 * resume(kthread_id_t)
 *
 * a thread can only run on one processor at a time. there
 * exists a window on MPs where the current thread on one
 * processor is capable of being dispatched by another processor.
 * some overlap between outgoing and incoming threads can happen
 * when they are the same thread. in this case where the threads
 * are the same, resume() on one processor will spin on the incoming
 * thread until resume() on the other processor has finished with
 * the outgoing thread.
 *
 * The MMU context changes when the resuming thread resides in a different
 * process. Kernel threads are known by resume to reside in process 0.
 * The MMU context, therefore, only changes when resuming a thread in
 * a process different from curproc.
 *
 * resume_from_intr() is called when the thread being resumed was not
 * passivated by resume (e.g. was interrupted). This means that the
 * resume lock is already held and that a restore context is not needed.
 * Also, the MMU context is not changed on the resume in this case.
 *
 * resume_from_zombie() is the same as resume except the calling thread
 * is a zombie and must be put on the deathrow list after the CPU is
 * off the stack.
 */
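
/*
 * To make the assembly below easier to follow, here is a rough,
 * illustrative C rendering of resume()'s overall flow (a sketch with
 * descriptive helper names, not real kernel symbols):
 *
 *	void
 *	resume(kthread_id_t t)
 *	{
 *		save_fp_state_and_callback_ctx(curthread);
 *		switch_to_idle_thread_stack();
 *		curthread->t_lock = 0;		-- outgoing thread may now be
 *						-- dispatched by another CPU
 *		if (ttoproc(t) != curproc)
 *			switch_mmu_context(ttoproc(t));
 *		while (!lock_try(&t->t_lock))
 *			;			-- spin; see labels 6:/7:
 *		CPU->cpu_thread = t;
 *		restore_stack_and_state(t);	-- "returns" as thread t
 *	}
 */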

#if defined(lint)

/* ARGSUSED */
void
resume(kthread_id_t t)
{}

#else	/* lint */

	ENTRY(resume)
	save	%sp, -SA(MINFRAME), %sp		! save ins and locals

	call	__dtrace_probe___sched_off__cpu	! DTrace probe
	mov	%i0, %o0			! arg for DTrace probe

	membar	#Sync				! flush writebuffers
	flushw					! flushes all but this window

	stn	%i7, [THREAD_REG + T_PC]	! save return address
	stn	%fp, [THREAD_REG + T_SP]	! save sp

	!
	! Save GSR (Graphics Status Register).
	!
	! Read fprs, call fp_save if FPRS_FEF set.
	! This handles floating-point state saving.
	! The fprs could be turned on by hw bcopy software,
	! *or* by fp_disabled. Handle it either way.
	!
	ldn	[THREAD_REG + T_LWP], %o4	! get lwp pointer
	rd	%fprs, %g4			! read fprs
	brnz,pt	%o4, 0f				! if user thread skip
	ldn	[THREAD_REG + T_CPU], %i1	! get CPU pointer

	!
	! kernel thread
	!
	! we save fprs at the beginning of the stack so we know
	! where to check at resume time
	ldn	[THREAD_REG + T_STACK], %i2
	ldn	[THREAD_REG + T_CTX], %g3	! get ctx pointer
	andcc	%g4, FPRS_FEF, %g0		! is FPRS_FEF set
	bz,pt	%icc, 1f			! nope, skip
	st	%g4, [%i2 + SA(MINFRAME) + FPU_FPRS]	! save fprs

	! save kernel fp state in stack
	add	%i2, SA(MINFRAME), %o0		! o0 = kfpu_t ptr
	rd	%gsr, %g5
	call	fp_save
	stx	%g5, [%o0 + FPU_GSR]		! store GSR
	ba,a,pt	%icc, 1f
	nop

0:
	! user thread
	! o4 = lwp ptr
	! g4 = fprs
	! i1 = CPU ptr
	ldn	[%o4 + LWP_FPU], %o0		! fp pointer
	stn	%fp, [THREAD_REG + T_SP]	! save sp
	andcc	%g4, FPRS_FEF, %g0		! is FPRS_FEF set
	st	%g4, [%o0 + FPU_FPRS]		! store FPRS
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g5
	ld	[%g5 + %lo(fpu_exists)], %g5
	brz,pn	%g5, 1f
	ldn	[THREAD_REG + T_CTX], %g3	! get ctx pointer
#endif
	bz,pt	%icc, 1f			! most apps don't use fp
	ldn	[THREAD_REG + T_CTX], %g3	! get ctx pointer
	ldn	[%o4 + LWP_FPU], %o0		! fp pointer
	rd	%gsr, %g5
	call	fp_save				! doesn't touch globals
	stx	%g5, [%o0 + FPU_GSR]		! store GSR
1:
	!
	! Perform context switch callback if set.
	! This handles coprocessor state saving.
	! i1 = cpu ptr
	! g3 = ctx pointer
	!
	wr	%g0, %g0, %fprs			! disable fpu and clear fprs
	brz,pt	%g3, 2f				! skip call when zero
	ldn	[%i0 + T_PROCP], %i3		! delay slot - get proc pointer
	call	savectx
	mov	THREAD_REG, %o0			! delay - arg = thread pointer
2:
	ldn	[THREAD_REG + T_PROCP], %i2	! load old curproc - for mmu

	!
	! Temporarily switch to idle thread's stack
	!
	ldn	[%i1 + CPU_IDLE_THREAD], %o0	! idle thread pointer
	ldn	[%o0 + T_SP], %o1		! get onto idle thread stack
	sub	%o1, SA(MINFRAME), %sp		! save room for ins and locals
	clr	%fp

	!
	! Set the idle thread as the current thread
	!
	mov	THREAD_REG, %l3			! save %g7 (current thread)
	mov	%o0, THREAD_REG			! set %g7 to idle
	stn	%o0, [%i1 + CPU_THREAD]		! set CPU's thread to idle

	!
	! Clear and unlock previous thread's t_lock
	! to allow it to be dispatched by another processor.
	!
	clrb	[%l3 + T_LOCK]			! clear tp->t_lock

	!
	! IMPORTANT: Registers at this point must be:
	!	%i0 = new thread
	!	%i1 = cpu pointer
	!	%i2 = old proc pointer
	!	%i3 = new proc pointer
	!
	! Here we are in the idle thread, have dropped the old thread.
	!
	ALTENTRY(_resume_from_idle)

	! SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3)
	SET_KCONTEXTREG(%o0, %g1, %g2, %g3, %o3, l1, l2, l3)

	cmp	%i2, %i3		! resuming the same process?
	be,pt	%xcc, 5f		! yes.
	nop

	ldx	[%i3 + P_AS], %o0	! load p->p_as
	ldx	[%o0 + A_HAT], %i5	! %i5 = new proc hat

	!
	! update cpusran field
	!
	ld	[%i1 + CPU_ID], %o4
	add	%i5, SFMMU_CPUSRAN, %o5
	CPU_INDEXTOSET(%o5, %o4, %g1)
	ldx	[%o5], %o2		! %o2 = cpusran field
	mov	1, %g2
	sllx	%g2, %o4, %o4		! %o4 = bit for this cpu
	andcc	%o4, %o2, %g0
	bnz,pn	%xcc, 0f		! bit already set, go to 0
	nop
3:
	or	%o2, %o4, %o1		! or in this cpu's bit mask
	casx	[%o5], %o2, %o1
	cmp	%o2, %o1
	bne,a,pn %xcc, 3b
	ldx	[%o5], %o2		! o2 = cpusran field
	membar	#LoadLoad|#StoreLoad
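
/*
 * The cpusran update above is a lock-free "set this CPU's bit" loop.
 * A hedged C equivalent (the word selection done by CPU_INDEXTOSET is
 * elided; atomic_cas_64() is from <sys/atomic.h>):
 *
 *	uint64_t bit = 1ULL << cpuid, old = *wordp, seen;
 *
 *	while (!(old & bit)) {
 *		seen = atomic_cas_64(wordp, old, old | bit);
 *		if (seen == old)
 *			break;		-- our bit is now set
 *		old = seen;		-- lost a race, retry
 *	}
 */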

0:
	!
	! disable interrupts
	!
	! if resume from user to kernel thread
	!	call sfmmu_setctx_sec
	! if resume from kernel (or a different user) thread to user thread
	!	call sfmmu_alloc_ctx
	!	sfmmu_load_mmustate
	!
	! enable interrupts
	!
	! %i5 = new proc hat
	!

	sethi	%hi(ksfmmup), %o2
	ldx	[%o2 + %lo(ksfmmup)], %o2

	rdpr	%pstate, %i4
	cmp	%i5, %o2		! new proc hat == ksfmmup ?
	bne,pt	%xcc, 3f		! new proc is not kernel as, go to 3
	wrpr	%i4, PSTATE_IE, %pstate

	SET_KAS_CTXSEC_ARGS(%i5, %o0, %o1)

	! new proc is kernel as

	call	sfmmu_setctx_sec	! switch to kernel context
	or	%o0, %o1, %o0

	ba,a,pt	%icc, 4f

	!
	! Switch to user address space.
	!
3:
	mov	%i5, %o0		! %o0 = sfmmup
	mov	%i1, %o2		! %o2 = CPU
	set	SFMMU_PRIVATE, %o3	! %o3 = sfmmu private flag
	call	sfmmu_alloc_ctx
	mov	%g0, %o1		! %o1 = allocate flag = 0

	brz,a,pt %o0, 4f		! %o0 == 0, no private alloc'ed
	nop

	ldn	[%i5 + SFMMU_SCDP], %o0	! using shared contexts?
	brz,a,pt %o0, 4f
	nop

	ldn	[%o0 + SCD_SFMMUP], %o0	! %o0 = scdp->scd_sfmmup
	mov	%i1, %o2		! %o2 = CPU
	set	SFMMU_SHARED, %o3	! %o3 = sfmmu shared flag
	call	sfmmu_alloc_ctx
	mov	1, %o1			! %o1 = allocate flag = 1

4:
	call	sfmmu_load_mmustate	! program MMU registers
	mov	%i5, %o0

	wrpr	%g0, %i4, %pstate	! enable interrupts
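
/*
 * Illustrative C for the hat switch just performed, inferred from the
 * register setup above (argument details simplified; a sketch, not the
 * exact calling sequence):
 *
 *	pstate = disable_interrupts();
 *	if (newhat == ksfmmup) {
 *		sfmmu_setctx_sec(<kernel ctx args>);	-- kernel as
 *	} else {
 *		if (sfmmu_alloc_ctx(newhat, 0, CPU, SFMMU_PRIVATE) != 0 &&
 *		    newhat->sfmmu_scdp != NULL)		-- shared contexts
 *			sfmmu_alloc_ctx(newhat->sfmmu_scdp->scd_sfmmup,
 *			    1, CPU, SFMMU_SHARED);
 *	}
 *	sfmmu_load_mmustate(newhat);
 *	restore_interrupts(pstate);
 */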

5:
	!
	! spin until dispatched thread's mutex has
	! been unlocked. this mutex is unlocked when
	! it becomes safe for the thread to run.
	!
	ldstub	[%i0 + T_LOCK], %o0	! lock curthread's t_lock
6:
	brnz,pn	%o0, 7f			! lock failed
	ldx	[%i0 + T_PC], %i7	! delay - restore resuming thread's pc

	!
	! Fix CPU structure to indicate new running thread.
	! Set pointer in new thread to the CPU structure.
	! XXX - Move migration statistic out of here
	!
	ldx	[%i0 + T_CPU], %g2	! last CPU to run the new thread
	cmp	%g2, %i1		! test for migration
	be,pt	%xcc, 4f		! no migration
	ldn	[%i0 + T_LWP], %o1	! delay - get associated lwp (if any)
	ldx	[%i1 + CPU_STATS_SYS_CPUMIGRATE], %g2
	inc	%g2
	stx	%g2, [%i1 + CPU_STATS_SYS_CPUMIGRATE]
	stx	%i1, [%i0 + T_CPU]	! set new thread's CPU pointer
4:
	stx	%i0, [%i1 + CPU_THREAD]	! set CPU's thread pointer
	membar	#StoreLoad		! synchronize with mutex_exit()
	mov	%i0, THREAD_REG		! update global thread register
	stx	%o1, [%i1 + CPU_LWP]	! set CPU's lwp ptr
	brz,a,pn %o1, 1f		! if no lwp, branch and clr mpcb
	stx	%g0, [%i1 + CPU_MPCB]
	!
	! user thread
	! o1 = lwp
	! i0 = new thread
	!
	ldx	[%i0 + T_STACK], %o0
	stx	%o0, [%i1 + CPU_MPCB]	! set CPU's mpcb pointer
#ifdef CPU_MPCB_PA
	ldx	[%o0 + MPCB_PA], %o0
	stx	%o0, [%i1 + CPU_MPCB_PA]
#endif
	! Switch to new thread's stack
	ldx	[%i0 + T_SP], %o0	! restore resuming thread's sp
	sub	%o0, SA(MINFRAME), %sp	! in case of intr or trap before restore
	mov	%o0, %fp
	!
	! Restore resuming thread's GSR reg and floating-point regs
	! Note that the ld to the gsr register ensures that the loading of
	! the floating point saved state has completed without necessity
	! of a membar #Sync.
	!
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g3
	ld	[%g3 + %lo(fpu_exists)], %g3
	brz,pn	%g3, 2f
	ldx	[%i0 + T_CTX], %i5	! should resumed thread restorectx?
#endif
	ldx	[%o1 + LWP_FPU], %o0	! fp pointer
	ld	[%o0 + FPU_FPRS], %g5	! get fpu_fprs
	andcc	%g5, FPRS_FEF, %g0	! is FPRS_FEF set?
	bz,a,pt	%icc, 9f		! no, skip fp_restore
	wr	%g0, FPRS_FEF, %fprs	! enable fprs so fp_zero works

	ldx	[THREAD_REG + T_CPU], %o4	! cpu pointer
	call	fp_restore
	wr	%g5, %g0, %fprs		! enable fpu and restore fprs

	ldx	[%o0 + FPU_GSR], %g5	! load saved GSR data
	wr	%g5, %g0, %gsr		! restore %gsr data
	ba,pt	%icc, 2f
	ldx	[%i0 + T_CTX], %i5	! should resumed thread restorectx?

9:
	!
	! Zero resuming thread's fp registers, for *all* non-fp programs.
	! Remove all possibility of using the fp regs as a "covert channel".
	!
	call	fp_zero
	wr	%g0, %g0, %gsr
	ldx	[%i0 + T_CTX], %i5	! should resumed thread restorectx?
	ba,pt	%icc, 2f
	wr	%g0, %g0, %fprs		! disable fprs

1:
#ifdef CPU_MPCB_PA
	mov	-1, %o1
	stx	%o1, [%i1 + CPU_MPCB_PA]
#endif
	!
	! kernel thread
	! i0 = new thread
	!
	! Switch to new thread's stack
	!
	ldx	[%i0 + T_SP], %o0	! restore resuming thread's sp
	sub	%o0, SA(MINFRAME), %sp	! in case of intr or trap before restore
	mov	%o0, %fp
	!
	! Restore resuming thread's GSR reg and floating-point regs
	! Note that the ld to the gsr register ensures that the loading of
	! the floating point saved state has completed without necessity
	! of a membar #Sync.
	!
	ldx	[%i0 + T_STACK], %o0
	ld	[%o0 + SA(MINFRAME) + FPU_FPRS], %g5	! load fprs
	ldx	[%i0 + T_CTX], %i5	! should thread restorectx?
	andcc	%g5, FPRS_FEF, %g0	! did we save fp in stack?
	bz,a,pt	%icc, 2f
	wr	%g0, %g0, %fprs		! clr fprs

	wr	%g5, %g0, %fprs		! enable fpu and restore fprs
	call	fp_restore
	add	%o0, SA(MINFRAME), %o0	! o0 = kfpu_t ptr
	ldx	[%o0 + FPU_GSR], %g5	! load saved GSR data
	wr	%g5, %g0, %gsr		! restore %gsr data

2:
	!
	! Restore resuming thread's context
	! i5 = ctx ptr
	!
	brz,a,pt %i5, 8f		! skip restorectx() when zero
	ld	[%i1 + CPU_BASE_SPL], %o0
	call	restorectx		! thread can not sleep on temp stack
	mov	THREAD_REG, %o0		! delay slot - arg = thread pointer
	!
	! Set priority as low as possible, blocking all interrupt threads
	! that may be active.
	!
	ld	[%i1 + CPU_BASE_SPL], %o0
8:
	wrpr	%o0, 0, %pil
	wrpr	%g0, WSTATE_KERN, %wstate
	!
	! If we are resuming an interrupt thread, store a starting timestamp
	! in the thread structure.
	!
	lduh	[THREAD_REG + T_FLAGS], %o0
	andcc	%o0, T_INTR_THREAD, %g0
	bnz,pn	%xcc, 0f
	nop
5:
	call	__dtrace_probe___sched_on__cpu	! DTrace probe
	nop

	ret				! resume curthread
	restore
0:
	add	THREAD_REG, T_INTR_START, %o2
1:
	ldx	[%o2], %o1
	RD_TICK(%o0,%o3,%g5,__LINE__)
	casx	[%o2], %o1, %o0
	cmp	%o0, %o1
	be,pt	%xcc, 5b
	nop
	! If an interrupt occurred while we were attempting to store
	! the timestamp, try again.
	ba,pt	%xcc, 1b
	nop

	!
	! lock failed - spin with regular load to avoid cache-thrashing.
	!
7:
	brnz,a,pt %o0, 7b		! spin while locked
	ldub	[%i0 + T_LOCK], %o0
	ba	%xcc, 6b
	ldstub	[%i0 + T_LOCK], %o0	! delay - lock curthread's mutex
	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

#endif	/* lint */
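
/*
 * The interrupt-thread timestamp store at labels 0:/1: above is a CAS
 * retry loop.  A hedged C equivalent (gettick() standing in for RD_TICK;
 * atomic_cas_64() from <sys/atomic.h>):
 *
 *	do {
 *		old = t->t_intr_start;
 *		new = gettick();
 *	} while (atomic_cas_64(&t->t_intr_start, old, new) != old);
 */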

#if defined(lint)

/* ARGSUSED */
void
resume_from_zombie(kthread_id_t t)
{}

#else	/* lint */

	ENTRY(resume_from_zombie)
	save	%sp, -SA(MINFRAME), %sp		! save ins and locals

	call	__dtrace_probe___sched_off__cpu	! DTrace probe
	mov	%i0, %o0			! arg for DTrace probe

	ldn	[THREAD_REG + T_CPU], %i1	! cpu pointer

	flushw					! flushes all but this window
	ldn	[THREAD_REG + T_PROCP], %i2	! old procp for mmu ctx

	!
	! Temporarily switch to the idle thread's stack so that
	! the zombie thread's stack can be reclaimed by the reaper.
	!
	ldn	[%i1 + CPU_IDLE_THREAD], %o2	! idle thread pointer
	ldn	[%o2 + T_SP], %o1		! get onto idle thread stack
	sub	%o1, SA(MINFRAME), %sp		! save room for ins and locals
	clr	%fp
	!
	! Set the idle thread as the current thread.
	! Put the zombie on death-row.
	!
	mov	THREAD_REG, %o0			! save %g7 = curthread for arg
	mov	%o2, THREAD_REG			! set %g7 to idle
	stn	%g0, [%i1 + CPU_MPCB]		! clear mpcb
#ifdef CPU_MPCB_PA
	mov	-1, %o1
	stx	%o1, [%i1 + CPU_MPCB_PA]
#endif
	call	reapq_add			! reapq_add(old_thread);
	stn	%o2, [%i1 + CPU_THREAD]		! delay - CPU's thread = idle

	!
	! resume_from_idle args:
	!	%i0 = new thread
	!	%i1 = cpu
	!	%i2 = old proc
	!	%i3 = new proc
	!
	b	_resume_from_idle		! finish job of resume
	ldn	[%i0 + T_PROCP], %i3		! new process
	SET_SIZE(resume_from_zombie)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
resume_from_intr(kthread_id_t t)
{}

#else	/* lint */

	ENTRY(resume_from_intr)
	save	%sp, -SA(MINFRAME), %sp		! save ins and locals

	!
	! We read in the fprs and call fp_save if FPRS_FEF is set
	! to save the floating-point state if fprs has been
	! modified by operations such as hw bcopy or fp_disabled.
	! This resolves an issue where an interrupting thread
	! doesn't retain its floating-point registers when
	! switching out of the interrupt context.
	!
	rd	%fprs, %g4
	ldn	[THREAD_REG + T_STACK], %i2
	andcc	%g4, FPRS_FEF, %g0		! is FPRS_FEF set
	bz,pt	%icc, 4f
	st	%g4, [%i2 + SA(MINFRAME) + FPU_FPRS]	! save fprs

	! save kernel fp state in stack
	add	%i2, SA(MINFRAME), %o0		! %o0 = kfpu_t ptr
	rd	%gsr, %g5
	call	fp_save
	stx	%g5, [%o0 + FPU_GSR]		! store GSR

4:

	flushw					! flushes all but this window
	stn	%fp, [THREAD_REG + T_SP]	! delay - save sp
	stn	%i7, [THREAD_REG + T_PC]	! save return address

	ldn	[%i0 + T_PC], %i7		! restore resuming thread's pc
	ldn	[THREAD_REG + T_CPU], %i1	! cpu pointer

	!
	! Fix CPU structure to indicate new running thread.
	! The pinned thread we're resuming already has the CPU pointer set.
	!
	mov	THREAD_REG, %l3		! save old thread
	stn	%i0, [%i1 + CPU_THREAD]	! set CPU's thread pointer
	membar	#StoreLoad		! synchronize with mutex_exit()
	mov	%i0, THREAD_REG		! update global thread register

	!
	! Switch to new thread's stack
	!
	ldn	[THREAD_REG + T_SP], %o0	! restore resuming thread's sp
	sub	%o0, SA(MINFRAME), %sp	! in case of intr or trap before restore
	mov	%o0, %fp
	clrb	[%l3 + T_LOCK]		! clear intr thread's tp->t_lock

	!
	! If we are resuming an interrupt thread, store a timestamp in the
	! thread structure.
	!
	lduh	[THREAD_REG + T_FLAGS], %o0
	andcc	%o0, T_INTR_THREAD, %g0
	bnz,pn	%xcc, 0f
	!
	! We're resuming a non-interrupt thread.
	! Clear CPU_INTRCNT and check whether cpu_kprunrun is set.
	!
	ldub	[%i1 + CPU_KPRUNRUN], %o5	! delay
	brnz,pn	%o5, 3f			! call kpreempt(KPREEMPT_SYNC);
	stub	%g0, [%i1 + CPU_INTRCNT]
1:
	ret				! resume curthread
	restore
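
/*
 * Illustrative C for the bookkeeping on either side of this point (a
 * sketch of the 0:/2:/3: paths; field names as in the thread and cpu
 * structures):
 *
 *	if (t->t_flags & T_INTR_THREAD) {
 *		<CAS-update t->t_intr_start, as in resume()>;
 *		if (t->t_intr == NULL)		-- not pinning anyone
 *			CPU->cpu_intrcnt = 0;
 *	} else {
 *		CPU->cpu_intrcnt = 0;
 *		if (CPU->cpu_kprunrun)
 *			kpreempt(KPREEMPT_SYNC);
 *	}
 */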
0:
	!
	! We're an interrupt thread. Update t_intr_start and cpu_intrcnt.
	!
	add	THREAD_REG, T_INTR_START, %o2
2:
	ldx	[%o2], %o1
	RD_TICK(%o0,%o3,%l1,__LINE__)
	casx	[%o2], %o1, %o0
	cmp	%o0, %o1
	bne,pn	%xcc, 2b
	ldn	[THREAD_REG + T_INTR], %l1	! delay
	! Reset cpu_intrcnt if we aren't pinning anyone
	brz,a,pt %l1, 2f
	stub	%g0, [%i1 + CPU_INTRCNT]
2:
	ba,pt	%xcc, 1b
	nop
3:
	!
	! We're a non-interrupt thread and cpu_kprunrun is set. Call kpreempt.
	!
	call	kpreempt
	mov	KPREEMPT_SYNC, %o0
	ba,pt	%xcc, 1b
	nop
	SET_SIZE(resume_from_intr)

#endif	/* lint */


/*
 * thread_start()
 *
 * the current register window was crafted by thread_run() to contain
 * an address of a procedure (in register %i7), and its args in registers
 * %i0 through %i5. a stack trace of this thread will show the procedure
 * that thread_start() invoked at the bottom of the stack. an exit routine
 * is stored in %l0 and called when the started thread returns from its
 * called procedure.
 */

#if defined(lint)

void
thread_start(void)
{}

#else	/* lint */

	ENTRY(thread_start)
	mov	%i0, %o0
	jmpl	%i7, %o7	! call thread_run()'s start() procedure.
	mov	%i1, %o1

	call	thread_exit	! destroy thread if it returns.
	nop
	unimp	0
	SET_SIZE(thread_start)

#endif	/* lint */
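
/*
 * Conceptually (a sketch only -- the procedure address and arguments
 * live in the new thread's register window, so this never exists as C):
 *
 *	void
 *	thread_start(void)
 *	{
 *		(*start)(arg0, arg1);	-- start in %i7, args in %i0/%i1
 *		thread_exit();		-- if start() ever returns
 *	}
 */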