xref: /titanic_51/usr/src/uts/sun4/ml/swtch.s (revision 160abee025ef30c34521b981edd40ffcaab560aa)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#pragma ident	"%Z%%M%	%I%	%E% SMI"
27
28/*
29 * Process switching routines.
30 */
31
32#if !defined(lint)
33#include "assym.h"
34#else	/* lint */
35#include <sys/thread.h>
36#endif	/* lint */
37
38#include <sys/param.h>
39#include <sys/asm_linkage.h>
40#include <sys/mmu.h>
41#include <sys/pcb.h>
42#include <sys/machthread.h>
43#include <sys/privregs.h>
44#include <sys/vtrace.h>
45#include <vm/hat_sfmmu.h>
46
47/*
48 * resume(kthread_id_t)
49 *
50 * a thread can only run on one processor at a time. there
51 * exists a window on MPs where the current thread on one
52 * processor is capable of being dispatched by another processor.
53 * some overlap between outgoing and incoming threads can happen
54 * when they are the same thread. in this case where the threads
55 * are the same, resume() on one processor will spin on the incoming
56 * thread until resume() on the other processor has finished with
57 * the outgoing thread.
58 *
59 * The MMU context changes when the resuming thread resides in a different
60 * process.  Kernel threads are known by resume to reside in process 0.
61 * The MMU context, therefore, only changes when resuming a thread in
62 * a process different from curproc.
63 *
64 * resume_from_intr() is called when the thread being resumed was not
65 * passivated by resume (e.g. was interrupted).  This means that the
66 * resume lock is already held and that a restore context is not needed.
67 * Also, the MMU context is not changed on the resume in this case.
68 *
69 * resume_from_zombie() is the same as resume except the calling thread
70 * is a zombie and must be put on the deathrow list after the CPU is
71 * off the stack.
72 */
73
74#if defined(lint)
75
/*
 * Lint-only C prototype stub for resume(); the real implementation is
 * the SPARC assembly under the #else branch of this lint guard.
 */
76/* ARGSUSED */
77void
78resume(kthread_id_t t)
79{}
80
81#else	/* lint */
82
	!
	! resume(kthread_id_t t)
	!
	! In (after save):	%i0 = t, the thread to switch to
	!			THREAD_REG (%g7) = outgoing (current) thread
	! Saves the outgoing thread's pc/sp/FP state, hops onto the idle
	! thread's stack, drops the old thread's t_lock, switches MMU
	! context if the process changes, then installs and resumes t.
	! File convention: an instruction indented by two extra spaces sits
	! in the delay slot of the preceding branch/call.
	!
83	ENTRY(resume)
84	save	%sp, -SA(MINFRAME), %sp		! save ins and locals
85
86	call	__dtrace_probe___sched_off__cpu	! DTrace probe
87	mov	%i0, %o0			! arg for DTrace probe
88
89	membar	#Sync				! flush writebuffers
90	flushw					! flushes all but this window
91
92	stn	%i7, [THREAD_REG + T_PC]	! save return address
93	stn	%fp, [THREAD_REG + T_SP]	! save sp
94
95	!
96	! Save GSR (Graphics Status Register).
97	!
98	! Read fprs, call fp_save if FPRS_FEF set.
99	! This handles floating-point state saving.
100	! The fprs could be turned on by hw bcopy software,
101	! *or* by fp_disabled. Handle it either way.
102	!
103	ldn	[THREAD_REG + T_LWP], %o4	! get lwp pointer
104	rd	%fprs, %g4			! read fprs
105	brnz,pt	%o4, 0f				! if user thread skip
106	  ldn	[THREAD_REG + T_CPU], %i1	! get CPU pointer
107
108	!
109	! kernel thread
110	!
111	! we save fprs at the beginning the stack so we know
112	! where to check at resume time
113	ldn	[THREAD_REG + T_STACK], %i2
114	ldn	[THREAD_REG + T_CTX], %g3	! get ctx pointer
115	andcc	%g4, FPRS_FEF, %g0		! is FPRS_FEF set
116	bz,pt	%icc, 1f			! nope, skip
117	  st	%g4, [%i2 + SA(MINFRAME) + FPU_FPRS]	! save fprs
118
119	! save kernel fp state in stack
120	add	%i2, SA(MINFRAME), %o0		! o0 = kfpu_t ptr
121	rd	%gsr, %g5
122	call	fp_save
123	stx	%g5, [%o0 + FPU_GSR]		! store GSR
124	ba,a,pt	%icc, 1f
125	  nop
126
1270:
128	! user thread
129	! o4 = lwp ptr
130	! g4 = fprs
131	! i1 = CPU ptr
132	ldn	[%o4 + LWP_FPU], %o0		! fp pointer
133	stn	%fp, [THREAD_REG + T_SP]	! save sp (NOTE(review): %sp already saved above; looks redundant)
134	andcc	%g4, FPRS_FEF, %g0		! is FPRS_FEF set
135	st	%g4, [%o0 + FPU_FPRS]		! store FPRS
136#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
137	sethi	%hi(fpu_exists), %g5
138	ld	[%g5 + %lo(fpu_exists)], %g5
139	brz,pn	%g5, 1f
140	  ldn	[THREAD_REG + T_CTX], %g3	! get ctx pointer
141#endif
142	bz,pt	%icc, 1f			! most apps don't use fp
143	  ldn	[THREAD_REG + T_CTX], %g3	! get ctx pointer
144	ldn	[%o4 + LWP_FPU], %o0		! fp pointer
145	rd	%gsr, %g5
146	call	fp_save				! doesn't touch globals
147	stx	%g5, [%o0 + FPU_GSR]		! store GSR
1481:
149	!
150	! Perform context switch callback if set.
151	! This handles coprocessor state saving.
152	! i1 = cpu ptr
153	! g3 = ctx pointer
154	!
155	wr	%g0, %g0, %fprs			! disable fpu and clear fprs
156	brz,pt	%g3, 2f				! skip call when zero
157	ldn	[%i0 + T_PROCP], %i3		! delay slot - get proc pointer
158	call	savectx
159	mov	THREAD_REG, %o0			! delay - arg = thread pointer
1602:
161	ldn	[THREAD_REG + T_PROCP], %i2	! load old curproc - for mmu
162
163	!
164	! Temporarily switch to idle thread's stack
165	!
166	ldn	[%i1 + CPU_IDLE_THREAD], %o0	! idle thread pointer
167	ldn	[%o0 + T_SP], %o1		! get onto idle thread stack
168	sub	%o1, SA(MINFRAME), %sp		! save room for ins and locals
169	clr	%fp
170
171	!
172	! Set the idle thread as the current thread
173	!
174	mov	THREAD_REG, %l3			! save %g7 (current thread)
175	mov	%o0, THREAD_REG			! set %g7 to idle
176	stn	%o0, [%i1 + CPU_THREAD]		! set CPU's thread to idle
177
178	!
179	! Clear and unlock previous thread's t_lock
180	! to allow it to be dispatched by another processor.
181	!
182	clrb	[%l3 + T_LOCK]			! clear tp->t_lock
183
184	!
185	! IMPORTANT: Registers at this point must be:
186	!	%i0 = new thread
187	!	%i1 = cpu pointer
188	!	%i2 = old proc pointer
189	!	%i3 = new proc pointer
190	!
191	! Here we are in the idle thread, have dropped the old thread.
192	!
193	ALTENTRY(_resume_from_idle)
194
195	! SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3)
196	SET_KCONTEXTREG(%o0, %g1, %g2, %g3, %o3, l1, l2, l3)
197
198	cmp 	%i2, %i3		! resuming the same process?
199	be,pt	%xcc, 5f		! yes.
200	  nop
201
202	ldx	[%i3 + P_AS], %o0	! load p->p_as
203	ldx	[%o0 + A_HAT], %i5	! %i5 = new proc hat
204
205	!
206	! update cpusran field
207	!
208	ld	[%i1 + CPU_ID], %o4
209	add	%i5, SFMMU_CPUSRAN, %o5
210	CPU_INDEXTOSET(%o5, %o4, %g1)
211	ldx	[%o5], %o2		! %o2 = cpusran field
212	mov	1, %g2
213	sllx	%g2, %o4, %o4		! %o4 = bit for this cpu
214	andcc	%o4, %o2, %g0
215	bnz,pn	%xcc, 0f		! bit already set, go to 0
216	  nop
2173:
	! casx retry loop: atomically OR this cpu's bit into cpusran;
	! on failure, reload and try again
218	or	%o2, %o4, %o1		! or in this cpu's bit mask
219	casx	[%o5], %o2, %o1
220	cmp	%o2, %o1
221	bne,a,pn %xcc, 3b
222	  ldx	[%o5], %o2		! o2 = cpusran field
223	membar	#LoadLoad|#StoreLoad
224
2250:
226	!
227	! disable interrupts
228	!
229	! if resume from user to kernel thread
230	!	call sfmmu_setctx_sec
231	! if resume from kernel (or a different user) thread to user thread
232	!	call sfmmu_alloc_ctx
233	! sfmmu_load_mmustate
234	!
235	! enable interrupts
236	!
237	! %i5 = new proc hat
238	!
239
240	sethi	%hi(ksfmmup), %o2
241        ldx	[%o2 + %lo(ksfmmup)], %o2
242
243	rdpr	%pstate, %i4
244        cmp	%i5, %o2		! new proc hat == ksfmmup ?
245	bne,pt	%xcc, 3f		! new proc is not kernel as, go to 3
246	  wrpr	%i4, PSTATE_IE, %pstate
247
248	SET_KAS_CTXSEC_ARGS(%i5, %o0, %o1)
249
250	! new proc is kernel as
251
252	call	sfmmu_setctx_sec		! switch to kernel context
253	  or	%o0, %o1, %o0
254
255	ba,a,pt	%icc, 4f
256
257	!
258	! Switch to user address space.
259	!
2603:
261	mov	%i5, %o0			! %o0 = sfmmup
262	mov	%i1, %o2			! %o2 = CPU
263	set	SFMMU_PRIVATE, %o3		! %o3 = sfmmu private flag
264	call	sfmmu_alloc_ctx
265	  mov	%g0, %o1			! %o1 = allocate flag = 0
266#ifdef sun4v
267	brz,a,pt %o0, 4f			! %o0 == 0, no private alloc'ed
268          nop
269
270        ldn     [%i5 + SFMMU_SCDP], %o0         ! using shared contexts?
271        brz,a,pt %o0, 4f
272          nop
273
274	ldn   [%o0 + SCD_SFMMUP], %o0		! %o0 = scdp->scd_sfmmup
275	mov	%i1, %o2			! %o2 = CPU
276	set	SFMMU_SHARED, %o3		! %o3 = sfmmu shared flag
277	call	sfmmu_alloc_ctx
278	  mov	1, %o1				! %o1 = allocate flag = 1
279
280#endif
281
	! NOTE: local label "4" is defined twice; the 4f references above
	! resolve here, while the 4f in the migration check below resolves
	! to the second "4:" further down.
2824:
283	call	sfmmu_load_mmustate		! program MMU registers
284	  mov	%i5, %o0
285
286	wrpr    %g0, %i4, %pstate               ! enable interrupts
287
2885:
289	!
290	! spin until dispatched thread's mutex has
291	! been unlocked. this mutex is unlocked when
292	! it becomes safe for the thread to run.
293	!
294	ldstub	[%i0 + T_LOCK], %o0	! lock curthread's t_lock
2956:
296	brnz,pn	%o0, 7f			! lock failed
297	  ldx	[%i0 + T_PC], %i7	! delay - restore resuming thread's pc
298
299	!
300	! Fix CPU structure to indicate new running thread.
301	! Set pointer in new thread to the CPU structure.
302	! XXX - Move migration statistic out of here
303	!
304        ldx	[%i0 + T_CPU], %g2	! last CPU to run the new thread
305        cmp     %g2, %i1		! test for migration
306        be,pt	%xcc, 4f		! no migration
307          ldn	[%i0 + T_LWP], %o1	! delay - get associated lwp (if any)
308        ldx	[%i1 + CPU_STATS_SYS_CPUMIGRATE], %g2
309        inc     %g2
310        stx	%g2, [%i1 + CPU_STATS_SYS_CPUMIGRATE]
311	stx	%i1, [%i0 + T_CPU]	! set new thread's CPU pointer
3124:
313	stx	%i0, [%i1 + CPU_THREAD]	! set CPU's thread pointer
314	membar	#StoreLoad		! synchronize with mutex_exit()
315	mov	%i0, THREAD_REG		! update global thread register
316	stx	%o1, [%i1 + CPU_LWP]	! set CPU's lwp ptr
317	brz,a,pn %o1, 1f		! if no lwp, branch and clr mpcb
318	  stx	%g0, [%i1 + CPU_MPCB]
319	!
320	! user thread
321	! o1 = lwp
322	! i0 = new thread
323	!
324	ldx	[%i0 + T_STACK], %o0
325	stx	%o0, [%i1 + CPU_MPCB]	! set CPU's mpcb pointer
326#ifdef CPU_MPCB_PA
327	ldx	[%o0 + MPCB_PA], %o0
328	stx	%o0, [%i1 + CPU_MPCB_PA]
329#endif
330	! Switch to new thread's stack
331	ldx	[%i0 + T_SP], %o0	! restore resuming thread's sp
332	sub	%o0, SA(MINFRAME), %sp	! in case of intr or trap before restore
333	mov	%o0, %fp
334	!
335	! Restore resuming thread's GSR reg and floating-point regs
336	! Note that the ld to the gsr register ensures that the loading of
337	! the floating point saved state has completed without necessity
338	! of a membar #Sync.
339	!
340#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
341	sethi	%hi(fpu_exists), %g3
342	ld	[%g3 + %lo(fpu_exists)], %g3
343	brz,pn	%g3, 2f
344	  ldx	[%i0 + T_CTX], %i5	! should resumed thread restorectx?
345#endif
346	ldx	[%o1 + LWP_FPU], %o0		! fp pointer
347	ld	[%o0 + FPU_FPRS], %g5		! get fpu_fprs
348	andcc	%g5, FPRS_FEF, %g0		! is FPRS_FEF set?
349	bz,a,pt	%icc, 9f			! no, skip fp_restore
350	  wr	%g0, FPRS_FEF, %fprs		! enable fprs so fp_zero works
351
352	ldx	[THREAD_REG + T_CPU], %o4	! cpu pointer
353	call	fp_restore
354	  wr	%g5, %g0, %fprs			! enable fpu and restore fprs
355
356	ldx	[%o0 + FPU_GSR], %g5		! load saved GSR data
357	wr	%g5, %g0, %gsr			! restore %gsr data
358	ba,pt	%icc,2f
359	  ldx	[%i0 + T_CTX], %i5	! should resumed thread restorectx?
360
3619:
362	!
363	! Zero resuming thread's fp registers, for *all* non-fp program
364	! Remove all possibility of using the fp regs as a "covert channel".
365	!
366	call	fp_zero
367	  wr	%g0, %g0, %gsr
368	ldx	[%i0 + T_CTX], %i5	! should resumed thread restorectx?
369	ba,pt	%icc, 2f
370	  wr	%g0, %g0, %fprs			! disable fprs
371
3721:
373#ifdef CPU_MPCB_PA
374	mov	-1, %o1
375	stx	%o1, [%i1 + CPU_MPCB_PA]
376#endif
377	!
378	! kernel thread
379	! i0 = new thread
380	!
381	! Switch to new thread's stack
382	!
383	ldx	[%i0 + T_SP], %o0	! restore resuming thread's sp
384	sub	%o0, SA(MINFRAME), %sp	! in case of intr or trap before restore
385	mov	%o0, %fp
386	!
387	! Restore resuming thread's GSR reg and floating-point regs
388	! Note that the ld to the gsr register ensures that the loading of
389	! the floating point saved state has completed without necessity
390	! of a membar #Sync.
391	!
392	ldx	[%i0 + T_STACK], %o0
393	ld	[%o0 + SA(MINFRAME) + FPU_FPRS], %g5	! load fprs
394	ldx	[%i0 + T_CTX], %i5		! should thread restorectx?
395	andcc	%g5, FPRS_FEF, %g0		! did we save fp in stack?
396	bz,a,pt	%icc, 2f
397	  wr	%g0, %g0, %fprs			! clr fprs
398
399	wr	%g5, %g0, %fprs			! enable fpu and restore fprs
400	call	fp_restore
401	add	%o0, SA(MINFRAME), %o0		! o0 = kpu_t ptr
402	ldx	[%o0 + FPU_GSR], %g5		! load saved GSR data
403	wr	%g5, %g0, %gsr			! restore %gsr data
404
4052:
406	!
407	! Restore resuming thread's context
408	! i5 = ctx ptr
409	!
410	brz,a,pt %i5, 8f		! skip restorectx() when zero
411	  ld	[%i1 + CPU_BASE_SPL], %o0
412	call	restorectx		! thread can not sleep on temp stack
413	  mov	THREAD_REG, %o0		! delay slot - arg = thread pointer
414	!
415	! Set priority as low as possible, blocking all interrupt threads
416	! that may be active.
417	!
418	ld	[%i1 + CPU_BASE_SPL], %o0
4198:
420	wrpr	%o0, 0, %pil
421	wrpr	%g0, WSTATE_KERN, %wstate
422	!
423	! If we are resuming an interrupt thread, store a starting timestamp
424	! in the thread structure.
425	!
426	lduh	[THREAD_REG + T_FLAGS], %o0
427	andcc	%o0, T_INTR_THREAD, %g0
428	bnz,pn	%xcc, 0f
429	  nop
4305:
431	call	__dtrace_probe___sched_on__cpu	! DTrace probe
432	nop
433
434	ret				! resume curthread
435	restore
4360:
437	add	THREAD_REG, T_INTR_START, %o2
4381:
	! casx loop: read %tick (masking the NPT bit) and atomically store
	! it into t_intr_start; retry if an interrupt changed it meanwhile
439	ldx	[%o2], %o1
440	rdpr	%tick, %o0
441	sllx	%o0, 1, %o0
442	srlx	%o0, 1, %o0			! shift off NPT bit
443	casx	[%o2], %o1, %o0
444	cmp	%o0, %o1
445	be,pt	%xcc, 5b
446	  nop
447	! If an interrupt occurred while we were attempting to store
448	! the timestamp, try again.
449	ba,pt	%xcc, 1b
450	  nop
451
452	!
453	! lock failed - spin with regular load to avoid cache-thrashing.
454	!
4557:
456	brnz,a,pt %o0, 7b		! spin while locked
457	  ldub	[%i0 + T_LOCK], %o0
458	ba	%xcc, 6b
459	  ldstub  [%i0 + T_LOCK], %o0	! delay - lock curthread's mutex
460	SET_SIZE(_resume_from_idle)
461	SET_SIZE(resume)
462
463#endif	/* lint */
464
465#if defined(lint)
466
/*
 * Lint-only C prototype stub for resume_from_zombie(); the real
 * implementation is the SPARC assembly under the #else branch below.
 */
467/* ARGSUSED */
468void
469resume_from_zombie(kthread_id_t t)
470{}
471
472#else	/* lint */
473
	!
	! resume_from_zombie(kthread_id_t t)
	!
	! In (after save):	%i0 = t, the thread to switch to
	!			THREAD_REG (%g7) = outgoing zombie thread
	! Moves onto the idle thread's stack, queues the zombie on the
	! reaper's list (reapq_add), then tail-branches into
	! _resume_from_idle to finish the switch to t.
	!
474	ENTRY(resume_from_zombie)
475	save	%sp, -SA(MINFRAME), %sp		! save ins and locals
476
477	call	__dtrace_probe___sched_off__cpu	! DTrace probe
478	mov	%i0, %o0			! arg for DTrace probe
479
480	ldn	[THREAD_REG + T_CPU], %i1	! cpu pointer
481
482	flushw					! flushes all but this window
483	ldn	[THREAD_REG + T_PROCP], %i2	! old procp for mmu ctx
484
485	!
486	! Temporarily switch to the idle thread's stack so that
487	! the zombie thread's stack can be reclaimed by the reaper.
488	!
489	ldn	[%i1 + CPU_IDLE_THREAD], %o2	! idle thread pointer
490	ldn	[%o2 + T_SP], %o1		! get onto idle thread stack
491	sub	%o1, SA(MINFRAME), %sp		! save room for ins and locals
492	clr	%fp
493	!
494	! Set the idle thread as the current thread.
495	! Put the zombie on death-row.
496	!
497	mov	THREAD_REG, %o0			! save %g7 = curthread for arg
498	mov	%o2, THREAD_REG			! set %g7 to idle
499	stn	%g0, [%i1 + CPU_MPCB]		! clear mpcb
500#ifdef CPU_MPCB_PA
501	mov	-1, %o1
502	stx	%o1, [%i1 + CPU_MPCB_PA]
503#endif
504	call	reapq_add			! reapq_add(old_thread);
505	stn	%o2, [%i1 + CPU_THREAD]		! delay - CPU's thread = idle
506
507	!
508	! resume_from_idle args:
509	!	%i0 = new thread
510	!	%i1 = cpu
511	!	%i2 = old proc
512	!	%i3 = new proc
513	!
514	b	_resume_from_idle		! finish job of resume
515	ldn	[%i0 + T_PROCP], %i3		! new process
516	SET_SIZE(resume_from_zombie)
517
518#endif	/* lint */
519
520#if defined(lint)
521
/*
 * Lint-only C prototype stub for resume_from_intr(); the real
 * implementation is the SPARC assembly under the #else branch below.
 */
522/* ARGSUSED */
523void
524resume_from_intr(kthread_id_t t)
525{}
526
527#else	/* lint */
528
	!
	! resume_from_intr(kthread_id_t t)
	!
	! In (after save):	%i0 = t, the (pinned) thread to resume
	!			THREAD_REG (%g7) = outgoing interrupt thread
	! The resumed thread's t_lock is already held and no MMU context
	! change or restorectx is needed (see the block comment at the
	! top of this file), so this is a much shorter path than resume().
	!
529	ENTRY(resume_from_intr)
530	save	%sp, -SA(MINFRAME), %sp		! save ins and locals
531
532	flushw					! flushes all but this window
533	stn	%fp, [THREAD_REG + T_SP]	! save sp (NOTE(review): "delay" in the original comment looks stale; flushw has no delay slot)
534	stn	%i7, [THREAD_REG + T_PC]	! save return address
535
536	ldn	[%i0 + T_PC], %i7		! restore resuming thread's pc
537	ldn	[THREAD_REG + T_CPU], %i1	! cpu pointer
538
539	!
540	! Fix CPU structure to indicate new running thread.
541	! The pinned thread we're resuming already has the CPU pointer set.
542	!
543	mov	THREAD_REG, %l3		! save old thread
544	stn	%i0, [%i1 + CPU_THREAD]	! set CPU's thread pointer
545	membar	#StoreLoad		! synchronize with mutex_exit()
546	mov	%i0, THREAD_REG		! update global thread register
547
548	!
549	! Switch to new thread's stack
550	!
551	ldn	[THREAD_REG + T_SP], %o0	! restore resuming thread's sp
552	sub	%o0, SA(MINFRAME), %sp ! in case of intr or trap before restore
553	mov	%o0, %fp
554	clrb	[%l3 + T_LOCK]		! clear intr thread's tp->t_lock
555
556	!
557	! If we are resuming an interrupt thread, store a timestamp in the
558	! thread structure.
559	!
560	lduh	[THREAD_REG + T_FLAGS], %o0
561	andcc	%o0, T_INTR_THREAD, %g0
562	bnz,pn	%xcc, 0f
563	!
564	! We're resuming a non-interrupt thread.
565	! Clear CPU_INTRCNT and check if cpu_kprunrun set?
566	!
567	ldub	[%i1 + CPU_KPRUNRUN], %o5	! delay
568	brnz,pn	%o5, 3f				! call kpreempt(KPREEMPT_SYNC);
569	stub	%g0, [%i1 + CPU_INTRCNT]
5701:
571	ret				! resume curthread
572	restore
5730:
574	!
575	! We're an interrupt thread. Update t_intr_start and cpu_intrcnt
576	!
577	add	THREAD_REG, T_INTR_START, %o2
5782:
	! casx loop: read %tick (masking the NPT bit) and atomically store
	! it into t_intr_start; retry (2b) if it changed under us
579	ldx	[%o2], %o1
580	rdpr	%tick, %o0
581	sllx	%o0, 1, %o0
582	srlx	%o0, 1, %o0			! shift off NPT bit
583	casx	[%o2], %o1, %o0
584	cmp	%o0, %o1
585	bne,pn	%xcc, 2b
586	ldn	[THREAD_REG + T_INTR], %l1	! delay
587	! Reset cpu_intrcnt if we aren't pinning anyone
588	brz,a,pt %l1, 2f
589	stub	%g0, [%i1 + CPU_INTRCNT]
	! second local "2" label: target of the 2f just above
5902:
591	ba,pt	%xcc, 1b
592	nop
5933:
594	!
595	! We're a non-interrupt thread and cpu_kprunrun is set. call kpreempt.
596	!
597	call	kpreempt
598	mov	KPREEMPT_SYNC, %o0
599	ba,pt	%xcc, 1b
600	nop
601	SET_SIZE(resume_from_intr)
602
603#endif /* lint */
604
605
606/*
607 * thread_start()
608 *
609 * the current register window was crafted by thread_run() to contain
610 * an address of a procedure (in register %i7), and its args in registers
611 * %i0 through %i5. a stack trace of this thread will show the procedure
612 * that thread_start() invoked at the bottom of the stack. when the started
613 * thread returns from its called procedure, thread_exit() is called to
614 * destroy it (the code below calls thread_exit() directly; %l0 is not
 * consulted here).
615 */
616
617#if defined(lint)
618
/*
 * Lint-only C prototype stub for thread_start(); the real
 * implementation is the SPARC assembly under the #else branch below.
 */
619void
620thread_start(void)
621{}
622
623#else	/* lint */
624
	!
	! thread_start()
	!
	! In (register window crafted by thread_run()):
	!	%i7 = address of the procedure to invoke
	!	%i0, %i1 = its arguments (only these two are forwarded
	!	to %o0/%o1 below)
	! If the procedure returns, thread_exit() destroys the thread;
	! the trailing unimp 0 traps should thread_exit() ever return.
	!
625	ENTRY(thread_start)
626	mov	%i0, %o0
627	jmpl 	%i7, %o7	! call thread_run()'s start() procedure.
628	mov	%i1, %o1	! delay - second argument
629
630	call	thread_exit	! destroy thread if it returns.
631	nop
632	unimp 0
633	SET_SIZE(thread_start)
634
635#endif	/* lint */
636