/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Process switching routines.
 */

#if defined(__lint)
#include <sys/thread.h>
#include <sys/systm.h>
#include <sys/time.h>
#else	/* __lint */
#include "assym.h"
#endif	/* __lint */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/stack.h>
#include <sys/segments.h>

/*
 * resume(thread_id_t t);
 *
 * A thread can only run on one processor at a time.  On an MP system
 * there is a window in which the current thread on one processor is
 * capable of being dispatched by another processor.  The outgoing and
 * incoming threads can overlap when they are the same thread; in that
 * case, resume() on one processor will spin on the incoming thread
 * until resume() on the other processor has finished with the
 * outgoing thread.
 *
 * The MMU context changes when the resuming thread resides in a different
 * process.  Kernel threads are known by resume to reside in process 0.
 * The MMU context, therefore, only changes when resuming a thread in
 * a process different from curproc.
 *
 * resume_from_intr() is called when the thread being resumed was not
 * passivated by resume (e.g. was interrupted).  This means that the
 * resume lock is already held and that a restore context is not needed.
 * Also, the MMU context is not changed on the resume in this case.
 *
 * resume_from_zombie() is the same as resume except that the calling thread
 * is a zombie and must be put on the deathrow list after the CPU is
 * off the stack.
 */
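
/*
 * For orientation, here is a hedged C-level sketch of the path resume()
 * takes below.  It is illustrative only: the field names (t_ctx, t_procp,
 * p_pctx, t_lock) and helpers (savectx(), restorectx(), hat_switch()) are
 * the ones the assembly references via assym, but the control flow is
 * compressed into pseudocode.
 *
 *	kthread_t *curt = CPU->cpu_thread;
 *
 *	save curt's non-volatile registers, stack pointer and return PC;
 *	if (curt->t_ctx != NULL)
 *		savectx(curt);
 *	if (curt->t_procp->p_pctx != NULL)
 *		savepctx(curt->t_procp);
 *	run on the idle thread's stack while curt is torn down;
 *	hat_switch(t->t_procp->p_as->a_hat);
 *	curt->t_lock = 0;	-- curt may now be dispatched elsewhere
 *	spin until t->t_lock is acquired (_resume_from_idle);
 *	CPU->cpu_thread = t;	-- also update TSS rsp0, cpu_lwp, ...
 *	if (t->t_ctx != NULL)
 *		restorectx(t);
 *	if (t->t_procp->p_pctx != NULL)
 *		restorepctx(t->t_procp);
 *	reload t's saved stack pointer and PC and return through spl0();
 */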

#if !defined(__lint)

#if LWP_PCB_FPU != 0
#error LWP_PCB_FPU MUST be defined as 0 for code in swtch.s to work
#endif	/* LWP_PCB_FPU != 0 */

#endif	/* !__lint */

#if defined(__amd64)

/*
 * Save non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
 *
 * The stack frame must be created before the save of %rsp so that tracebacks
 * of swtch()ed-out processes show the process as having last called swtch().
 */
#define SAVE_REGS(thread_t, retaddr)			\
	movq	%rbp, T_RBP(thread_t);			\
	movq	%rbx, T_RBX(thread_t);			\
	movq	%r12, T_R12(thread_t);			\
	movq	%r13, T_R13(thread_t);			\
	movq	%r14, T_R14(thread_t);			\
	movq	%r15, T_R15(thread_t);			\
	pushq	%rbp;					\
	movq	%rsp, %rbp;				\
	movq	%rsp, T_SP(thread_t);			\
	movq	retaddr, T_PC(thread_t);		\
	movq	%rdi, %r12;				\
	call	__dtrace_probe___sched_off__cpu

/*
 * Restore non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
 *
 * We load up %rsp from the label_t as part of the context switch, so
 * we don't repeat that here.
 *
 * We don't do a 'leave,' because reloading %rsp/%rbp from the label_t
 * already has the effect of putting the stack back the way it was when
 * we came in.
 */
#define RESTORE_REGS(scratch_reg)			\
	movq	%gs:CPU_THREAD, scratch_reg;		\
	movq	T_RBP(scratch_reg), %rbp;		\
	movq	T_RBX(scratch_reg), %rbx;		\
	movq	T_R12(scratch_reg), %r12;		\
	movq	T_R13(scratch_reg), %r13;		\
	movq	T_R14(scratch_reg), %r14;		\
	movq	T_R15(scratch_reg), %r15

/*
 * Get pointer to a thread's hat structure
 */
#define GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movq	T_PROCP(thread_t), hatp;		\
	movq	P_AS(hatp), scratch_reg;		\
	movq	A_HAT(scratch_reg), hatp
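
/*
 * In C terms the macro above is simply (illustrative):
 *
 *	hatp = thread->t_procp->p_as->a_hat;
 */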

#define	TSC_READ()					\
	call	tsc_read;				\
	movq	%rax, %r14;

/*
 * If we are resuming an interrupt thread, store a timestamp in the thread
 * structure.  If an interrupt occurs between tsc_read() and its subsequent
 * store, the timestamp will be stale by the time it is stored.  We can detect
 * this by doing a compare-and-swap on the thread's timestamp, since any
 * interrupt occurring in this window will put a new timestamp in the thread's
 * t_intr_start field.
 */
#define	STORE_INTR_START(thread_t)			\
	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
	jz	1f;					\
0:							\
	TSC_READ();					\
	movq	T_INTR_START(thread_t), %rax;		\
	cmpxchgq %r14, T_INTR_START(thread_t);		\
	jnz	0b;					\
1:
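
/*
 * A C-level sketch of STORE_INTR_START (illustrative only; it assumes
 * the usual illumos atomic_cas_64() semantics of returning the value
 * that was previously in memory):
 *
 *	if (t->t_flags & T_INTR_THREAD) {
 *		hrtime_t new, old;
 *
 *		do {
 *			new = tsc_read();
 *			old = t->t_intr_start;
 *		} while (atomic_cas_64((uint64_t *)&t->t_intr_start,
 *		    old, new) != old);
 *	}
 */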

#elif defined (__i386)

/*
 * Save non-volatile registers (%ebp, %esi, %edi and %ebx)
 *
 * The stack frame must be created before the save of %esp so that tracebacks
 * of swtch()ed-out processes show the process as having last called swtch().
 */
#define SAVE_REGS(thread_t, retaddr)			\
	movl	%ebp, T_EBP(thread_t);			\
	movl	%ebx, T_EBX(thread_t);			\
	movl	%esi, T_ESI(thread_t);			\
	movl	%edi, T_EDI(thread_t);			\
	pushl	%ebp;					\
	movl	%esp, %ebp;				\
	movl	%esp, T_SP(thread_t);			\
	movl	retaddr, T_PC(thread_t);		\
	movl	8(%ebp), %edi;				\
	pushl	%edi;					\
	call	__dtrace_probe___sched_off__cpu;	\
	addl	$CLONGSIZE, %esp

/*
 * Restore non-volatile registers (%ebp, %esi, %edi and %ebx)
 *
 * We don't do a 'leave,' because reloading %esp/%ebp from the label_t
 * already has the effect of putting the stack back the way it was when
 * we came in.
 */
#define RESTORE_REGS(scratch_reg)			\
	movl	%gs:CPU_THREAD, scratch_reg;		\
	movl	T_EBP(scratch_reg), %ebp;		\
	movl	T_EBX(scratch_reg), %ebx;		\
	movl	T_ESI(scratch_reg), %esi;		\
	movl	T_EDI(scratch_reg), %edi

/*
 * Get pointer to a thread's hat structure
 */
#define GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movl	T_PROCP(thread_t), hatp;		\
	movl	P_AS(hatp), scratch_reg;		\
	movl	A_HAT(scratch_reg), hatp

/*
 * If we are resuming an interrupt thread, store a timestamp in the thread
 * structure.  If an interrupt occurs between tsc_read() and its subsequent
 * store, the timestamp will be stale by the time it is stored.  We can detect
 * this by doing a compare-and-swap on the thread's timestamp, since any
 * interrupt occurring in this window will put a new timestamp in the thread's
 * t_intr_start field.
 */
#define	STORE_INTR_START(thread_t)			\
	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
	jz	1f;					\
	pushl	%ecx;					\
0:							\
	pushl	T_INTR_START(thread_t);			\
	pushl	T_INTR_START+4(thread_t);		\
	call	tsc_read;				\
	movl	%eax, %ebx;				\
	movl	%edx, %ecx;				\
	popl	%edx;					\
	popl	%eax;					\
	cmpxchg8b T_INTR_START(thread_t);		\
	jnz	0b;					\
	popl	%ecx;					\
1:

#endif	/* __amd64 */

#if defined(__lint)

/* ARGSUSED */
void
resume(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(resume)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	LOADCPU(%r15)				/* %r15 = CPU */
	movq	CPU_THREAD(%r15), %r13		/* %r13 = curthread */

	/*
	 * Call savectx if thread has installed context ops.
	 *
	 * Note that if we have floating point context, the save op
	 * (either fpsave_begin or fpxsave_begin) will issue the
	 * async save instruction (fnsave or fxsave respectively)
	 * that we fwait for below.
	 */
	cmpq	$0, T_CTX(%r13)		/* should current thread savectx? */
	je	.nosavectx		/* skip call when zero */

	movq	%r13, %rdi		/* arg = thread pointer */
	call	savectx			/* call ctx ops */
.nosavectx:

	/*
	 * Call savepctx if process has installed context ops.
	 */
	movq	T_PROCP(%r13), %r14	/* %r14 = proc */
	cmpq	$0, P_PCTX(%r14)	/* should current thread savepctx? */
	je	.nosavepctx		/* skip call when zero */

	movq	%r14, %rdi		/* arg = proc pointer */
	call	savepctx		/* call ctx ops */
.nosavepctx:

	/*
	 * Temporarily switch to the idle thread's stack
	 */
	movq	CPU_IDLE_THREAD(%r15), %rax	/* idle thread pointer */

	/*
	 * Set the idle thread as the current thread
	 */
	movq	T_SP(%rax), %rsp	/* It is safe to set rsp */
	movq	%rax, CPU_THREAD(%r15)

	/*
	 * Switch in the hat context for the new thread
	 */
	GET_THREAD_HATP(%rdi, %r12, %r11)
	call	hat_switch

	/*
	 * Clear and unlock previous thread's t_lock
	 * to allow it to be dispatched by another processor.
	 */
	movb	$0, T_LOCK(%r13)

	/*
	 * IMPORTANT: Registers at this point must be:
	 *       %r12 = new thread
	 *
	 * Here we are in the idle thread, have dropped the old thread.
	 */
	ALTENTRY(_resume_from_idle)
	/*
	 * Spin until the dispatched thread's mutex has been unlocked.
	 * The mutex is unlocked when it becomes safe for the thread to run.
	 */
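	/*
	 * Roughly equivalent C (illustrative; lock_try() stands in for the
	 * atomic test-and-set done with "lock btsl" below):
	 *
	 *	while (!lock_try(&t->t_lock)) {
	 *		while (t->t_lock != 0)
	 *			SMT_PAUSE();
	 *	}
	 */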
.lock_thread_mutex:
	lock
	btsl	$0, T_LOCK(%r12)	/* attempt to lock new thread's mutex */
	jnc	.thread_mutex_locked	/* got it */

.spin_thread_mutex:
	pause
	cmpb	$0, T_LOCK(%r12)	/* check mutex status */
	jz	.lock_thread_mutex	/* clear, retry lock */
	jmp	.spin_thread_mutex	/* still locked, spin... */

.thread_mutex_locked:
	/*
	 * Fix CPU structure to indicate new running thread.
	 * Set pointer in new thread to the CPU structure.
	 */
	LOADCPU(%r13)			/* load current CPU pointer */
	cmpq	%r13, T_CPU(%r12)
	je	.setup_cpu

	/* cp->cpu_stats.sys.cpumigrate++ */
	incq	CPU_STATS_SYS_CPUMIGRATE(%r13)
	movq	%r13, T_CPU(%r12)	/* set new thread's CPU pointer */

.setup_cpu:
	/*
	 * Set up rsp0 (the kernel stack) in the TSS to point at curthread's
	 * stack.  (Note: since we don't have a saved 'regs' structure for
	 * every thread, we can't easily determine whether rsp0 needs to
	 * change.  So we simply set rsp0 to the bottom of the thread stack,
	 * which works in all cases.)
	 *
	 * XX64 - Is this correct?
	 */
	movq	CPU_TSS(%r13), %r14
	movq	T_STACK(%r12), %rax
	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
#if !defined(__xpv)
	movq	%rax, TSS_RSP0(%r14)
#else
	movl	$KDS_SEL, %edi
	movq	%rax, %rsi
	call	HYPERVISOR_stack_switch
#endif	/* __xpv */
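
	/*
	 * In C terms the bare-metal case above is roughly (illustrative;
	 * "tss" is whatever CPU_TSS resolves to for this CPU):
	 *
	 *	tss->tss_rsp0 = (uintptr_t)t->t_stack + REGSIZE + MINFRAME;
	 *
	 * i.e. the next trap from user level will start at the base of the
	 * incoming thread's kernel stack.
	 */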

	movq	%r12, CPU_THREAD(%r13)	/* set CPU's thread pointer */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	movq	T_LWP(%r12), %rax	/* set associated lwp to  */
	movq	%rax, CPU_LWP(%r13)	/* CPU's lwp ptr */

	movq	T_SP(%r12), %rsp	/* switch to resuming thread's stack */
	movq	T_PC(%r12), %r13	/* saved return addr */

	/*
	 * Call restorectx if context ops have been installed.
	 */
	cmpq	$0, T_CTX(%r12)		/* should resumed thread restorectx? */
	jz	.norestorectx		/* skip call when zero */
	movq	%r12, %rdi		/* arg = thread pointer */
	call	restorectx		/* call ctx ops */
.norestorectx:

	/*
	 * Call restorepctx if context ops have been installed for the proc.
	 */
	movq	T_PROCP(%r12), %rcx
	cmpq	$0, P_PCTX(%rcx)
	jz	.norestorepctx
	movq	%rcx, %rdi
	call	restorepctx
.norestorepctx:

	STORE_INTR_START(%r12)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movq	%r13, %rax	/* save return address */
	RESTORE_REGS(%r11)
	pushq	%rax		/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

#elif defined (__i386)

	ENTRY(resume)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

	LOADCPU(%ebx)			/* %ebx = CPU */
	movl	CPU_THREAD(%ebx), %esi	/* %esi = curthread */

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	/*
	 * Call savectx if thread has installed context ops.
	 *
	 * Note that if we have floating point context, the save op
	 * (either fpsave_begin or fpxsave_begin) will issue the
	 * async save instruction (fnsave or fxsave respectively)
	 * that we fwait for below.
	 */
	movl	T_CTX(%esi), %eax	/* should current thread savectx? */
	testl	%eax, %eax
	jz	.nosavectx		/* skip call when zero */
	pushl	%esi			/* arg = thread pointer */
	call	savectx			/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.nosavectx:

	/*
	 * Call savepctx if process has installed context ops.
	 */
	movl	T_PROCP(%esi), %eax	/* %eax = proc */
	cmpl	$0, P_PCTX(%eax)	/* should current thread savepctx? */
	je	.nosavepctx		/* skip call when zero */
	pushl	%eax			/* arg = proc pointer */
	call	savepctx		/* call ctx ops */
	addl	$4, %esp
.nosavepctx:

	/*
	 * Temporarily switch to the idle thread's stack
	 */
	movl	CPU_IDLE_THREAD(%ebx), %eax	/* idle thread pointer */

	/*
	 * Set the idle thread as the current thread
	 */
	movl	T_SP(%eax), %esp	/* It is safe to set esp */
	movl	%eax, CPU_THREAD(%ebx)

	/* switch in the hat context for the new thread */
	GET_THREAD_HATP(%ecx, %edi, %ecx)
	pushl	%ecx
	call	hat_switch
	addl	$4, %esp

	/*
	 * Clear and unlock previous thread's t_lock
	 * to allow it to be dispatched by another processor.
	 */
	movb	$0, T_LOCK(%esi)

	/*
	 * IMPORTANT: Registers at this point must be:
	 *       %edi = new thread
	 *
	 * Here we are in the idle thread, have dropped the old thread.
	 */
	ALTENTRY(_resume_from_idle)
	/*
	 * Spin until the dispatched thread's mutex has been unlocked.
	 * The mutex is unlocked when it becomes safe for the thread to run.
	 */
.L4:
	lock
	btsl	$0, T_LOCK(%edi) /* lock new thread's mutex */
	jc	.L4_2			/* lock did not succeed */

	/*
	 * Fix CPU structure to indicate new running thread.
	 * Set pointer in new thread to the CPU structure.
	 */
	LOADCPU(%esi)			/* load current CPU pointer */
	movl	T_STACK(%edi), %eax	/* loaded here to use the Pentium's */
					/* V pipeline; used a few lines below */
	cmpl	%esi, T_CPU(%edi)
	jne	.L5_2
.L5_1:
	/*
	 * Set up esp0 (the kernel stack) in the TSS to point at curthread's
	 * stack.  (Note: since we don't have a saved 'regs' structure for
	 * every thread, we can't easily determine whether esp0 needs to
	 * change.  So we simply set esp0 to the bottom of the thread stack,
	 * which works in all cases.)
	 */
	movl	CPU_TSS(%esi), %ecx
	addl	$REGSIZE+MINFRAME, %eax	/* to the bottom of thread stack */
#if !defined(__xpv)
	movl	%eax, TSS_ESP0(%ecx)
#else
	pushl	%eax
	pushl	$KDS_SEL
	call	HYPERVISOR_stack_switch
	addl	$8, %esp
#endif	/* __xpv */

	movl	%edi, CPU_THREAD(%esi)	/* set CPU's thread pointer */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	movl	T_LWP(%edi), %eax	/* set associated lwp to  */
	movl	%eax, CPU_LWP(%esi)	/* CPU's lwp ptr */

	movl	T_SP(%edi), %esp	/* switch to resuming thread's stack */
	movl	T_PC(%edi), %esi	/* saved return addr */

	/*
	 * Call restorectx if context ops have been installed.
	 */
	movl	T_CTX(%edi), %eax	/* should resumed thread restorectx? */
	testl	%eax, %eax
	jz	.norestorectx		/* skip call when zero */
	pushl	%edi			/* arg = thread pointer */
	call	restorectx		/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.norestorectx:

	/*
	 * Call restorepctx if context ops have been installed for the proc.
	 */
	movl	T_PROCP(%edi), %eax
	cmpl	$0, P_PCTX(%eax)
	je	.norestorepctx
	pushl	%eax			/* arg = proc pointer */
	call	restorepctx
	addl	$4, %esp		/* restore stack pointer */
.norestorepctx:

	STORE_INTR_START(%edi)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movl	%esi, %eax		/* save return address */
	RESTORE_REGS(%ecx)
	pushl	%eax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret

.L4_2:
	pause
	cmpb	$0, T_LOCK(%edi)
	je	.L4
	jmp	.L4_2

.L5_2:
	/* cp->cpu_stats.sys.cpumigrate++ */
	addl	$1, CPU_STATS_SYS_CPUMIGRATE(%esi)
	adcl	$0, CPU_STATS_SYS_CPUMIGRATE+4(%esi)
	movl	%esi, T_CPU(%edi)	/* set new thread's CPU pointer */
	jmp	.L5_1

	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

#endif	/* __amd64 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
void
resume_from_zombie(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(resume_from_zombie)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_from_zombie_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_zombie_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */

	/* clean up the fp unit. It might be left enabled */

#if defined(__xpv)		/* XXPV XXtclayton */
	/*
	 * Remove this after bringup.
	 * (Too many #gp's for an instrumented hypervisor.)
	 */
	STTS(%rax)
#else
	movq	%cr0, %rax
	testq	$CR0_TS, %rax
	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
	fninit				/* init fpu & discard pending error */
	orq	$CR0_TS, %rax
	movq	%rax, %cr0
.zfpu_disabled:

#endif	/* __xpv */
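
	/*
	 * The non-xpv case above is roughly (illustrative C; getcr0() and
	 * setcr0() are the usual control-register accessors, and the x87
	 * reinitialization is the fninit above):
	 *
	 *	ulong_t cr0 = getcr0();
	 *	if (!(cr0 & CR0_TS)) {
	 *		reinitialize the FPU and discard any pending error;
	 *		setcr0(cr0 | CR0_TS);
	 *	}
	 */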

	/*
	 * Temporarily switch to the idle thread's stack so that the zombie
	 * thread's stack can be reclaimed by the reaper.
	 */
	movq	%gs:CPU_IDLE_THREAD, %rax /* idle thread pointer */
	movq	T_SP(%rax), %rsp	/* get onto idle thread stack */

	/*
	 * Sigh. If the idle thread has never run thread_start()
	 * then t_sp is mis-aligned by thread_load().
	 */
	andq	$_BITNOT(STACK_ALIGN-1), %rsp

	/*
	 * Set the idle thread as the current thread.
	 */
	movq	%rax, %gs:CPU_THREAD

	/* switch in the hat context for the new thread */
	GET_THREAD_HATP(%rdi, %r12, %r11)
	call	hat_switch

	/*
	 * Put the zombie on death-row.
	 */
	movq	%r13, %rdi
	call	reapq_add

	jmp	_resume_from_idle	/* finish job of resume */

resume_from_zombie_return:
	RESTORE_REGS(%r11)		/* restore non-volatile registers */
	call	__dtrace_probe___sched_on__cpu

	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(resume_from_zombie)

#elif defined (__i386)

	ENTRY(resume_from_zombie)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_from_zombie_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_zombie_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */

	/* clean up the fp unit. It might be left enabled */

	movl	%cr0, %eax
	testl	$CR0_TS, %eax
	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
	fninit				/* init fpu & discard pending error */
	orl	$CR0_TS, %eax
	movl	%eax, %cr0
.zfpu_disabled:

	/*
	 * Temporarily switch to the idle thread's stack so that the zombie
	 * thread's stack can be reclaimed by the reaper.
	 */
	movl	%gs:CPU_IDLE_THREAD, %eax /* idle thread pointer */
	movl	T_SP(%eax), %esp	/* get onto idle thread stack */

	/*
	 * Set the idle thread as the current thread.
	 */
	movl	%eax, %gs:CPU_THREAD

	/*
	 * switch in the hat context for the new thread
	 */
	GET_THREAD_HATP(%ecx, %edi, %ecx)
	pushl	%ecx
	call	hat_switch
	addl	$4, %esp

	/*
	 * Put the zombie on death-row.
	 */
	pushl	%esi
	call	reapq_add
	addl	$4, %esp
	jmp	_resume_from_idle	/* finish job of resume */

resume_from_zombie_return:
	RESTORE_REGS(%ecx)		/* restore non-volatile registers */
	call	__dtrace_probe___sched_on__cpu

	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
	SET_SIZE(resume_from_zombie)

#endif	/* __amd64 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
void
resume_from_intr(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(resume_from_intr)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_from_intr_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_intr_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */
	movq	%r12, %gs:CPU_THREAD	/* set CPU's thread pointer */
	movq	T_SP(%r12), %rsp	/* restore resuming thread's sp */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */

	/*
	 * Unlock the outgoing thread's mutex so it may be dispatched
	 * by another processor.
	 */
	xorl	%eax, %eax
	xchgb	%al, T_LOCK(%r13)

	STORE_INTR_START(%r12)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movq	T_PC(%r12), %rax	/* saved return addr */
	RESTORE_REGS(%r11)
	pushq	%rax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_from_intr_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(resume_from_intr)

#elif defined (__i386)

	ENTRY(resume_from_intr)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_from_intr_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_intr_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */
	movl	%edi, %gs:CPU_THREAD	/* set CPU's thread pointer */
	movl	T_SP(%edi), %esp	/* restore resuming thread's sp */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */

	/*
	 * Unlock the outgoing thread's mutex so it may be dispatched
	 * by another processor.
	 */
	xorl	%eax, %eax
	xchgb	%al, T_LOCK(%esi)

	STORE_INTR_START(%edi)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movl	T_PC(%edi), %eax	/* saved return addr */
	RESTORE_REGS(%ecx)
	pushl	%eax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_from_intr_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
	SET_SIZE(resume_from_intr)

#endif	/* __amd64 */
#endif	/* __lint */

#if defined(__lint)

void
thread_start(void)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(thread_start)
	popq	%rax		/* start() */
	popq	%rdi		/* arg */
	popq	%rsi		/* len */
	movq	%rsp, %rbp
	call	*%rax
	call	thread_exit	/* destroy thread if it returns. */
	/*NOTREACHED*/
	SET_SIZE(thread_start)
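
/*
 * For reference, a new thread enters thread_start() with a stack that
 * looks roughly like this (illustrative; the frame is built when the
 * thread is created, cf. the thread_load() reference above):
 *
 *	t_sp ->	start		popped into %rax, called as start(arg, len)
 *		arg		popped into %rdi
 *		len		popped into %rsi
 *
 * If start() returns, thread_exit() destroys the thread.
 */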

#elif defined(__i386)

	ENTRY(thread_start)
	popl	%eax
	movl	%esp, %ebp
	addl	$8, %ebp
	call	*%eax
	addl	$8, %esp
	call	thread_exit	/* destroy thread if it returns. */
	/*NOTREACHED*/
	SET_SIZE(thread_start)

#endif	/* __i386 */

#endif	/* __lint */
