xref: /titanic_51/usr/src/uts/intel/ia32/ml/swtch.s (revision 511ef1d6e74f3d1db7bd854375a0926d87bdd7e6)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

/*
 * Process switching routines.
 */

#if defined(__lint)
#include <sys/thread.h>
#include <sys/systm.h>
#include <sys/time.h>
#else	/* __lint */
#include "assym.h"
#endif	/* __lint */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/stack.h>
#include <sys/segments.h>

/*
 * resume(thread_id_t t);
 *
 * a thread can only run on one processor at a time. there
 * exists a window on MPs where the current thread on one
 * processor is capable of being dispatched by another processor.
 * some overlap between outgoing and incoming threads can happen
 * when they are the same thread. in this case where the threads
 * are the same, resume() on one processor will spin on the incoming
 * thread until resume() on the other processor has finished with
 * the outgoing thread.
 *
 * The MMU context changes when the resuming thread resides in a different
 * process.  Kernel threads are known by resume to reside in process 0.
 * The MMU context, therefore, only changes when resuming a thread in
 * a process different from curproc.
 *
 * resume_from_intr() is called when the thread being resumed was not
 * passivated by resume (e.g. was interrupted).  This means that the
 * resume lock is already held and that a restore context is not needed.
 * Also, the MMU context is not changed on the resume in this case.
 *
 * resume_from_zombie() is the same as resume except the calling thread
 * is a zombie and must be put on the deathrow list after the CPU is
 * off the stack.
 */
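/*
 * Purely as an illustrative sketch (resume() cannot literally be written in
 * C, since it switches stacks in the middle of the function), the flow of
 * the amd64 code below is approximately:
 *
 *	resume(kthread_t *t)
 *	{
 *		kthread_t *old = curthread;
 *
 *		SAVE_REGS(old, resume_return);	  // callee-saved regs, sp, pc
 *		if (old->t_ctx != NULL)
 *			savectx(old);		  // thread context ops
 *		if (ttoproc(old)->p_pctx != NULL)
 *			savepctx(ttoproc(old));	  // process context ops
 *		run briefly on the idle thread's stack;
 *		hat_switch(hat of t);		  // MMU context, if needed
 *		old->t_lock = 0;		  // old thread may now migrate
 *		spin until t's t_lock is acquired (_resume_from_idle);
 *		make t the CPU's thread/lwp, update TSS rsp0;
 *		if (t->t_ctx != NULL) restorectx(t);
 *		load t's saved sp/pc and return through spl0();
 *	}
 */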

#if !defined(__lint)

#if LWP_PCB_FPU != 0
#error LWP_PCB_FPU MUST be defined as 0 for code in swtch.s to work
#endif	/* LWP_PCB_FPU != 0 */

#endif	/* !__lint */

#if defined(__amd64)

/*
 * Save non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
 *
 * The stack frame must be created before the save of %rsp so that tracebacks
 * of swtch()ed-out processes show the process as having last called swtch().
 */
#define SAVE_REGS(thread_t, retaddr)			\
	movq	%rbp, T_RBP(thread_t);			\
	movq	%rbx, T_RBX(thread_t);			\
	movq	%r12, T_R12(thread_t);			\
	movq	%r13, T_R13(thread_t);			\
	movq	%r14, T_R14(thread_t);			\
	movq	%r15, T_R15(thread_t);			\
	pushq	%rbp;					\
	movq	%rsp, %rbp;				\
	movq	%rsp, T_SP(thread_t);			\
	movq	retaddr, T_PC(thread_t);		\
	movq	%rdi, %r12;				\
	call	__dtrace_probe___sched_off__cpu
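
/*
 * Hedged sketch of the stack this macro leaves behind for the switched-out
 * thread; the frame created by the pushq/movq pair is why a traceback of a
 * switched-out thread ends in swtch()/resume():
 *
 *	T_SP -> saved %rbp		<- new %rbp
 *		return address into resume()'s caller (swtch())
 *		caller's frame ...
 *
 * T_PC is set to resume_return, so the thread later resumes just past the
 * context switch, and %r12 carries the incoming thread across the calls made
 * while still on the old stack.
 */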

/*
 * Restore non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
 *
 * We load up %rsp from the label_t as part of the context switch, so
 * we don't repeat that here.
 *
 * We don't do a 'leave,' because reloading %rsp/%rbp from the label_t
 * already has the effect of putting the stack back the way it was when
 * we came in.
 */
#define RESTORE_REGS(scratch_reg)			\
	movq	%gs:CPU_THREAD, scratch_reg;		\
	movq	T_RBP(scratch_reg), %rbp;		\
	movq	T_RBX(scratch_reg), %rbx;		\
	movq	T_R12(scratch_reg), %r12;		\
	movq	T_R13(scratch_reg), %r13;		\
	movq	T_R14(scratch_reg), %r14;		\
	movq	T_R15(scratch_reg), %r15

/*
 * Get pointer to a thread's hat structure
 */
#define GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movq	T_PROCP(thread_t), hatp;		\
	movq	P_AS(hatp), scratch_reg;		\
	movq	A_HAT(scratch_reg), hatp

#define	TSC_READ()					\
	call	tsc_read;				\
	movq	%rax, %r14;

/*
 * If we are resuming an interrupt thread, store a timestamp in the thread
 * structure.  If an interrupt occurs between tsc_read() and its subsequent
 * store, the timestamp will be stale by the time it is stored.  We can detect
 * this by doing a compare-and-swap on the thread's timestamp, since any
 * interrupt occurring in this window will put a new timestamp in the thread's
 * t_intr_start field.
 */
#define	STORE_INTR_START(thread_t)			\
	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
	jz	1f;					\
0:							\
	TSC_READ();					\
	movq	T_INTR_START(thread_t), %rax;		\
	cmpxchgq %r14, T_INTR_START(thread_t);		\
	jnz	0b;					\
1:
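
/*
 * Hedged C-level equivalent of STORE_INTR_START() (shown with the kernel's
 * atomic_cas_64(); the assembly open-codes the same cmpxchg retry loop):
 *
 *	if (t->t_flags & T_INTR_THREAD) {
 *		hrtime_t old, new;
 *		do {
 *			new = (hrtime_t)tsc_read();
 *			old = t->t_intr_start;
 *		} while (atomic_cas_64((uint64_t *)&t->t_intr_start,
 *		    (uint64_t)old, (uint64_t)new) != (uint64_t)old);
 *	}
 */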

#elif defined (__i386)

/*
 * Save non-volatile registers (%ebp, %esi, %edi and %ebx)
 *
 * The stack frame must be created before the save of %esp so that tracebacks
 * of swtch()ed-out processes show the process as having last called swtch().
 */
#define SAVE_REGS(thread_t, retaddr)			\
	movl	%ebp, T_EBP(thread_t);			\
	movl	%ebx, T_EBX(thread_t);			\
	movl	%esi, T_ESI(thread_t);			\
	movl	%edi, T_EDI(thread_t);			\
	pushl	%ebp;					\
	movl	%esp, %ebp;				\
	movl	%esp, T_SP(thread_t);			\
	movl	retaddr, T_PC(thread_t);		\
	movl	8(%ebp), %edi;				\
	pushl	%edi;					\
	call	__dtrace_probe___sched_off__cpu;	\
	addl	$CLONGSIZE, %esp

/*
 * Restore non-volatile registers (%ebp, %esi, %edi and %ebx)
 *
 * We don't do a 'leave,' because reloading %esp/%ebp from the label_t
 * already has the effect of putting the stack back the way it was when
 * we came in.
 */
#define RESTORE_REGS(scratch_reg)			\
	movl	%gs:CPU_THREAD, scratch_reg;		\
	movl	T_EBP(scratch_reg), %ebp;		\
	movl	T_EBX(scratch_reg), %ebx;		\
	movl	T_ESI(scratch_reg), %esi;		\
	movl	T_EDI(scratch_reg), %edi

/*
 * Get pointer to a thread's hat structure
 */
#define GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movl	T_PROCP(thread_t), hatp;		\
	movl	P_AS(hatp), scratch_reg;		\
	movl	A_HAT(scratch_reg), hatp

/*
 * If we are resuming an interrupt thread, store a timestamp in the thread
 * structure.  If an interrupt occurs between tsc_read() and its subsequent
 * store, the timestamp will be stale by the time it is stored.  We can detect
 * this by doing a compare-and-swap on the thread's timestamp, since any
 * interrupt occurring in this window will put a new timestamp in the thread's
 * t_intr_start field.
 */
#define	STORE_INTR_START(thread_t)			\
	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
	jz	1f;					\
	pushl	%ecx;					\
0:							\
	pushl	T_INTR_START(thread_t);			\
	pushl	T_INTR_START+4(thread_t);		\
	call	tsc_read;				\
	movl	%eax, %ebx;				\
	movl	%edx, %ecx;				\
	popl	%edx;					\
	popl	%eax;					\
	cmpxchg8b T_INTR_START(thread_t);		\
	jnz	0b;					\
	popl	%ecx;					\
1:

#endif	/* __amd64 */

#if defined(__lint)

/* ARGSUSED */
void
resume(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(resume)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	LOADCPU(%r15)				/* %r15 = CPU */
	movq	CPU_THREAD(%r15), %r13		/* %r13 = curthread */

	/*
	 * Call savectx if thread has installed context ops.
	 *
	 * Note that if we have floating point context, the save op
	 * (either fpsave_begin or fpxsave_begin) will issue the
	 * async save instruction (fnsave or fxsave respectively)
	 * that we fwait for below.
	 */
	cmpq	$0, T_CTX(%r13)		/* should current thread savectx? */
	je	.nosavectx		/* skip call when zero */

	movq	%r13, %rdi		/* arg = thread pointer */
	call	savectx			/* call ctx ops */
.nosavectx:

	/*
	 * Call savepctx if process has installed context ops.
	 */
	movq	T_PROCP(%r13), %r14	/* %r14 = proc */
	cmpq	$0, P_PCTX(%r14)	/* should current thread savepctx? */
	je	.nosavepctx		/* skip call when zero */

	movq	%r14, %rdi		/* arg = proc pointer */
	call	savepctx		/* call ctx ops */
.nosavepctx:
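
/*
 * In C terms the two blocks above are simply (sketch; savectx()/savepctx()
 * are the dispatchers for installed thread/process context ops):
 *
 *	if (curthread->t_ctx != NULL)
 *		savectx(curthread);
 *	if (ttoproc(curthread)->p_pctx != NULL)
 *		savepctx(ttoproc(curthread));
 */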

	/*
	 * Temporarily switch to the idle thread's stack
	 */
	movq	CPU_IDLE_THREAD(%r15), %rax	/* idle thread pointer */

	/*
	 * Set the idle thread as the current thread
	 */
	movq	T_SP(%rax), %rsp	/* It is safe to set rsp */
	movq	%rax, CPU_THREAD(%r15)

	/*
	 * Switch in the hat context for the new thread
	 *
	 */
	GET_THREAD_HATP(%rdi, %r12, %r11)
	call	hat_switch

	/*
	 * Clear and unlock previous thread's t_lock
	 * to allow it to be dispatched by another processor.
	 */
	movb	$0, T_LOCK(%r13)

	/*
	 * IMPORTANT: Registers at this point must be:
	 *       %r12 = new thread
	 *
	 * Here we are in the idle thread, have dropped the old thread.
	 */
	ALTENTRY(_resume_from_idle)
	/*
	 * spin until dispatched thread's mutex has
	 * been unlocked. this mutex is unlocked when
	 * it becomes safe for the thread to run.
	 */
.lock_thread_mutex:
	lock
	btsl	$0, T_LOCK(%r12)	/* attempt to lock new thread's mutex */
	jnc	.thread_mutex_locked	/* got it */

.spin_thread_mutex:
	pause
	cmpb	$0, T_LOCK(%r12)	/* check mutex status */
	jz	.lock_thread_mutex	/* clear, retry lock */
	jmp	.spin_thread_mutex	/* still locked, spin... */

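/*
 * Hedged C-level sketch of the acquire loop above (t_lock is a byte-sized
 * dispatcher lock; lock_try()/SMT_PAUSE() are used here only to express the
 * idea, the assembly open-codes it with lock btsl/pause):
 *
 *	while (!lock_try((lock_t *)&t->t_lock)) {
 *		while (t->t_lock != 0)
 *			SMT_PAUSE();		// spin read-only, then retry
 *	}
 */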
.thread_mutex_locked:
	/*
	 * Fix CPU structure to indicate new running thread.
	 * Set pointer in new thread to the CPU structure.
	 */
	LOADCPU(%r13)			/* load current CPU pointer */
	cmpq	%r13, T_CPU(%r12)
	je	.setup_cpu

	/* cp->cpu_stats.sys.cpumigrate++ */
	incq    CPU_STATS_SYS_CPUMIGRATE(%r13)
	movq	%r13, T_CPU(%r12)	/* set new thread's CPU pointer */

.setup_cpu:
	/*
	 * Setup rsp0 (kernel stack) in TSS to curthread's stack.
	 * (Note: Since we don't have saved 'regs' structure for all
	 *	  the threads we can't easily determine if we need to
	 *	  change rsp0. So, we simply change the rsp0 to bottom
	 *	  of the thread stack and it will work for all cases.)
	 *
	 * XX64 - Is this correct?
	 */
	movq	CPU_TSS(%r13), %r14
	movq	T_STACK(%r12), %rax
	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
#if !defined(__xpv)
	movq	%rax, TSS_RSP0(%r14)
#else
	movl	$KDS_SEL, %edi
	movq	%rax, %rsi
	call	HYPERVISOR_stack_switch
#endif	/* __xpv */
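
/*
 * Hedged C-level equivalent of the rsp0 setup above, with names standing in
 * for whatever the CPU_TSS/T_STACK/TSS_RSP0 assym offsets resolve to (under
 * the hypervisor the same value is handed to HYPERVISOR_stack_switch()
 * instead):
 *
 *	cp->cpu_tss->tss_rsp0 = (uintptr_t)t->t_stk + REGSIZE + MINFRAME;
 *
 * so any later CPL3 to CPL0 transition lands at the top of the resuming
 * thread's kernel stack.
 */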

	movq	%r12, CPU_THREAD(%r13)	/* set CPU's thread pointer */
	mfence				/* synchronize with mutex_exit() */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	movq	T_LWP(%r12), %rax	/* set associated lwp to  */
	movq	%rax, CPU_LWP(%r13)	/* CPU's lwp ptr */

	movq	T_SP(%r12), %rsp	/* switch to resuming thread's stack */
	movq	T_PC(%r12), %r13	/* saved return addr */

	/*
	 * Call restorectx if context ops have been installed.
	 */
	cmpq	$0, T_CTX(%r12)		/* should resumed thread restorectx? */
	jz	.norestorectx		/* skip call when zero */
	movq	%r12, %rdi		/* arg = thread pointer */
	call	restorectx		/* call ctx ops */
.norestorectx:

	/*
	 * Call restorepctx if context ops have been installed for the proc.
	 */
	movq	T_PROCP(%r12), %rcx
	cmpq	$0, P_PCTX(%rcx)
	jz	.norestorepctx
	movq	%rcx, %rdi
	call	restorepctx
.norestorepctx:

	STORE_INTR_START(%r12)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movq	%r13, %rax	/* save return address */
	RESTORE_REGS(%r11)
	pushq	%rax		/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0
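
/*
 * Note on the tail above: the resuming thread's saved PC was pushed as if it
 * were a return address, so when spl0() finally executes its "ret" it
 * transfers control straight to T_PC of the new thread, i.e. to the point
 * recorded when that thread was switched out (resume_return and friends), or
 * to thread_start for a brand new thread.  Conceptually: lower the priority,
 * then jump to where the thread left off.
 */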

resume_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

#elif defined (__i386)

	ENTRY(resume)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax,  %ecx)

	LOADCPU(%ebx)			/* %ebx = CPU */
	movl	CPU_THREAD(%ebx), %esi	/* %esi = curthread */

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	/*
	 * Call savectx if thread has installed context ops.
	 *
	 * Note that if we have floating point context, the save op
	 * (either fpsave_begin or fpxsave_begin) will issue the
	 * async save instruction (fnsave or fxsave respectively)
	 * that we fwait for below.
	 */
	movl	T_CTX(%esi), %eax	/* should current thread savectx? */
	testl	%eax, %eax
	jz	.nosavectx		/* skip call when zero */
	pushl	%esi			/* arg = thread pointer */
	call	savectx			/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.nosavectx:

	/*
	 * Call savepctx if process has installed context ops.
	 */
	movl	T_PROCP(%esi), %eax	/* %eax = proc */
	cmpl	$0, P_PCTX(%eax)	/* should current thread savepctx? */
	je	.nosavepctx		/* skip call when zero */
	pushl	%eax			/* arg = proc pointer */
	call	savepctx		/* call ctx ops */
	addl	$4, %esp
.nosavepctx:

	/*
	 * Temporarily switch to the idle thread's stack
	 */
	movl	CPU_IDLE_THREAD(%ebx), %eax	/* idle thread pointer */

	/*
	 * Set the idle thread as the current thread
	 */
	movl	T_SP(%eax), %esp	/* It is safe to set esp */
	movl	%eax, CPU_THREAD(%ebx)

	/* switch in the hat context for the new thread */
	GET_THREAD_HATP(%ecx, %edi, %ecx)
	pushl	%ecx
	call	hat_switch
	addl	$4, %esp

	/*
	 * Clear and unlock previous thread's t_lock
	 * to allow it to be dispatched by another processor.
	 */
	movb	$0, T_LOCK(%esi)

	/*
	 * IMPORTANT: Registers at this point must be:
	 *       %edi = new thread
	 *
	 * Here we are in the idle thread, have dropped the old thread.
	 */
	ALTENTRY(_resume_from_idle)
	/*
	 * spin until dispatched thread's mutex has
	 * been unlocked. this mutex is unlocked when
	 * it becomes safe for the thread to run.
	 */
.L4:
	lock
	btsl	$0, T_LOCK(%edi) /* lock new thread's mutex */
	jc	.L4_2			/* lock did not succeed */

	/*
	 * Fix CPU structure to indicate new running thread.
	 * Set pointer in new thread to the CPU structure.
	 */
	LOADCPU(%esi)			/* load current CPU pointer */
	movl	T_STACK(%edi), %eax	/* here to use v pipeline of */
					/* Pentium. Used a few lines below */
	cmpl	%esi, T_CPU(%edi)
	jne	.L5_2
.L5_1:
	/*
	 * Setup esp0 (kernel stack) in TSS to curthread's stack.
	 * (Note: Since we don't have saved 'regs' structure for all
	 *	  the threads we can't easily determine if we need to
	 *	  change esp0. So, we simply change the esp0 to bottom
	 *	  of the thread stack and it will work for all cases.)
	 */
	movl	CPU_TSS(%esi), %ecx
	addl	$REGSIZE+MINFRAME, %eax	/* to the bottom of thread stack */
#if !defined(__xpv)
	movl	%eax, TSS_ESP0(%ecx)
#else
	pushl	%eax
	pushl	$KDS_SEL
	call	HYPERVISOR_stack_switch
	addl	$8, %esp
#endif	/* __xpv */

	movl	%edi, CPU_THREAD(%esi)	/* set CPU's thread pointer */
	mfence				/* synchronize with mutex_exit() */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	movl	T_LWP(%edi), %eax	/* set associated lwp to  */
	movl	%eax, CPU_LWP(%esi)	/* CPU's lwp ptr */

	movl	T_SP(%edi), %esp	/* switch to resuming thread's stack */
	movl	T_PC(%edi), %esi	/* saved return addr */

	/*
	 * Call restorectx if context ops have been installed.
	 */
	movl	T_CTX(%edi), %eax	/* should resumed thread restorectx? */
	testl	%eax, %eax
	jz	.norestorectx		/* skip call when zero */
	pushl	%edi			/* arg = thread pointer */
	call	restorectx		/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.norestorectx:

	/*
	 * Call restorepctx if context ops have been installed for the proc.
	 */
	movl	T_PROCP(%edi), %eax
	cmpl	$0, P_PCTX(%eax)
	je	.norestorepctx
	pushl	%eax			/* arg = proc pointer */
	call	restorepctx
	addl	$4, %esp		/* restore stack pointer */
.norestorepctx:

	STORE_INTR_START(%edi)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movl	%esi, %eax		/* save return address */
	RESTORE_REGS(%ecx)
	pushl	%eax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret

.L4_2:
	pause
	cmpb	$0, T_LOCK(%edi)
	je	.L4
	jmp	.L4_2

.L5_2:
	/* cp->cpu_stats.sys.cpumigrate++ */
	addl    $1, CPU_STATS_SYS_CPUMIGRATE(%esi)
	adcl    $0, CPU_STATS_SYS_CPUMIGRATE+4(%esi)
	movl	%esi, T_CPU(%edi)	/* set new thread's CPU pointer */
	jmp	.L5_1

	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

#endif	/* __amd64 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
void
resume_from_zombie(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(resume_from_zombie)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_from_zombie_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_zombie_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */

	/* clean up the fp unit. It might be left enabled */

#if defined(__xpv)		/* XXPV XXtclayton */
	/*
	 * Remove this after bringup.
	 * (Too many #gp's for an instrumented hypervisor.)
	 */
	STTS(%rax)
#else
	movq	%cr0, %rax
	testq	$CR0_TS, %rax
	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
	fninit				/* init fpu & discard pending error */
	orq	$CR0_TS, %rax
	movq	%rax, %cr0
.zfpu_disabled:

#endif	/* __xpv */
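
/*
 * Hedged C-level sketch of the native-FPU cleanup above (getcr0()/setcr0()
 * used only for illustration; the zombie may have died with the FPU enabled,
 * so reset it and re-arm the #NM trap before its state goes away with it):
 *
 *	ulong_t cr0 = getcr0();
 *	if ((cr0 & CR0_TS) == 0) {
 *		// fninit: reset the FPU and discard any pending error
 *		setcr0(cr0 | CR0_TS);
 *	}
 */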

	/*
	 * Temporarily switch to the idle thread's stack so that the zombie
	 * thread's stack can be reclaimed by the reaper.
	 */
	movq	%gs:CPU_IDLE_THREAD, %rax /* idle thread pointer */
	movq	T_SP(%rax), %rsp	/* get onto idle thread stack */

	/*
	 * Sigh. If the idle thread has never run thread_start()
	 * then t_sp is mis-aligned by thread_load().
	 */
	andq	$_BITNOT(STACK_ALIGN-1), %rsp
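	/*
	 * i.e. conceptually %rsp &= ~(STACK_ALIGN - 1), rounding the borrowed
	 * idle stack pointer down to a STACK_ALIGN boundary.
	 */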

	/*
	 * Set the idle thread as the current thread.
	 */
	movq	%rax, %gs:CPU_THREAD

	/* switch in the hat context for the new thread */
	GET_THREAD_HATP(%rdi, %r12, %r11)
	call	hat_switch

	/*
	 * Put the zombie on death-row.
	 */
	movq	%r13, %rdi
	call	reapq_add

	jmp	_resume_from_idle	/* finish job of resume */

resume_from_zombie_return:
	RESTORE_REGS(%r11)		/* restore non-volatile registers */
	call	__dtrace_probe___sched_on__cpu

	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(resume_from_zombie)

#elif defined (__i386)

	ENTRY(resume_from_zombie)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_from_zombie_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_zombie_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */

	/* clean up the fp unit. It might be left enabled */

	movl	%cr0, %eax
	testl	$CR0_TS, %eax
	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
	fninit				/* init fpu & discard pending error */
	orl	$CR0_TS, %eax
	movl	%eax, %cr0
.zfpu_disabled:

	/*
	 * Temporarily switch to the idle thread's stack so that the zombie
	 * thread's stack can be reclaimed by the reaper.
	 */
	movl	%gs:CPU_IDLE_THREAD, %eax /* idle thread pointer */
	movl	T_SP(%eax), %esp	/* get onto idle thread stack */

	/*
	 * Set the idle thread as the current thread.
	 */
	movl	%eax, %gs:CPU_THREAD

	/*
	 * switch in the hat context for the new thread
	 */
	GET_THREAD_HATP(%ecx, %edi, %ecx)
	pushl	%ecx
	call	hat_switch
	addl	$4, %esp

	/*
	 * Put the zombie on death-row.
	 */
	pushl	%esi
	call	reapq_add
	addl	$4, %esp
	jmp	_resume_from_idle	/* finish job of resume */

resume_from_zombie_return:
	RESTORE_REGS(%ecx)		/* restore non-volatile registers */
	call	__dtrace_probe___sched_on__cpu

	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
	SET_SIZE(resume_from_zombie)

#endif	/* __amd64 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
void
resume_from_intr(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(resume_from_intr)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_from_intr_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_intr_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */
	movq	%r12, %gs:CPU_THREAD	/* set CPU's thread pointer */
	mfence				/* synchronize with mutex_exit() */
	movq	T_SP(%r12), %rsp	/* restore resuming thread's sp */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */

	/*
	 * Unlock the outgoing thread's mutex so that it can be dispatched
	 * by another processor.
	 */
	xorl	%eax, %eax
	xchgb	%al, T_LOCK(%r13)
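
	/*
	 * Hedged sketch: the xorl/xchgb pair above is the release half of the
	 * t_lock spin lock taken in _resume_from_idle, i.e. roughly
	 *
	 *	old->t_lock = 0;
	 *
	 * done with an exchange so the store is serializing and the outgoing
	 * thread becomes dispatchable on another CPU only after this CPU has
	 * switched onto the resuming thread's stack.
	 */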

	STORE_INTR_START(%r12)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movq	T_PC(%r12), %rax	/* saved return addr */
	RESTORE_REGS(%r11);
	pushq	%rax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_from_intr_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(resume_from_intr)

#elif defined (__i386)

	ENTRY(resume_from_intr)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_from_intr_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_intr_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */
	movl	%edi, %gs:CPU_THREAD	/* set CPU's thread pointer */
	mfence				/* synchronize with mutex_exit() */
	movl	T_SP(%edi), %esp	/* restore resuming thread's sp */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */

	/*
	 * Unlock the outgoing thread's mutex so that it can be dispatched
	 * by another processor.
	 */
	xorl	%eax,%eax
	xchgb	%al, T_LOCK(%esi)

	STORE_INTR_START(%edi)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movl	T_PC(%edi), %eax	/* saved return addr */
	RESTORE_REGS(%ecx)
	pushl	%eax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_from_intr_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
	SET_SIZE(resume_from_intr)

#endif	/* __amd64 */
#endif /* __lint */

#if defined(__lint)

void
thread_start(void)
{}

#else   /* __lint */

#if defined(__amd64)

	ENTRY(thread_start)
	popq	%rax		/* start() */
	popq	%rdi		/* arg */
	popq	%rsi		/* len */
	movq	%rsp, %rbp
	call	*%rax
	call	thread_exit	/* destroy thread if it returns. */
	/*NOTREACHED*/
	SET_SIZE(thread_start)
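
/*
 * Hedged C-level picture of thread_start(): thread_load() leaves the start
 * routine, its argument and the argument length on the new thread's stack,
 * so the code above amounts to
 *
 *	void
 *	thread_start(void)
 *	{
 *		void (*start)(void *, size_t);	// popped from the stack
 *		void *arg;			// popped from the stack
 *		size_t len;			// popped from the stack
 *
 *		(*start)(arg, len);
 *		thread_exit();			// only if start() returns
 *	}
 */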

#elif defined(__i386)

	ENTRY(thread_start)
	popl	%eax
	movl	%esp, %ebp
	addl	$8, %ebp
	call	*%eax
	addl	$8, %esp
	call	thread_exit	/* destroy thread if it returns. */
	/*NOTREACHED*/
	SET_SIZE(thread_start)

#endif	/* __i386 */

#endif  /* __lint */
894