/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Process switching routines.
 */

#if defined(__lint)
#include <sys/thread.h>
#include <sys/systm.h>
#include <sys/time.h>
#else	/* __lint */
#include "assym.h"
#endif	/* __lint */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/stack.h>
#include <sys/segments.h>

/*
 * resume(kthread_t *t);
 *
 * A thread can only run on one processor at a time.  There
 * exists a window on MPs where the current thread on one
 * processor is capable of being dispatched by another processor.
 * Some overlap between outgoing and incoming threads can happen
 * when they are the same thread.  In this case, where the threads
 * are the same, resume() on one processor will spin on the incoming
 * thread until resume() on the other processor has finished with
 * the outgoing thread.
 *
 * The MMU context changes when the resuming thread resides in a different
 * process.  Kernel threads are known by resume to reside in process 0.
 * The MMU context, therefore, only changes when resuming a thread in
 * a process different from curproc.
 *
 * resume_from_intr() is called when the thread being resumed was not
 * passivated by resume (e.g. was interrupted).  This means that the
 * resume lock is already held and that a restore context is not needed.
 * Also, the MMU context is not changed on the resume in this case.
 *
 * resume_from_zombie() is the same as resume except the calling thread
 * is a zombie and must be put on the deathrow list after the CPU is
 * off the stack.
 */
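
/*
 * For orientation, a hedged C-level sketch (not assembled, and not an
 * interface defined by this file) of how the dispatcher is typically
 * expected to reach resume(); the disp() call and the omitted locking and
 * spl handling are assumptions of the sketch:
 *
 *	void
 *	swtch(void)
 *	{
 *		kthread_t *next = disp();	(pick next runnable thread)
 *
 *		if (next != curthread)
 *			resume(next);		(returns only when this thread
 *						 is itself resumed later)
 *	}
 */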

#if !defined(__lint)

#if LWP_PCB_FPU != 0
#error LWP_PCB_FPU MUST be defined as 0 for code in swtch.s to work
#endif	/* LWP_PCB_FPU != 0 */

#endif	/* !__lint */

#if defined(__amd64)

/*
 * Save non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
 *
 * The stack frame must be created before the save of %rsp so that tracebacks
 * of swtch()ed-out processes show the process as having last called swtch().
 */
#define SAVE_REGS(thread_t, retaddr)			\
	movq	%rbp, T_RBP(thread_t);			\
	movq	%rbx, T_RBX(thread_t);			\
	movq	%r12, T_R12(thread_t);			\
	movq	%r13, T_R13(thread_t);			\
	movq	%r14, T_R14(thread_t);			\
	movq	%r15, T_R15(thread_t);			\
	pushq	%rbp;					\
	movq	%rsp, %rbp;				\
	movq	%rsp, T_SP(thread_t);			\
	movq	retaddr, T_PC(thread_t);		\
	movq	%rdi, %r12;				\
	call	__dtrace_probe___sched_off__cpu

/*
 * Restore non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
 *
 * We load up %rsp from the label_t as part of the context switch, so
 * we don't repeat that here.
 *
 * We don't do a 'leave,' because reloading %rsp/%rbp from the label_t
 * already has the effect of putting the stack back the way it was when
 * we came in.
 */
#define RESTORE_REGS(scratch_reg)			\
	movq	%gs:CPU_THREAD, scratch_reg;		\
	movq	T_RBP(scratch_reg), %rbp;		\
	movq	T_RBX(scratch_reg), %rbx;		\
	movq	T_R12(scratch_reg), %r12;		\
	movq	T_R13(scratch_reg), %r13;		\
	movq	T_R14(scratch_reg), %r14;		\
	movq	T_R15(scratch_reg), %r15
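
/*
 * Taken together, SAVE_REGS/RESTORE_REGS behave roughly like a
 * kernel-private setjmp/longjmp over the thread's label_t (its t_pcb);
 * this is an explanatory analogy, not an additional interface:
 *
 *	SAVE_REGS(t, ra)	~ capture callee-saved regs in t,
 *				  t_sp = %rsp, t_pc = ra
 *	RESTORE_REGS(scratch)	~ reload callee-saved regs of whatever
 *				  thread %gs:CPU_THREAD now points at
 *				  (t_sp/t_pc are reloaded by the resume
 *				  code itself)
 */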

/*
 * Get pointer to a thread's hat structure
 */
#define GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movq	T_PROCP(thread_t), hatp;		\
	movq	P_AS(hatp), scratch_reg;		\
	movq	A_HAT(scratch_reg), hatp
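
/*
 * In C terms the macro above is simply
 *
 *	hatp = thread_t->t_procp->p_as->a_hat;
 *
 * i.e. the hat (MMU context) of the address space of the thread's process.
 */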

#if defined (__xpv)

#define	TSC_READ()					\
	call	tsc_read;				\
	movq	%rax, %r14;

#else

#define	TSC_READ()					\
	rdtsc;						\
	shlq	$32, %rdx;				\
	movl	%eax, %r14d;				\
	orq	%rdx, %r14

#endif

/*
 * If we are resuming an interrupt thread, store a timestamp in the thread
 * structure.  If an interrupt occurs between tsc_read() and its subsequent
 * store, the timestamp will be stale by the time it is stored.  We can detect
 * this by doing a compare-and-swap on the thread's timestamp, since any
 * interrupt occurring in this window will put a new timestamp in the thread's
 * t_intr_start field.
 */
#define	STORE_INTR_START(thread_t)			\
	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
	jz	1f;					\
0:							\
	TSC_READ();					\
	movq	T_INTR_START(thread_t), %rax;		\
	cmpxchgq %r14, T_INTR_START(thread_t);		\
	jnz	0b;					\
1:
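
/*
 * A hedged C rendering of the retry loop above (illustrative only;
 * atomic_cas_64() stands in for the cmpxchg):
 *
 *	if (t->t_flags & T_INTR_THREAD) {
 *		do {
 *			new = tsc_read();
 *			old = t->t_intr_start;
 *		} while (atomic_cas_64(&t->t_intr_start, old, new) != old);
 *	}
 *
 * If an interrupt fires after the timestamp is read, it leaves a fresher
 * value in t_intr_start, the compare-and-swap fails, and we retry.
 */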

#elif defined (__i386)

/*
 * Save non-volatile registers (%ebp, %esi, %edi and %ebx)
 *
 * The stack frame must be created before the save of %esp so that tracebacks
 * of swtch()ed-out processes show the process as having last called swtch().
 */
#define SAVE_REGS(thread_t, retaddr)			\
	movl	%ebp, T_EBP(thread_t);			\
	movl	%ebx, T_EBX(thread_t);			\
	movl	%esi, T_ESI(thread_t);			\
	movl	%edi, T_EDI(thread_t);			\
	pushl	%ebp;					\
	movl	%esp, %ebp;				\
	movl	%esp, T_SP(thread_t);			\
	movl	retaddr, T_PC(thread_t);		\
	movl	8(%ebp), %edi;				\
	pushl	%edi;					\
	call	__dtrace_probe___sched_off__cpu;	\
	addl	$CLONGSIZE, %esp

/*
 * Restore non-volatile registers (%ebp, %esi, %edi and %ebx)
 *
 * We don't do a 'leave,' because reloading %esp/%ebp from the label_t
 * already has the effect of putting the stack back the way it was when
 * we came in.
 */
#define RESTORE_REGS(scratch_reg)			\
	movl	%gs:CPU_THREAD, scratch_reg;		\
	movl	T_EBP(scratch_reg), %ebp;		\
	movl	T_EBX(scratch_reg), %ebx;		\
	movl	T_ESI(scratch_reg), %esi;		\
	movl	T_EDI(scratch_reg), %edi

/*
 * Get pointer to a thread's hat structure
 */
#define GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movl	T_PROCP(thread_t), hatp;		\
	movl	P_AS(hatp), scratch_reg;		\
	movl	A_HAT(scratch_reg), hatp

/*
 * If we are resuming an interrupt thread, store a timestamp in the thread
 * structure.  If an interrupt occurs between tsc_read() and its subsequent
 * store, the timestamp will be stale by the time it is stored.  We can detect
 * this by doing a compare-and-swap on the thread's timestamp, since any
 * interrupt occurring in this window will put a new timestamp in the thread's
 * t_intr_start field.
 */
#define	STORE_INTR_START(thread_t)			\
	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
	jz	1f;					\
	pushl	%ecx;					\
0:							\
	pushl	T_INTR_START(thread_t);			\
	pushl	T_INTR_START+4(thread_t);		\
	call	tsc_read;				\
	movl	%eax, %ebx;				\
	movl	%edx, %ecx;				\
	popl	%edx;					\
	popl	%eax;					\
	cmpxchg8b T_INTR_START(thread_t);		\
	jnz	0b;					\
	popl	%ecx;					\
1:
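
/*
 * Note on the 32-bit sequence above: cmpxchg8b compares %edx:%eax against
 * the 64-bit t_intr_start and, on a match, stores %ecx:%ebx.  tsc_read()
 * returns the new timestamp in %edx:%eax, so it is copied into %ecx:%ebx,
 * while the old t_intr_start is staged on the stack and popped back into
 * %edx:%eax as the comparand.  %ecx is saved and restored around the whole
 * sequence since it is clobbered as the high half of the new value.
 */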

#endif	/* __amd64 */

#if defined(__lint)

/* ARGSUSED */
void
resume(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(resume)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	LOADCPU(%r15)				/* %r15 = CPU */
	movq	CPU_THREAD(%r15), %r13		/* %r13 = curthread */

	/*
	 * Call savectx if thread has installed context ops.
	 *
	 * Note that if we have floating point context, the save op
	 * (either fpsave_begin or fpxsave_begin) will issue the
	 * async save instruction (fnsave or fxsave respectively)
	 * that we fwait for below.
	 */
	cmpq	$0, T_CTX(%r13)		/* should current thread savectx? */
	je	.nosavectx		/* skip call when zero */

	movq	%r13, %rdi		/* arg = thread pointer */
	call	savectx			/* call ctx ops */
.nosavectx:

	/*
	 * Call savepctx if process has installed context ops.
	 */
	movq	T_PROCP(%r13), %r14	/* %r14 = proc */
	cmpq	$0, P_PCTX(%r14)	/* should current thread savepctx? */
	je	.nosavepctx		/* skip call when zero */

	movq	%r14, %rdi		/* arg = proc pointer */
	call	savepctx		/* call ctx ops */
.nosavepctx:

	/*
	 * Temporarily switch to the idle thread's stack
	 */
	movq	CPU_IDLE_THREAD(%r15), %rax	/* idle thread pointer */

	/*
	 * Set the idle thread as the current thread
	 */
	movq	T_SP(%rax), %rsp	/* It is safe to set rsp */
	movq	%rax, CPU_THREAD(%r15)

	/*
	 * Switch in the hat context for the new thread
	 */
	GET_THREAD_HATP(%rdi, %r12, %r11)
	call	hat_switch

	/*
	 * Clear and unlock previous thread's t_lock
	 * to allow it to be dispatched by another processor.
	 */
	movb	$0, T_LOCK(%r13)

	/*
	 * IMPORTANT: Registers at this point must be:
	 *       %r12 = new thread
	 *
	 * Here we are in the idle thread, have dropped the old thread.
	 */
	ALTENTRY(_resume_from_idle)
	/*
	 * spin until dispatched thread's mutex has
	 * been unlocked. this mutex is unlocked when
	 * it becomes safe for the thread to run.
	 */
.lock_thread_mutex:
	lock
	btsl	$0, T_LOCK(%r12)	/* attempt to lock new thread's mutex */
	jnc	.thread_mutex_locked	/* got it */

.spin_thread_mutex:
	pause
	cmpb	$0, T_LOCK(%r12)	/* check mutex status */
	jz	.lock_thread_mutex	/* clear, retry lock */
	jmp	.spin_thread_mutex	/* still locked, spin... */
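
	/*
	 * The loop above is, in effect (illustrative C only; lock_try()
	 * and SMT_PAUSE() stand in for the bts and pause instructions):
	 *
	 *	while (lock_try(&t->t_lock) == 0) {
	 *		while (t->t_lock != 0)
	 *			SMT_PAUSE();
	 *	}
	 *
	 * i.e. spin read-only until the holder drops t_lock, then retry
	 * the locked bts, which keeps the line shared while we wait.
	 */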

.thread_mutex_locked:
	/*
	 * Fix CPU structure to indicate new running thread.
	 * Set pointer in new thread to the CPU structure.
	 */
	LOADCPU(%r13)			/* load current CPU pointer */
	cmpq	%r13, T_CPU(%r12)
	je	.setup_cpu

	/* cp->cpu_stats.sys.cpumigrate++ */
	incq	CPU_STATS_SYS_CPUMIGRATE(%r13)
	movq	%r13, T_CPU(%r12)	/* set new thread's CPU pointer */

.setup_cpu:
	/*
	 * Setup rsp0 (kernel stack) in TSS to curthread's stack.
	 * (Note: Since we don't have saved 'regs' structure for all
	 *	  the threads we can't easily determine if we need to
	 *	  change rsp0. So, we simply change the rsp0 to bottom
	 *	  of the thread stack and it will work for all cases.)
	 *
	 * XX64 - Is this correct?
	 */
	movq	CPU_TSS(%r13), %r14
	movq	T_STACK(%r12), %rax
	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
#if !defined(__xpv)
	movq	%rax, TSS_RSP0(%r14)
#else
	movl	$KDS_SEL, %edi
	movq	%rax, %rsi
	call	HYPERVISOR_stack_switch
#endif	/* __xpv */

	movq	%r12, CPU_THREAD(%r13)	/* set CPU's thread pointer */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	movq	T_LWP(%r12), %rax	/* set associated lwp to  */
	movq	%rax, CPU_LWP(%r13)	/* CPU's lwp ptr */

	movq	T_SP(%r12), %rsp	/* switch to resuming thread's stack */
	movq	T_PC(%r12), %r13	/* saved return addr */

	/*
	 * Call restorectx if context ops have been installed.
	 */
	cmpq	$0, T_CTX(%r12)		/* should resumed thread restorectx? */
	jz	.norestorectx		/* skip call when zero */
	movq	%r12, %rdi		/* arg = thread pointer */
	call	restorectx		/* call ctx ops */
.norestorectx:

	/*
	 * Call restorepctx if context ops have been installed for the proc.
	 */
	movq	T_PROCP(%r12), %rcx
	cmpq	$0, P_PCTX(%rcx)
	jz	.norestorepctx
	movq	%rcx, %rdi
	call	restorepctx
.norestorepctx:

	STORE_INTR_START(%r12)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movq	%r13, %rax	/* save return address */
	RESTORE_REGS(%r11)
	pushq	%rax		/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

#elif defined (__i386)

	ENTRY(resume)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

	LOADCPU(%ebx)			/* %ebx = CPU */
	movl	CPU_THREAD(%ebx), %esi	/* %esi = curthread */

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	/*
	 * Call savectx if thread has installed context ops.
	 *
	 * Note that if we have floating point context, the save op
	 * (either fpsave_begin or fpxsave_begin) will issue the
	 * async save instruction (fnsave or fxsave respectively)
	 * that we fwait for below.
	 */
	movl	T_CTX(%esi), %eax	/* should current thread savectx? */
	testl	%eax, %eax
	jz	.nosavectx		/* skip call when zero */
	pushl	%esi			/* arg = thread pointer */
	call	savectx			/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.nosavectx:

	/*
	 * Call savepctx if process has installed context ops.
	 */
	movl	T_PROCP(%esi), %eax	/* %eax = proc */
	cmpl	$0, P_PCTX(%eax)	/* should current thread savepctx? */
	je	.nosavepctx		/* skip call when zero */
	pushl	%eax			/* arg = proc pointer */
	call	savepctx		/* call ctx ops */
	addl	$4, %esp
.nosavepctx:

	/*
	 * Temporarily switch to the idle thread's stack
	 */
	movl	CPU_IDLE_THREAD(%ebx), %eax	/* idle thread pointer */

	/*
	 * Set the idle thread as the current thread
	 */
	movl	T_SP(%eax), %esp	/* It is safe to set esp */
	movl	%eax, CPU_THREAD(%ebx)

	/* switch in the hat context for the new thread */
	GET_THREAD_HATP(%ecx, %edi, %ecx)
	pushl	%ecx
	call	hat_switch
	addl	$4, %esp

	/*
	 * Clear and unlock previous thread's t_lock
	 * to allow it to be dispatched by another processor.
	 */
	movb	$0, T_LOCK(%esi)

	/*
	 * IMPORTANT: Registers at this point must be:
	 *       %edi = new thread
	 *
	 * Here we are in the idle thread, have dropped the old thread.
	 */
	ALTENTRY(_resume_from_idle)
	/*
	 * spin until dispatched thread's mutex has
	 * been unlocked. this mutex is unlocked when
	 * it becomes safe for the thread to run.
	 */
.L4:
	lock
	btsl	$0, T_LOCK(%edi)	/* lock new thread's mutex */
	jc	.L4_2			/* lock did not succeed */

	/*
	 * Fix CPU structure to indicate new running thread.
	 * Set pointer in new thread to the CPU structure.
	 */
	LOADCPU(%esi)			/* load current CPU pointer */
	movl	T_STACK(%edi), %eax	/* loaded early to use the Pentium's */
					/* V pipeline; used a few lines below */
	cmpl	%esi, T_CPU(%edi)
	jne	.L5_2
.L5_1:
	/*
	 * Setup esp0 (kernel stack) in TSS to curthread's stack.
	 * (Note: Since we don't have saved 'regs' structure for all
	 *	  the threads we can't easily determine if we need to
	 *	  change esp0. So, we simply change the esp0 to bottom
	 *	  of the thread stack and it will work for all cases.)
	 */
	movl	CPU_TSS(%esi), %ecx
	addl	$REGSIZE+MINFRAME, %eax	/* to the bottom of thread stack */
#if !defined(__xpv)
	movl	%eax, TSS_ESP0(%ecx)
#else
	pushl	%eax
	pushl	$KDS_SEL
	call	HYPERVISOR_stack_switch
	addl	$8, %esp
#endif	/* __xpv */

	movl	%edi, CPU_THREAD(%esi)	/* set CPU's thread pointer */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	movl	T_LWP(%edi), %eax	/* set associated lwp to  */
	movl	%eax, CPU_LWP(%esi)	/* CPU's lwp ptr */

	movl	T_SP(%edi), %esp	/* switch to resuming thread's stack */
	movl	T_PC(%edi), %esi	/* saved return addr */

	/*
	 * Call restorectx if context ops have been installed.
	 */
	movl	T_CTX(%edi), %eax	/* should resumed thread restorectx? */
	testl	%eax, %eax
	jz	.norestorectx		/* skip call when zero */
	pushl	%edi			/* arg = thread pointer */
	call	restorectx		/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.norestorectx:

	/*
	 * Call restorepctx if context ops have been installed for the proc.
	 */
	movl	T_PROCP(%edi), %eax
	cmpl	$0, P_PCTX(%eax)
	je	.norestorepctx
	pushl	%eax			/* arg = proc pointer */
	call	restorepctx
	addl	$4, %esp		/* restore stack pointer */
.norestorepctx:

	STORE_INTR_START(%edi)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movl	%esi, %eax		/* save return address */
	RESTORE_REGS(%ecx)
	pushl	%eax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret

.L4_2:
	pause
	cmpb	$0, T_LOCK(%edi)
	je	.L4
	jmp	.L4_2

.L5_2:
	/* cp->cpu_stats.sys.cpumigrate++ */
	addl	$1, CPU_STATS_SYS_CPUMIGRATE(%esi)
	adcl	$0, CPU_STATS_SYS_CPUMIGRATE+4(%esi)
	movl	%esi, T_CPU(%edi)	/* set new thread's CPU pointer */
	jmp	.L5_1

	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

#endif	/* __amd64 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
void
resume_from_zombie(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(resume_from_zombie)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_from_zombie_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_zombie_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */

	/* clean up the fp unit. It might be left enabled */

#if defined(__xpv)		/* XXPV XXtclayton */
	/*
	 * Remove this after bringup.
	 * (Too many #gp's for an instrumented hypervisor.)
	 */
	STTS(%rax)
#else
	movq	%cr0, %rax
	testq	$CR0_TS, %rax
	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
	fninit				/* init fpu & discard pending error */
	orq	$CR0_TS, %rax
	movq	%rax, %cr0
.zfpu_disabled:

#endif	/* __xpv */

	/*
	 * Temporarily switch to the idle thread's stack so that the zombie
	 * thread's stack can be reclaimed by the reaper.
	 */
	movq	%gs:CPU_IDLE_THREAD, %rax /* idle thread pointer */
	movq	T_SP(%rax), %rsp	/* get onto idle thread stack */

	/*
	 * Sigh. If the idle thread has never run thread_start()
	 * then t_sp is mis-aligned by thread_load().
	 */
	andq	$_BITNOT(STACK_ALIGN-1), %rsp
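
	/*
	 * For example, with STACK_ALIGN assumed to be 16 (the amd64 ABI
	 * stack alignment), the mask is ~0xf, so a t_sp ending in ...78
	 * is rounded down to ...70 before we run on it.
	 */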

	/*
	 * Set the idle thread as the current thread.
	 */
	movq	%rax, %gs:CPU_THREAD

	/* switch in the hat context for the new thread */
	GET_THREAD_HATP(%rdi, %r12, %r11)
	call	hat_switch

	/*
	 * Put the zombie on death-row.
	 */
	movq	%r13, %rdi
	call	reapq_add

	jmp	_resume_from_idle	/* finish job of resume */

resume_from_zombie_return:
	RESTORE_REGS(%r11)		/* restore non-volatile registers */
	call	__dtrace_probe___sched_on__cpu

	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(resume_from_zombie)

#elif defined (__i386)

	ENTRY(resume_from_zombie)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_from_zombie_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_zombie_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */

	/* clean up the fp unit. It might be left enabled */

	movl	%cr0, %eax
	testl	$CR0_TS, %eax
	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
	fninit				/* init fpu & discard pending error */
	orl	$CR0_TS, %eax
	movl	%eax, %cr0
.zfpu_disabled:

	/*
	 * Temporarily switch to the idle thread's stack so that the zombie
	 * thread's stack can be reclaimed by the reaper.
	 */
	movl	%gs:CPU_IDLE_THREAD, %eax /* idle thread pointer */
	movl	T_SP(%eax), %esp	/* get onto idle thread stack */

	/*
	 * Set the idle thread as the current thread.
	 */
	movl	%eax, %gs:CPU_THREAD

	/*
	 * switch in the hat context for the new thread
	 */
	GET_THREAD_HATP(%ecx, %edi, %ecx)
	pushl	%ecx
	call	hat_switch
	addl	$4, %esp

	/*
	 * Put the zombie on death-row.
	 */
	pushl	%esi
	call	reapq_add
	addl	$4, %esp
	jmp	_resume_from_idle	/* finish job of resume */

resume_from_zombie_return:
	RESTORE_REGS(%ecx)		/* restore non-volatile registers */
	call	__dtrace_probe___sched_on__cpu

	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
	SET_SIZE(resume_from_zombie)

#endif	/* __amd64 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
void
resume_from_intr(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(resume_from_intr)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_from_intr_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_intr_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */
	movq	%r12, %gs:CPU_THREAD	/* set CPU's thread pointer */
	movq	T_SP(%r12), %rsp	/* restore resuming thread's sp */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */

	/*
	 * Unlock the outgoing thread's mutex so that it can be dispatched
	 * by another processor.
	 */
	xorl	%eax, %eax
	xchgb	%al, T_LOCK(%r13)

	STORE_INTR_START(%r12)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movq	T_PC(%r12), %rax	/* saved return addr */
	RESTORE_REGS(%r11)
	pushq	%rax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_from_intr_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(resume_from_intr)

#elif defined (__i386)

	ENTRY(resume_from_intr)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_from_intr_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_intr_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */
	movl	%edi, %gs:CPU_THREAD	/* set CPU's thread pointer */
	movl	T_SP(%edi), %esp	/* restore resuming thread's sp */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */

	/*
	 * Unlock the outgoing thread's mutex so that it can be dispatched
	 * by another processor.
	 */
	xorl	%eax, %eax
	xchgb	%al, T_LOCK(%esi)

	STORE_INTR_START(%edi)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movl	T_PC(%edi), %eax	/* saved return addr */
	RESTORE_REGS(%ecx)
	pushl	%eax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_from_intr_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
	SET_SIZE(resume_from_intr)

#endif	/* __amd64 */
#endif	/* __lint */

#if defined(__lint)

void
thread_start(void)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(thread_start)
	popq	%rax		/* start() */
	popq	%rdi		/* arg */
	popq	%rsi		/* len */
	movq	%rsp, %rbp
	call	*%rax
	call	thread_exit	/* destroy thread if it returns. */
	/*NOTREACHED*/
	SET_SIZE(thread_start)
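
/*
 * A hedged sketch of the initial stack thread_start expects, presumably as
 * laid down by thread_load() when the thread was created (that layout is an
 * assumption here, not something defined in this file):
 *
 *	(%rsp) ->	start routine		popped into %rax
 *			arg			popped into %rdi
 *			len			popped into %rsi
 *
 * so the code above amounts to start(arg, len) followed by thread_exit()
 * if start ever returns.
 */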

#elif defined(__i386)

	ENTRY(thread_start)
	popl	%eax		/* start() */
	movl	%esp, %ebp	/* arg and len are at (%esp) and 4(%esp) */
	addl	$8, %ebp
	call	*%eax		/* start(arg, len) */
	addl	$8, %esp	/* pop arg and len */
	call	thread_exit	/* destroy thread if it returns. */
	/*NOTREACHED*/
	SET_SIZE(thread_start)

#endif	/* __i386 */

#endif	/* __lint */
