xref: /titanic_52/usr/src/uts/intel/ia32/ml/swtch.s (revision 49dc33e37f0b57cc47ce0a40a5dffaf6627bae4d)
17c478bd9Sstevel@tonic-gate/*
27c478bd9Sstevel@tonic-gate * CDDL HEADER START
37c478bd9Sstevel@tonic-gate *
47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the
5ae115bc7Smrj * Common Development and Distribution License (the "License").
6ae115bc7Smrj * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate *
87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate * and limitations under the License.
127c478bd9Sstevel@tonic-gate *
137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate *
197c478bd9Sstevel@tonic-gate * CDDL HEADER END
207c478bd9Sstevel@tonic-gate */
217c478bd9Sstevel@tonic-gate/*
22ae115bc7Smrj * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate */
257c478bd9Sstevel@tonic-gate
26*49dc33e3SBryan Cantrill/*
27*49dc33e3SBryan Cantrill * Copyright (c) 2013, Joyent, Inc. All rights reserved.
28*49dc33e3SBryan Cantrill */
297c478bd9Sstevel@tonic-gate
307c478bd9Sstevel@tonic-gate/*
317c478bd9Sstevel@tonic-gate * Process switching routines.
327c478bd9Sstevel@tonic-gate */
337c478bd9Sstevel@tonic-gate
347c478bd9Sstevel@tonic-gate#if defined(__lint)
357c478bd9Sstevel@tonic-gate#include <sys/thread.h>
367c478bd9Sstevel@tonic-gate#include <sys/systm.h>
377c478bd9Sstevel@tonic-gate#include <sys/time.h>
387c478bd9Sstevel@tonic-gate#else	/* __lint */
397c478bd9Sstevel@tonic-gate#include "assym.h"
407c478bd9Sstevel@tonic-gate#endif	/* __lint */
417c478bd9Sstevel@tonic-gate
427c478bd9Sstevel@tonic-gate#include <sys/asm_linkage.h>
437c478bd9Sstevel@tonic-gate#include <sys/asm_misc.h>
447c478bd9Sstevel@tonic-gate#include <sys/regset.h>
457c478bd9Sstevel@tonic-gate#include <sys/privregs.h>
467c478bd9Sstevel@tonic-gate#include <sys/stack.h>
477c478bd9Sstevel@tonic-gate#include <sys/segments.h>
487c478bd9Sstevel@tonic-gate
497c478bd9Sstevel@tonic-gate/*
507c478bd9Sstevel@tonic-gate * resume(thread_id_t t);
517c478bd9Sstevel@tonic-gate *
527c478bd9Sstevel@tonic-gate * a thread can only run on one processor at a time. there
537c478bd9Sstevel@tonic-gate * exists a window on MPs where the current thread on one
547c478bd9Sstevel@tonic-gate * processor is capable of being dispatched by another processor.
557c478bd9Sstevel@tonic-gate * some overlap between outgoing and incoming threads can happen
567c478bd9Sstevel@tonic-gate * when they are the same thread. in this case where the threads
577c478bd9Sstevel@tonic-gate * are the same, resume() on one processor will spin on the incoming
587c478bd9Sstevel@tonic-gate * thread until resume() on the other processor has finished with
597c478bd9Sstevel@tonic-gate * the outgoing thread.
607c478bd9Sstevel@tonic-gate *
617c478bd9Sstevel@tonic-gate * The MMU context changes when the resuming thread resides in a different
627c478bd9Sstevel@tonic-gate * process.  Kernel threads are known by resume to reside in process 0.
637c478bd9Sstevel@tonic-gate * The MMU context, therefore, only changes when resuming a thread in
647c478bd9Sstevel@tonic-gate * a process different from curproc.
657c478bd9Sstevel@tonic-gate *
667c478bd9Sstevel@tonic-gate * resume_from_intr() is called when the thread being resumed was not
677c478bd9Sstevel@tonic-gate * passivated by resume (e.g. was interrupted).  This means that the
687c478bd9Sstevel@tonic-gate * resume lock is already held and that a restore context is not needed.
697c478bd9Sstevel@tonic-gate * Also, the MMU context is not changed on the resume in this case.
707c478bd9Sstevel@tonic-gate *
717c478bd9Sstevel@tonic-gate * resume_from_zombie() is the same as resume except the calling thread
727c478bd9Sstevel@tonic-gate * is a zombie and must be put on the deathrow list after the CPU is
737c478bd9Sstevel@tonic-gate * off the stack.
747c478bd9Sstevel@tonic-gate */
757c478bd9Sstevel@tonic-gate
767c478bd9Sstevel@tonic-gate#if !defined(__lint)
777c478bd9Sstevel@tonic-gate
787c478bd9Sstevel@tonic-gate#if LWP_PCB_FPU != 0
797c478bd9Sstevel@tonic-gate#error LWP_PCB_FPU MUST be defined as 0 for code in swtch.s to work
807c478bd9Sstevel@tonic-gate#endif	/* LWP_PCB_FPU != 0 */
817c478bd9Sstevel@tonic-gate
827c478bd9Sstevel@tonic-gate#endif	/* !__lint */
837c478bd9Sstevel@tonic-gate
847c478bd9Sstevel@tonic-gate#if defined(__amd64)
857c478bd9Sstevel@tonic-gate
867c478bd9Sstevel@tonic-gate/*
877c478bd9Sstevel@tonic-gate * Save non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
887c478bd9Sstevel@tonic-gate *
897c478bd9Sstevel@tonic-gate * The stack frame must be created before the save of %rsp so that tracebacks
907c478bd9Sstevel@tonic-gate * of swtch()ed-out processes show the process as having last called swtch().
 *
 * Besides the callee-saved registers, this also:
 *  - records the post-frame %rsp and retaddr in the thread's t_sp/t_pc,
 *    which is where resume() later picks them up to switch this thread
 *    back in,
 *  - copies %rdi (resume()'s argument, the incoming thread) into %r12,
 *    where the remainder of resume() expects to find it,
 *  - fires the sched:::off-cpu DTrace probe for the outgoing thread.
917c478bd9Sstevel@tonic-gate */
927c478bd9Sstevel@tonic-gate#define SAVE_REGS(thread_t, retaddr)			\
937c478bd9Sstevel@tonic-gate	movq	%rbp, T_RBP(thread_t);			\
947c478bd9Sstevel@tonic-gate	movq	%rbx, T_RBX(thread_t);			\
957c478bd9Sstevel@tonic-gate	movq	%r12, T_R12(thread_t);			\
967c478bd9Sstevel@tonic-gate	movq	%r13, T_R13(thread_t);			\
977c478bd9Sstevel@tonic-gate	movq	%r14, T_R14(thread_t);			\
987c478bd9Sstevel@tonic-gate	movq	%r15, T_R15(thread_t);			\
997c478bd9Sstevel@tonic-gate	pushq	%rbp;					\
1007c478bd9Sstevel@tonic-gate	movq	%rsp, %rbp;				\
1017c478bd9Sstevel@tonic-gate	movq	%rsp, T_SP(thread_t);			\
1027c478bd9Sstevel@tonic-gate	movq	retaddr, T_PC(thread_t);		\
1037c478bd9Sstevel@tonic-gate	movq	%rdi, %r12;				\
1047c478bd9Sstevel@tonic-gate	call	__dtrace_probe___sched_off__cpu
1057c478bd9Sstevel@tonic-gate
1067c478bd9Sstevel@tonic-gate/*
1077c478bd9Sstevel@tonic-gate * Restore non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
1087c478bd9Sstevel@tonic-gate *
1097c478bd9Sstevel@tonic-gate * We load up %rsp from the label_t as part of the context switch, so
1107c478bd9Sstevel@tonic-gate * we don't repeat that here.
1117c478bd9Sstevel@tonic-gate *
1127c478bd9Sstevel@tonic-gate * We don't do a 'leave,' because reloading %rsp/%rbp from the label_t
1137c478bd9Sstevel@tonic-gate * already has the effect of putting the stack back the way it was when
1147c478bd9Sstevel@tonic-gate * we came in.
 *
 * scratch_reg is loaded with the now-current thread (%gs:CPU_THREAD)
 * and still holds that thread pointer on completion.
1157c478bd9Sstevel@tonic-gate */
1167c478bd9Sstevel@tonic-gate#define RESTORE_REGS(scratch_reg)			\
1177c478bd9Sstevel@tonic-gate	movq	%gs:CPU_THREAD, scratch_reg;		\
1187c478bd9Sstevel@tonic-gate	movq	T_RBP(scratch_reg), %rbp;		\
1197c478bd9Sstevel@tonic-gate	movq	T_RBX(scratch_reg), %rbx;		\
1207c478bd9Sstevel@tonic-gate	movq	T_R12(scratch_reg), %r12;		\
1217c478bd9Sstevel@tonic-gate	movq	T_R13(scratch_reg), %r13;		\
1227c478bd9Sstevel@tonic-gate	movq	T_R14(scratch_reg), %r14;		\
1237c478bd9Sstevel@tonic-gate	movq	T_R15(scratch_reg), %r15
1247c478bd9Sstevel@tonic-gate
1257c478bd9Sstevel@tonic-gate/*
1267c478bd9Sstevel@tonic-gate * Get pointer to a thread's hat structure
 *
 * Computes hatp = thread_t->t_procp->p_as->a_hat.
 * scratch_reg is clobbered (holds the p_as pointer on completion);
 * hatp itself is used as an intermediate, so it must differ from
 * thread_t and scratch_reg.
1277c478bd9Sstevel@tonic-gate */
1287c478bd9Sstevel@tonic-gate#define GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
1297c478bd9Sstevel@tonic-gate	movq	T_PROCP(thread_t), hatp;		\
1307c478bd9Sstevel@tonic-gate	movq	P_AS(hatp), scratch_reg;		\
1317c478bd9Sstevel@tonic-gate	movq	A_HAT(scratch_reg), hatp
1327c478bd9Sstevel@tonic-gate
/*
 * TSC_READ() calls tsc_read() and copies the returned timestamp
 * (in %rax) into %r14, where STORE_INTR_START below expects it.
 */
133843e1988Sjohnlev#define	TSC_READ()					\
134843e1988Sjohnlev	call	tsc_read;				\
135843e1988Sjohnlev	movq	%rax, %r14;
136843e1988Sjohnlev
137843e1988Sjohnlev/*
138843e1988Sjohnlev * If we are resuming an interrupt thread, store a timestamp in the thread
139843e1988Sjohnlev * structure.  If an interrupt occurs between tsc_read() and its subsequent
140843e1988Sjohnlev * store, the timestamp will be stale by the time it is stored.  We can detect
141843e1988Sjohnlev * this by doing a compare-and-swap on the thread's timestamp, since any
142843e1988Sjohnlev * interrupt occurring in this window will put a new timestamp in the thread's
143843e1988Sjohnlev * t_intr_start field.
 *
 * Clobbers %rax (cmpxchgq comparand) and %r14 (timestamp from TSC_READ);
 * thread_t must be some other register.
144843e1988Sjohnlev */
145843e1988Sjohnlev#define	STORE_INTR_START(thread_t)			\
146843e1988Sjohnlev	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
147843e1988Sjohnlev	jz	1f;					\
148843e1988Sjohnlev0:							\
149843e1988Sjohnlev	TSC_READ();					\
150843e1988Sjohnlev	movq	T_INTR_START(thread_t), %rax;		\
151843e1988Sjohnlev	cmpxchgq %r14, T_INTR_START(thread_t);		\
152843e1988Sjohnlev	jnz	0b;					\
153843e1988Sjohnlev1:
154843e1988Sjohnlev
1557c478bd9Sstevel@tonic-gate#elif defined (__i386)
1567c478bd9Sstevel@tonic-gate
1577c478bd9Sstevel@tonic-gate/*
1587c478bd9Sstevel@tonic-gate * Save non-volatile registers (%ebp, %esi, %edi and %ebx)
1597c478bd9Sstevel@tonic-gate *
1607c478bd9Sstevel@tonic-gate * The stack frame must be created before the save of %esp so that tracebacks
1617c478bd9Sstevel@tonic-gate * of swtch()ed-out processes show the process as having last called swtch().
 *
 * Besides the callee-saved registers, this also records the post-frame
 * %esp and retaddr in the thread's t_sp/t_pc, reloads the thread argument
 * from the caller's frame (8(%ebp)) into %edi for the remainder of
 * resume(), and fires the sched:::off-cpu DTrace probe with it.
1627c478bd9Sstevel@tonic-gate */
1637c478bd9Sstevel@tonic-gate#define SAVE_REGS(thread_t, retaddr)			\
1647c478bd9Sstevel@tonic-gate	movl	%ebp, T_EBP(thread_t);			\
1657c478bd9Sstevel@tonic-gate	movl	%ebx, T_EBX(thread_t);			\
1667c478bd9Sstevel@tonic-gate	movl	%esi, T_ESI(thread_t);			\
1677c478bd9Sstevel@tonic-gate	movl	%edi, T_EDI(thread_t);			\
1687c478bd9Sstevel@tonic-gate	pushl	%ebp;					\
1697c478bd9Sstevel@tonic-gate	movl	%esp, %ebp;				\
1707c478bd9Sstevel@tonic-gate	movl	%esp, T_SP(thread_t);			\
1717c478bd9Sstevel@tonic-gate	movl	retaddr, T_PC(thread_t);		\
1727c478bd9Sstevel@tonic-gate	movl	8(%ebp), %edi;				\
1737c478bd9Sstevel@tonic-gate	pushl	%edi;					\
1747c478bd9Sstevel@tonic-gate	call	__dtrace_probe___sched_off__cpu;	\
1757c478bd9Sstevel@tonic-gate	addl	$CLONGSIZE, %esp
1767c478bd9Sstevel@tonic-gate
1777c478bd9Sstevel@tonic-gate/*
1787c478bd9Sstevel@tonic-gate * Restore non-volatile registers (%ebp, %esi, %edi and %ebx)
1797c478bd9Sstevel@tonic-gate *
1807c478bd9Sstevel@tonic-gate * We don't do a 'leave,' because reloading %rsp/%rbp from the label_t
1817c478bd9Sstevel@tonic-gate * already has the effect of putting the stack back the way it was when
1827c478bd9Sstevel@tonic-gate * we came in.
 *
 * scratch_reg is loaded with the now-current thread (%gs:CPU_THREAD)
 * and still holds that thread pointer on completion.
1837c478bd9Sstevel@tonic-gate */
1847c478bd9Sstevel@tonic-gate#define RESTORE_REGS(scratch_reg)			\
1857c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, scratch_reg;		\
1867c478bd9Sstevel@tonic-gate	movl	T_EBP(scratch_reg), %ebp;		\
1877c478bd9Sstevel@tonic-gate	movl	T_EBX(scratch_reg), %ebx;		\
1887c478bd9Sstevel@tonic-gate	movl	T_ESI(scratch_reg), %esi;		\
1897c478bd9Sstevel@tonic-gate	movl	T_EDI(scratch_reg), %edi
1907c478bd9Sstevel@tonic-gate
1917c478bd9Sstevel@tonic-gate/*
1927c478bd9Sstevel@tonic-gate * Get pointer to a thread's hat structure
 *
 * Computes hatp = thread_t->t_procp->p_as->a_hat.
 * scratch_reg is clobbered (holds the p_as pointer on completion);
 * hatp itself is used as an intermediate, so it must differ from
 * thread_t and scratch_reg.
1937c478bd9Sstevel@tonic-gate */
1947c478bd9Sstevel@tonic-gate#define GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
1957c478bd9Sstevel@tonic-gate	movl	T_PROCP(thread_t), hatp;		\
1967c478bd9Sstevel@tonic-gate	movl	P_AS(hatp), scratch_reg;		\
1977c478bd9Sstevel@tonic-gate	movl	A_HAT(scratch_reg), hatp
1987c478bd9Sstevel@tonic-gate
199843e1988Sjohnlev/*
200843e1988Sjohnlev * If we are resuming an interrupt thread, store a timestamp in the thread
201843e1988Sjohnlev * structure.  If an interrupt occurs between tsc_read() and its subsequent
202843e1988Sjohnlev * store, the timestamp will be stale by the time it is stored.  We can detect
203843e1988Sjohnlev * this by doing a compare-and-swap on the thread's timestamp, since any
204843e1988Sjohnlev * interrupt occurring in this window will put a new timestamp in the thread's
205843e1988Sjohnlev * t_intr_start field.
 *
 * tsc_read() returns the 64-bit timestamp in %edx:%eax; it is moved to
 * %ecx:%ebx for cmpxchg8b, and the previously loaded t_intr_start value
 * is popped back into %edx:%eax as the comparand.  %ecx is preserved
 * around the whole sequence via push/pop; %eax, %ebx and %edx are
 * clobbered.
206843e1988Sjohnlev */
207843e1988Sjohnlev#define	STORE_INTR_START(thread_t)			\
208843e1988Sjohnlev	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
209843e1988Sjohnlev	jz	1f;					\
210843e1988Sjohnlev	pushl	%ecx;					\
211843e1988Sjohnlev0:							\
212843e1988Sjohnlev	pushl	T_INTR_START(thread_t);			\
213843e1988Sjohnlev	pushl	T_INTR_START+4(thread_t);		\
214843e1988Sjohnlev	call	tsc_read;				\
215843e1988Sjohnlev	movl	%eax, %ebx;				\
216843e1988Sjohnlev	movl	%edx, %ecx;				\
217843e1988Sjohnlev	popl	%edx;					\
218843e1988Sjohnlev	popl	%eax;					\
219843e1988Sjohnlev	cmpxchg8b T_INTR_START(thread_t);		\
220843e1988Sjohnlev	jnz	0b;					\
221843e1988Sjohnlev	popl	%ecx;					\
222843e1988Sjohnlev1:
223843e1988Sjohnlev
2247c478bd9Sstevel@tonic-gate#endif	/* __amd64 */
2257c478bd9Sstevel@tonic-gate
2267c478bd9Sstevel@tonic-gate#if defined(__lint)
2277c478bd9Sstevel@tonic-gate
/*
 * Lint stub: the real resume() is implemented in assembly below
 * (one variant each for __amd64 and __i386).
 */
2287c478bd9Sstevel@tonic-gate/* ARGSUSED */
2297c478bd9Sstevel@tonic-gatevoid
2307c478bd9Sstevel@tonic-gateresume(kthread_t *t)
2317c478bd9Sstevel@tonic-gate{}
2327c478bd9Sstevel@tonic-gate
2337c478bd9Sstevel@tonic-gate#else	/* __lint */
2347c478bd9Sstevel@tonic-gate
2357c478bd9Sstevel@tonic-gate#if defined(__amd64)
2367c478bd9Sstevel@tonic-gate
/*
 * resume(kthread_t *t) -- switch this CPU from curthread to thread t.
 *
 * Register usage in the amd64 variant:
 *	%rax	curthread on entry (argument to SAVE_REGS)
 *	%r11	resume_return, recorded as the outgoing thread's t_pc
 *	%r12	t, the incoming thread (set up by SAVE_REGS)
 *	%r13	outgoing thread; later reloaded with the CPU pointer and
 *		finally with the incoming thread's saved t_pc
 *	%r15	CPU pointer while still on the outgoing thread's side
 */
2377c478bd9Sstevel@tonic-gate	ENTRY(resume)
2387c478bd9Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %rax
2397c478bd9Sstevel@tonic-gate	leaq	resume_return(%rip), %r11
2407c478bd9Sstevel@tonic-gate
2417c478bd9Sstevel@tonic-gate	/*
2427c478bd9Sstevel@tonic-gate	 * Save non-volatile registers, and set return address for current
2437c478bd9Sstevel@tonic-gate	 * thread to resume_return.
2447c478bd9Sstevel@tonic-gate	 *
2457c478bd9Sstevel@tonic-gate	 * %r12 = t (new thread) when done
2467c478bd9Sstevel@tonic-gate	 */
2477c478bd9Sstevel@tonic-gate	SAVE_REGS(%rax, %r11)
2487c478bd9Sstevel@tonic-gate
2497c478bd9Sstevel@tonic-gate	LOADCPU(%r15)				/* %r15 = CPU */
2507c478bd9Sstevel@tonic-gate	movq	CPU_THREAD(%r15), %r13		/* %r13 = curthread */
2517c478bd9Sstevel@tonic-gate
2527c478bd9Sstevel@tonic-gate	/*
2537c478bd9Sstevel@tonic-gate	 * Call savectx if thread has installed context ops.
2547c478bd9Sstevel@tonic-gate	 *
2557c478bd9Sstevel@tonic-gate	 * Note that if we have floating point context, the save op
2567c478bd9Sstevel@tonic-gate	 * (either fpsave_begin or fpxsave_begin) will issue the
2577c478bd9Sstevel@tonic-gate	 * async save instruction (fnsave or fxsave respectively)
2587c478bd9Sstevel@tonic-gate	 * that we fwait for below.
2597c478bd9Sstevel@tonic-gate	 */
2607c478bd9Sstevel@tonic-gate	cmpq	$0, T_CTX(%r13)		/* should current thread savectx? */
2617c478bd9Sstevel@tonic-gate	je	.nosavectx		/* skip call when zero */
2627c478bd9Sstevel@tonic-gate
2637c478bd9Sstevel@tonic-gate	movq	%r13, %rdi		/* arg = thread pointer */
2647c478bd9Sstevel@tonic-gate	call	savectx			/* call ctx ops */
2657c478bd9Sstevel@tonic-gate.nosavectx:
266ae115bc7Smrj
2677c478bd9Sstevel@tonic-gate        /*
2680baeff3dSrab         * Call savepctx if process has installed context ops.
2697c478bd9Sstevel@tonic-gate         */
2700baeff3dSrab	movq	T_PROCP(%r13), %r14	/* %r14 = proc */
2710baeff3dSrab        cmpq    $0, P_PCTX(%r14)         /* should current process savepctx? */
2720baeff3dSrab        je      .nosavepctx              /* skip call when zero */
2737c478bd9Sstevel@tonic-gate
2740baeff3dSrab        movq    %r14, %rdi              /* arg = proc pointer */
2750baeff3dSrab        call    savepctx                 /* call ctx ops */
2760baeff3dSrab.nosavepctx:
2777c478bd9Sstevel@tonic-gate
2787c478bd9Sstevel@tonic-gate	/*
2797c478bd9Sstevel@tonic-gate	 * Temporarily switch to the idle thread's stack
2807c478bd9Sstevel@tonic-gate	 */
2817c478bd9Sstevel@tonic-gate	movq	CPU_IDLE_THREAD(%r15), %rax 	/* idle thread pointer */
2827c478bd9Sstevel@tonic-gate
2837c478bd9Sstevel@tonic-gate	/*
2847c478bd9Sstevel@tonic-gate	 * Set the idle thread as the current thread
2857c478bd9Sstevel@tonic-gate	 */
2867c478bd9Sstevel@tonic-gate	movq	T_SP(%rax), %rsp	/* It is safe to set rsp */
2877c478bd9Sstevel@tonic-gate	movq	%rax, CPU_THREAD(%r15)
2887c478bd9Sstevel@tonic-gate
2897c478bd9Sstevel@tonic-gate	/*
2907c478bd9Sstevel@tonic-gate	 * Switch in the hat context for the new thread
2917c478bd9Sstevel@tonic-gate	 *
2927c478bd9Sstevel@tonic-gate	 */
2937c478bd9Sstevel@tonic-gate	GET_THREAD_HATP(%rdi, %r12, %r11)
2947c478bd9Sstevel@tonic-gate	call	hat_switch
2957c478bd9Sstevel@tonic-gate
2967c478bd9Sstevel@tonic-gate	/*
2977c478bd9Sstevel@tonic-gate	 * Clear and unlock previous thread's t_lock
2987c478bd9Sstevel@tonic-gate	 * to allow it to be dispatched by another processor.
2997c478bd9Sstevel@tonic-gate	 */
3007c478bd9Sstevel@tonic-gate	movb	$0, T_LOCK(%r13)
3017c478bd9Sstevel@tonic-gate
3027c478bd9Sstevel@tonic-gate	/*
3037c478bd9Sstevel@tonic-gate	 * IMPORTANT: Registers at this point must be:
3047c478bd9Sstevel@tonic-gate	 *       %r12 = new thread
3057c478bd9Sstevel@tonic-gate	 *
3067c478bd9Sstevel@tonic-gate	 * Here we are in the idle thread, have dropped the old thread.
3077c478bd9Sstevel@tonic-gate	 */
3087c478bd9Sstevel@tonic-gate	ALTENTRY(_resume_from_idle)
3097c478bd9Sstevel@tonic-gate	/*
3107c478bd9Sstevel@tonic-gate	 * spin until dispatched thread's mutex has
3117c478bd9Sstevel@tonic-gate	 * been unlocked. this mutex is unlocked when
3127c478bd9Sstevel@tonic-gate	 * it becomes safe for the thread to run.
3137c478bd9Sstevel@tonic-gate	 */
3147c478bd9Sstevel@tonic-gate.lock_thread_mutex:
3157c478bd9Sstevel@tonic-gate	lock
3167c478bd9Sstevel@tonic-gate	btsl	$0, T_LOCK(%r12) 	/* attempt to lock new thread's mutex */
3177c478bd9Sstevel@tonic-gate	jnc	.thread_mutex_locked	/* got it */
3187c478bd9Sstevel@tonic-gate
3197c478bd9Sstevel@tonic-gate.spin_thread_mutex:
3207c478bd9Sstevel@tonic-gate	pause
3217c478bd9Sstevel@tonic-gate	cmpb	$0, T_LOCK(%r12)	/* check mutex status */
3227c478bd9Sstevel@tonic-gate	jz	.lock_thread_mutex	/* clear, retry lock */
3237c478bd9Sstevel@tonic-gate	jmp	.spin_thread_mutex	/* still locked, spin... */
3247c478bd9Sstevel@tonic-gate
3257c478bd9Sstevel@tonic-gate.thread_mutex_locked:
3267c478bd9Sstevel@tonic-gate	/*
3277c478bd9Sstevel@tonic-gate	 * Fix CPU structure to indicate new running thread.
3287c478bd9Sstevel@tonic-gate	 * Set pointer in new thread to the CPU structure.
3297c478bd9Sstevel@tonic-gate	 */
3307c478bd9Sstevel@tonic-gate	LOADCPU(%r13)			/* load current CPU pointer */
3317c478bd9Sstevel@tonic-gate	cmpq	%r13, T_CPU(%r12)
3327c478bd9Sstevel@tonic-gate	je	.setup_cpu
3337c478bd9Sstevel@tonic-gate
3347c478bd9Sstevel@tonic-gate	/* cp->cpu_stats.sys.cpumigrate++ */
3357c478bd9Sstevel@tonic-gate	incq    CPU_STATS_SYS_CPUMIGRATE(%r13)
3367c478bd9Sstevel@tonic-gate	movq	%r13, T_CPU(%r12)	/* set new thread's CPU pointer */
3377c478bd9Sstevel@tonic-gate
3387c478bd9Sstevel@tonic-gate.setup_cpu:
3397c478bd9Sstevel@tonic-gate	/*
3407c478bd9Sstevel@tonic-gate	 * Setup rsp0 (kernel stack) in TSS to curthread's stack.
3417c478bd9Sstevel@tonic-gate	 * (Note: Since we don't have saved 'regs' structure for all
3427c478bd9Sstevel@tonic-gate	 *	  the threads we can't easily determine if we need to
3437c478bd9Sstevel@tonic-gate	 *	  change rsp0. So, we simply change the rsp0 to bottom
3447c478bd9Sstevel@tonic-gate	 *	  of the thread stack and it will work for all cases.)
3457c478bd9Sstevel@tonic-gate	 *
3467c478bd9Sstevel@tonic-gate	 * XX64 - Is this correct?
3477c478bd9Sstevel@tonic-gate	 */
3487c478bd9Sstevel@tonic-gate	movq	CPU_TSS(%r13), %r14
3497c478bd9Sstevel@tonic-gate	movq	T_STACK(%r12), %rax
3507c478bd9Sstevel@tonic-gate	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
351843e1988Sjohnlev#if !defined(__xpv)
3527c478bd9Sstevel@tonic-gate	movq	%rax, TSS_RSP0(%r14)
353843e1988Sjohnlev#else
354843e1988Sjohnlev	movl	$KDS_SEL, %edi
355843e1988Sjohnlev	movq	%rax, %rsi
356843e1988Sjohnlev	call	HYPERVISOR_stack_switch
357843e1988Sjohnlev#endif	/* __xpv */
3587c478bd9Sstevel@tonic-gate
3597c478bd9Sstevel@tonic-gate	movq	%r12, CPU_THREAD(%r13)	/* set CPU's thread pointer */
360*49dc33e3SBryan Cantrill	mfence				/* synchronize with mutex_exit() */
3617c478bd9Sstevel@tonic-gate	xorl	%ebp, %ebp		/* make $<threadlist behave better */
3627c478bd9Sstevel@tonic-gate	movq	T_LWP(%r12), %rax 	/* set associated lwp to  */
3637c478bd9Sstevel@tonic-gate	movq	%rax, CPU_LWP(%r13) 	/* CPU's lwp ptr */
3647c478bd9Sstevel@tonic-gate
3657c478bd9Sstevel@tonic-gate	movq	T_SP(%r12), %rsp	/* switch to resuming thread's stack */
3667c478bd9Sstevel@tonic-gate	movq	T_PC(%r12), %r13	/* saved return addr */
3677c478bd9Sstevel@tonic-gate
3687c478bd9Sstevel@tonic-gate	/*
3697c478bd9Sstevel@tonic-gate	 * Call restorectx if context ops have been installed.
3707c478bd9Sstevel@tonic-gate	 */
3717c478bd9Sstevel@tonic-gate	cmpq	$0, T_CTX(%r12)		/* should resumed thread restorectx? */
3727c478bd9Sstevel@tonic-gate	jz	.norestorectx		/* skip call when zero */
3737c478bd9Sstevel@tonic-gate	movq	%r12, %rdi		/* arg = thread pointer */
3747c478bd9Sstevel@tonic-gate	call	restorectx		/* call ctx ops */
3757c478bd9Sstevel@tonic-gate.norestorectx:
3767c478bd9Sstevel@tonic-gate
3777c478bd9Sstevel@tonic-gate	/*
3780baeff3dSrab	 * Call restorepctx if context ops have been installed for the proc.
3790baeff3dSrab	 */
3800baeff3dSrab	movq	T_PROCP(%r12), %rcx
3810baeff3dSrab	cmpq	$0, P_PCTX(%rcx)
3820baeff3dSrab	jz	.norestorepctx
3830baeff3dSrab	movq	%rcx, %rdi
3840baeff3dSrab	call	restorepctx
3850baeff3dSrab.norestorepctx:
3860baeff3dSrab
387843e1988Sjohnlev	STORE_INTR_START(%r12)
3887c478bd9Sstevel@tonic-gate
3897c478bd9Sstevel@tonic-gate	/*
3907c478bd9Sstevel@tonic-gate	 * Restore non-volatile registers, then have spl0 return to the
3917c478bd9Sstevel@tonic-gate	 * resuming thread's PC after first setting the priority as low as
3927c478bd9Sstevel@tonic-gate	 * possible and blocking all interrupt threads that may be active.
3937c478bd9Sstevel@tonic-gate	 */
3947c478bd9Sstevel@tonic-gate	movq	%r13, %rax	/* save return address */
3957c478bd9Sstevel@tonic-gate	RESTORE_REGS(%r11)
3967c478bd9Sstevel@tonic-gate	pushq	%rax		/* push return address for spl0() */
3977c478bd9Sstevel@tonic-gate	call	__dtrace_probe___sched_on__cpu
3987c478bd9Sstevel@tonic-gate	jmp	spl0
3997c478bd9Sstevel@tonic-gate
4007c478bd9Sstevel@tonic-gateresume_return:
4017c478bd9Sstevel@tonic-gate	/*
4027c478bd9Sstevel@tonic-gate	 * Remove stack frame created in SAVE_REGS()
4037c478bd9Sstevel@tonic-gate	 */
4047c478bd9Sstevel@tonic-gate	addq	$CLONGSIZE, %rsp
4057c478bd9Sstevel@tonic-gate	ret
4067c478bd9Sstevel@tonic-gate	SET_SIZE(_resume_from_idle)
4077c478bd9Sstevel@tonic-gate	SET_SIZE(resume)
4087c478bd9Sstevel@tonic-gate
4097c478bd9Sstevel@tonic-gate#elif defined (__i386)
4107c478bd9Sstevel@tonic-gate
/*
 * resume(kthread_t *t) -- switch this CPU from curthread to thread t.
 *
 * Register usage in the i386 variant:
 *	%eax	curthread on entry (argument to SAVE_REGS)
 *	%ecx	resume_return, recorded as the outgoing thread's t_pc
 *	%edi	t, the incoming thread (set up by SAVE_REGS)
 *	%esi	outgoing thread; later reloaded with the CPU pointer and
 *		finally with the incoming thread's saved t_pc
 *	%ebx	CPU pointer while still on the outgoing thread's side
 */
4117c478bd9Sstevel@tonic-gate	ENTRY(resume)
4127c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %eax
4137c478bd9Sstevel@tonic-gate	movl	$resume_return, %ecx
4147c478bd9Sstevel@tonic-gate
4157c478bd9Sstevel@tonic-gate	/*
4167c478bd9Sstevel@tonic-gate	 * Save non-volatile registers, and set return address for current
4177c478bd9Sstevel@tonic-gate	 * thread to resume_return.
4187c478bd9Sstevel@tonic-gate	 *
4197c478bd9Sstevel@tonic-gate	 * %edi = t (new thread) when done.
4207c478bd9Sstevel@tonic-gate	 */
4217c478bd9Sstevel@tonic-gate	SAVE_REGS(%eax,  %ecx)
4227c478bd9Sstevel@tonic-gate
4237c478bd9Sstevel@tonic-gate	LOADCPU(%ebx)			/* %ebx = CPU */
4247c478bd9Sstevel@tonic-gate	movl	CPU_THREAD(%ebx), %esi	/* %esi = curthread */
4257c478bd9Sstevel@tonic-gate
426ae115bc7Smrj#ifdef DEBUG
427ae115bc7Smrj	call	assert_ints_enabled	/* panics if we are cli'd */
428ae115bc7Smrj#endif
4297c478bd9Sstevel@tonic-gate	/*
4307c478bd9Sstevel@tonic-gate	 * Call savectx if thread has installed context ops.
4317c478bd9Sstevel@tonic-gate	 *
4327c478bd9Sstevel@tonic-gate	 * Note that if we have floating point context, the save op
4337c478bd9Sstevel@tonic-gate	 * (either fpsave_begin or fpxsave_begin) will issue the
4347c478bd9Sstevel@tonic-gate	 * async save instruction (fnsave or fxsave respectively)
4357c478bd9Sstevel@tonic-gate	 * that we fwait for below.
4367c478bd9Sstevel@tonic-gate	 */
4377c478bd9Sstevel@tonic-gate	movl	T_CTX(%esi), %eax	/* should current thread savectx? */
4387c478bd9Sstevel@tonic-gate	testl	%eax, %eax
4397c478bd9Sstevel@tonic-gate	jz	.nosavectx		/* skip call when zero */
4407c478bd9Sstevel@tonic-gate	pushl	%esi			/* arg = thread pointer */
4417c478bd9Sstevel@tonic-gate	call	savectx			/* call ctx ops */
4427c478bd9Sstevel@tonic-gate	addl	$4, %esp		/* restore stack pointer */
4437c478bd9Sstevel@tonic-gate.nosavectx:
444ae115bc7Smrj
4457c478bd9Sstevel@tonic-gate        /*
4460baeff3dSrab         * Call savepctx if process has installed context ops.
4477c478bd9Sstevel@tonic-gate         */
4480baeff3dSrab	movl	T_PROCP(%esi), %eax	/* %eax = proc */
4490baeff3dSrab	cmpl	$0, P_PCTX(%eax)	/* should current process savepctx? */
4500baeff3dSrab	je	.nosavepctx		/* skip call when zero */
4510baeff3dSrab	pushl	%eax			/* arg = proc pointer */
4520baeff3dSrab	call	savepctx		/* call ctx ops */
4530baeff3dSrab	addl	$4, %esp
4540baeff3dSrab.nosavepctx:
4557c478bd9Sstevel@tonic-gate
4567c478bd9Sstevel@tonic-gate	/*
4577c478bd9Sstevel@tonic-gate	 * Temporarily switch to the idle thread's stack
4587c478bd9Sstevel@tonic-gate	 */
4597c478bd9Sstevel@tonic-gate	movl	CPU_IDLE_THREAD(%ebx), %eax 	/* idle thread pointer */
4607c478bd9Sstevel@tonic-gate
4617c478bd9Sstevel@tonic-gate	/*
4627c478bd9Sstevel@tonic-gate	 * Set the idle thread as the current thread
4637c478bd9Sstevel@tonic-gate	 */
4647c478bd9Sstevel@tonic-gate	movl	T_SP(%eax), %esp	/* It is safe to set esp */
4657c478bd9Sstevel@tonic-gate	movl	%eax, CPU_THREAD(%ebx)
4667c478bd9Sstevel@tonic-gate
4677c478bd9Sstevel@tonic-gate	/* switch in the hat context for the new thread */
4687c478bd9Sstevel@tonic-gate	GET_THREAD_HATP(%ecx, %edi, %ecx)
4697c478bd9Sstevel@tonic-gate	pushl	%ecx
4707c478bd9Sstevel@tonic-gate	call	hat_switch
4717c478bd9Sstevel@tonic-gate	addl	$4, %esp
4727c478bd9Sstevel@tonic-gate
4737c478bd9Sstevel@tonic-gate	/*
4747c478bd9Sstevel@tonic-gate	 * Clear and unlock previous thread's t_lock
4757c478bd9Sstevel@tonic-gate	 * to allow it to be dispatched by another processor.
4767c478bd9Sstevel@tonic-gate	 */
477ae115bc7Smrj	movb	$0, T_LOCK(%esi)
4787c478bd9Sstevel@tonic-gate
4797c478bd9Sstevel@tonic-gate	/*
4807c478bd9Sstevel@tonic-gate	 * IMPORTANT: Registers at this point must be:
4817c478bd9Sstevel@tonic-gate	 *       %edi = new thread
4827c478bd9Sstevel@tonic-gate	 *
4837c478bd9Sstevel@tonic-gate	 * Here we are in the idle thread, have dropped the old thread.
4847c478bd9Sstevel@tonic-gate	 */
4857c478bd9Sstevel@tonic-gate	ALTENTRY(_resume_from_idle)
4867c478bd9Sstevel@tonic-gate	/*
4877c478bd9Sstevel@tonic-gate	 * spin until dispatched thread's mutex has
4887c478bd9Sstevel@tonic-gate	 * been unlocked. this mutex is unlocked when
4897c478bd9Sstevel@tonic-gate	 * it becomes safe for the thread to run.
4907c478bd9Sstevel@tonic-gate	 */
4917c478bd9Sstevel@tonic-gate.L4:
4927c478bd9Sstevel@tonic-gate	lock
4937c478bd9Sstevel@tonic-gate	btsl	$0, T_LOCK(%edi) /* lock new thread's mutex */
4947c478bd9Sstevel@tonic-gate	jc	.L4_2			/* lock did not succeed */
4957c478bd9Sstevel@tonic-gate
4967c478bd9Sstevel@tonic-gate	/*
4977c478bd9Sstevel@tonic-gate	 * Fix CPU structure to indicate new running thread.
4987c478bd9Sstevel@tonic-gate	 * Set pointer in new thread to the CPU structure.
4997c478bd9Sstevel@tonic-gate	 */
5007c478bd9Sstevel@tonic-gate	LOADCPU(%esi)			/* load current CPU pointer */
5017c478bd9Sstevel@tonic-gate	movl	T_STACK(%edi), %eax	/* here to use v pipeline of */
5027c478bd9Sstevel@tonic-gate					/* Pentium. Used few lines below */
5037c478bd9Sstevel@tonic-gate	cmpl	%esi, T_CPU(%edi)
5047c478bd9Sstevel@tonic-gate	jne	.L5_2
5057c478bd9Sstevel@tonic-gate.L5_1:
5067c478bd9Sstevel@tonic-gate	/*
5077c478bd9Sstevel@tonic-gate	 * Setup esp0 (kernel stack) in TSS to curthread's stack.
5087c478bd9Sstevel@tonic-gate	 * (Note: Since we don't have saved 'regs' structure for all
5097c478bd9Sstevel@tonic-gate	 *	  the threads we can't easily determine if we need to
5107c478bd9Sstevel@tonic-gate	 *	  change esp0. So, we simply change the esp0 to bottom
5117c478bd9Sstevel@tonic-gate	 *	  of the thread stack and it will work for all cases.)
5127c478bd9Sstevel@tonic-gate	 */
5137c478bd9Sstevel@tonic-gate	movl	CPU_TSS(%esi), %ecx
5147c478bd9Sstevel@tonic-gate	addl	$REGSIZE+MINFRAME, %eax	/* to the bottom of thread stack */
515843e1988Sjohnlev#if !defined(__xpv)
5167c478bd9Sstevel@tonic-gate	movl	%eax, TSS_ESP0(%ecx)
517843e1988Sjohnlev#else
518843e1988Sjohnlev	pushl	%eax
519843e1988Sjohnlev	pushl	$KDS_SEL
520843e1988Sjohnlev	call	HYPERVISOR_stack_switch
521843e1988Sjohnlev	addl	$8, %esp
522843e1988Sjohnlev#endif	/* __xpv */
5237c478bd9Sstevel@tonic-gate
5247c478bd9Sstevel@tonic-gate	movl	%edi, CPU_THREAD(%esi)	/* set CPU's thread pointer */
525*49dc33e3SBryan Cantrill	mfence				/* synchronize with mutex_exit() */
5267c478bd9Sstevel@tonic-gate	xorl	%ebp, %ebp		/* make $<threadlist behave better */
5277c478bd9Sstevel@tonic-gate	movl	T_LWP(%edi), %eax 	/* set associated lwp to  */
5287c478bd9Sstevel@tonic-gate	movl	%eax, CPU_LWP(%esi) 	/* CPU's lwp ptr */
5297c478bd9Sstevel@tonic-gate
5307c478bd9Sstevel@tonic-gate	movl	T_SP(%edi), %esp	/* switch to resuming thread's stack */
5317c478bd9Sstevel@tonic-gate	movl	T_PC(%edi), %esi	/* saved return addr */
5327c478bd9Sstevel@tonic-gate
5337c478bd9Sstevel@tonic-gate	/*
5347c478bd9Sstevel@tonic-gate	 * Call restorectx if context ops have been installed.
5357c478bd9Sstevel@tonic-gate	 */
5367c478bd9Sstevel@tonic-gate	movl	T_CTX(%edi), %eax	/* should resumed thread restorectx? */
5377c478bd9Sstevel@tonic-gate	testl	%eax, %eax
5387c478bd9Sstevel@tonic-gate	jz	.norestorectx		/* skip call when zero */
5397c478bd9Sstevel@tonic-gate	pushl	%edi			/* arg = thread pointer */
5407c478bd9Sstevel@tonic-gate	call	restorectx		/* call ctx ops */
5417c478bd9Sstevel@tonic-gate	addl	$4, %esp		/* restore stack pointer */
5427c478bd9Sstevel@tonic-gate.norestorectx:
5437c478bd9Sstevel@tonic-gate
5447c478bd9Sstevel@tonic-gate	/*
5450baeff3dSrab	 * Call restorepctx if context ops have been installed for the proc.
5460baeff3dSrab	 */
5470baeff3dSrab	movl	T_PROCP(%edi), %eax
5480baeff3dSrab	cmpl	$0, P_PCTX(%eax)
5490baeff3dSrab	je	.norestorepctx
5500baeff3dSrab	pushl	%eax			/* arg = proc pointer */
5510baeff3dSrab	call	restorepctx
5520baeff3dSrab	addl	$4, %esp		/* restore stack pointer */
5530baeff3dSrab.norestorepctx:
5540baeff3dSrab
555843e1988Sjohnlev	STORE_INTR_START(%edi)
556843e1988Sjohnlev
5577c478bd9Sstevel@tonic-gate	/*
5587c478bd9Sstevel@tonic-gate	 * Restore non-volatile registers, then have spl0 return to the
5597c478bd9Sstevel@tonic-gate	 * resuming thread's PC after first setting the priority as low as
5607c478bd9Sstevel@tonic-gate	 * possible and blocking all interrupt threads that may be active.
5617c478bd9Sstevel@tonic-gate	 */
5627c478bd9Sstevel@tonic-gate	movl	%esi, %eax		/* save return address */
5637c478bd9Sstevel@tonic-gate	RESTORE_REGS(%ecx)
5647c478bd9Sstevel@tonic-gate	pushl	%eax			/* push return address for spl0() */
5657c478bd9Sstevel@tonic-gate	call	__dtrace_probe___sched_on__cpu
5667c478bd9Sstevel@tonic-gate	jmp	spl0
5677c478bd9Sstevel@tonic-gate
5687c478bd9Sstevel@tonic-gateresume_return:
5697c478bd9Sstevel@tonic-gate	/*
5707c478bd9Sstevel@tonic-gate	 * Remove stack frame created in SAVE_REGS()
5717c478bd9Sstevel@tonic-gate	 */
5727c478bd9Sstevel@tonic-gate	addl	$CLONGSIZE, %esp
5737c478bd9Sstevel@tonic-gate	ret
5747c478bd9Sstevel@tonic-gate
5757c478bd9Sstevel@tonic-gate.L4_2:
5767c478bd9Sstevel@tonic-gate	pause
5777c478bd9Sstevel@tonic-gate	cmpb	$0, T_LOCK(%edi)
5787c478bd9Sstevel@tonic-gate	je	.L4
5797c478bd9Sstevel@tonic-gate	jmp	.L4_2
5807c478bd9Sstevel@tonic-gate
5817c478bd9Sstevel@tonic-gate.L5_2:
5827c478bd9Sstevel@tonic-gate	/* cp->cpu_stats.sys.cpumigrate++ */
5837c478bd9Sstevel@tonic-gate	addl    $1, CPU_STATS_SYS_CPUMIGRATE(%esi)
5847c478bd9Sstevel@tonic-gate	adcl    $0, CPU_STATS_SYS_CPUMIGRATE+4(%esi)
5857c478bd9Sstevel@tonic-gate	movl	%esi, T_CPU(%edi)	/* set new thread's CPU pointer */
5867c478bd9Sstevel@tonic-gate	jmp	.L5_1
5877c478bd9Sstevel@tonic-gate
5887c478bd9Sstevel@tonic-gate	SET_SIZE(_resume_from_idle)
5897c478bd9Sstevel@tonic-gate	SET_SIZE(resume)
5907c478bd9Sstevel@tonic-gate
5917c478bd9Sstevel@tonic-gate#endif	/* __amd64 */
5927c478bd9Sstevel@tonic-gate#endif	/* __lint */
5937c478bd9Sstevel@tonic-gate
5947c478bd9Sstevel@tonic-gate#if defined(__lint)
5957c478bd9Sstevel@tonic-gate
5967c478bd9Sstevel@tonic-gate/* ARGSUSED */
5977c478bd9Sstevel@tonic-gatevoid
5987c478bd9Sstevel@tonic-gateresume_from_zombie(kthread_t *t)
5997c478bd9Sstevel@tonic-gate{}
6007c478bd9Sstevel@tonic-gate
6017c478bd9Sstevel@tonic-gate#else	/* __lint */
6027c478bd9Sstevel@tonic-gate
6037c478bd9Sstevel@tonic-gate#if defined(__amd64)
6047c478bd9Sstevel@tonic-gate
6057c478bd9Sstevel@tonic-gate	ENTRY(resume_from_zombie)
6067c478bd9Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %rax
6077c478bd9Sstevel@tonic-gate	leaq	resume_from_zombie_return(%rip), %r11
6087c478bd9Sstevel@tonic-gate
6097c478bd9Sstevel@tonic-gate	/*
6107c478bd9Sstevel@tonic-gate	 * Save non-volatile registers, and set return address for current
6117c478bd9Sstevel@tonic-gate	 * thread to resume_from_zombie_return.
6127c478bd9Sstevel@tonic-gate	 *
6137c478bd9Sstevel@tonic-gate	 * %r12 = t (new thread) when done
6147c478bd9Sstevel@tonic-gate	 */
6157c478bd9Sstevel@tonic-gate	SAVE_REGS(%rax, %r11)
6167c478bd9Sstevel@tonic-gate
6177c478bd9Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */
6187c478bd9Sstevel@tonic-gate
6197c478bd9Sstevel@tonic-gate	/* clean up the fp unit. It might be left enabled */
620843e1988Sjohnlev
621843e1988Sjohnlev#if defined(__xpv)		/* XXPV XXtclayton */
622843e1988Sjohnlev	/*
623843e1988Sjohnlev	 * Remove this after bringup.
624843e1988Sjohnlev	 * (Too many #gp's for an instrumented hypervisor.)
625843e1988Sjohnlev	 */
626843e1988Sjohnlev	STTS(%rax)
627843e1988Sjohnlev#else
6287c478bd9Sstevel@tonic-gate	movq	%cr0, %rax
6297c478bd9Sstevel@tonic-gate	testq	$CR0_TS, %rax
6307c478bd9Sstevel@tonic-gate	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
6317c478bd9Sstevel@tonic-gate	fninit				/* init fpu & discard pending error */
6327c478bd9Sstevel@tonic-gate	orq	$CR0_TS, %rax
6337c478bd9Sstevel@tonic-gate	movq	%rax, %cr0
6347c478bd9Sstevel@tonic-gate.zfpu_disabled:
6357c478bd9Sstevel@tonic-gate
636843e1988Sjohnlev#endif	/* __xpv */
637843e1988Sjohnlev
6387c478bd9Sstevel@tonic-gate	/*
6397c478bd9Sstevel@tonic-gate	 * Temporarily switch to the idle thread's stack so that the zombie
6407c478bd9Sstevel@tonic-gate	 * thread's stack can be reclaimed by the reaper.
6417c478bd9Sstevel@tonic-gate	 */
6427c478bd9Sstevel@tonic-gate	movq	%gs:CPU_IDLE_THREAD, %rax /* idle thread pointer */
6437c478bd9Sstevel@tonic-gate	movq	T_SP(%rax), %rsp	/* get onto idle thread stack */
6447c478bd9Sstevel@tonic-gate
6457c478bd9Sstevel@tonic-gate	/*
6467c478bd9Sstevel@tonic-gate	 * Sigh. If the idle thread has never run thread_start()
6477c478bd9Sstevel@tonic-gate	 * then t_sp is mis-aligned by thread_load().
6487c478bd9Sstevel@tonic-gate	 */
6497c478bd9Sstevel@tonic-gate	andq	$_BITNOT(STACK_ALIGN-1), %rsp
6507c478bd9Sstevel@tonic-gate
6517c478bd9Sstevel@tonic-gate	/*
6527c478bd9Sstevel@tonic-gate	 * Set the idle thread as the current thread.
6537c478bd9Sstevel@tonic-gate	 */
6547c478bd9Sstevel@tonic-gate	movq	%rax, %gs:CPU_THREAD
6557c478bd9Sstevel@tonic-gate
6567c478bd9Sstevel@tonic-gate	/* switch in the hat context for the new thread */
6577c478bd9Sstevel@tonic-gate	GET_THREAD_HATP(%rdi, %r12, %r11)
6587c478bd9Sstevel@tonic-gate	call	hat_switch
6597c478bd9Sstevel@tonic-gate
6607c478bd9Sstevel@tonic-gate	/*
6617c478bd9Sstevel@tonic-gate	 * Put the zombie on death-row.
6627c478bd9Sstevel@tonic-gate	 */
6637c478bd9Sstevel@tonic-gate	movq	%r13, %rdi
6647c478bd9Sstevel@tonic-gate	call	reapq_add
6657c478bd9Sstevel@tonic-gate
6667c478bd9Sstevel@tonic-gate	jmp	_resume_from_idle	/* finish job of resume */
6677c478bd9Sstevel@tonic-gate
6687c478bd9Sstevel@tonic-gateresume_from_zombie_return:
6697c478bd9Sstevel@tonic-gate	RESTORE_REGS(%r11)		/* restore non-volatile registers */
6707c478bd9Sstevel@tonic-gate	call	__dtrace_probe___sched_on__cpu
6717c478bd9Sstevel@tonic-gate
6727c478bd9Sstevel@tonic-gate	/*
6737c478bd9Sstevel@tonic-gate	 * Remove stack frame created in SAVE_REGS()
6747c478bd9Sstevel@tonic-gate	 */
6757c478bd9Sstevel@tonic-gate	addq	$CLONGSIZE, %rsp
6767c478bd9Sstevel@tonic-gate	ret
6777c478bd9Sstevel@tonic-gate	SET_SIZE(resume_from_zombie)
6787c478bd9Sstevel@tonic-gate
6797c478bd9Sstevel@tonic-gate#elif defined (__i386)
6807c478bd9Sstevel@tonic-gate
6817c478bd9Sstevel@tonic-gate	ENTRY(resume_from_zombie)
6827c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %eax
6837c478bd9Sstevel@tonic-gate	movl	$resume_from_zombie_return, %ecx
6847c478bd9Sstevel@tonic-gate
6857c478bd9Sstevel@tonic-gate	/*
6867c478bd9Sstevel@tonic-gate	 * Save non-volatile registers, and set return address for current
6877c478bd9Sstevel@tonic-gate	 * thread to resume_from_zombie_return.
6887c478bd9Sstevel@tonic-gate	 *
6897c478bd9Sstevel@tonic-gate	 * %edi = t (new thread) when done.
6907c478bd9Sstevel@tonic-gate	 */
6917c478bd9Sstevel@tonic-gate	SAVE_REGS(%eax, %ecx)
6927c478bd9Sstevel@tonic-gate
693ae115bc7Smrj#ifdef DEBUG
694ae115bc7Smrj	call	assert_ints_enabled	/* panics if we are cli'd */
695ae115bc7Smrj#endif
6967c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */
6977c478bd9Sstevel@tonic-gate
6987c478bd9Sstevel@tonic-gate	/* clean up the fp unit. It might be left enabled */
699ae115bc7Smrj
7007c478bd9Sstevel@tonic-gate	movl	%cr0, %eax
7017c478bd9Sstevel@tonic-gate	testl	$CR0_TS, %eax
7027c478bd9Sstevel@tonic-gate	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
7037c478bd9Sstevel@tonic-gate	fninit				/* init fpu & discard pending error */
7047c478bd9Sstevel@tonic-gate	orl	$CR0_TS, %eax
7057c478bd9Sstevel@tonic-gate	movl	%eax, %cr0
7067c478bd9Sstevel@tonic-gate.zfpu_disabled:
707ae115bc7Smrj
7087c478bd9Sstevel@tonic-gate	/*
7097c478bd9Sstevel@tonic-gate	 * Temporarily switch to the idle thread's stack so that the zombie
7107c478bd9Sstevel@tonic-gate	 * thread's stack can be reclaimed by the reaper.
7117c478bd9Sstevel@tonic-gate	 */
7127c478bd9Sstevel@tonic-gate	movl	%gs:CPU_IDLE_THREAD, %eax /* idle thread pointer */
7137c478bd9Sstevel@tonic-gate	movl	T_SP(%eax), %esp	/* get onto idle thread stack */
7147c478bd9Sstevel@tonic-gate
7157c478bd9Sstevel@tonic-gate	/*
7167c478bd9Sstevel@tonic-gate	 * Set the idle thread as the current thread.
7177c478bd9Sstevel@tonic-gate	 */
7187c478bd9Sstevel@tonic-gate	movl	%eax, %gs:CPU_THREAD
7197c478bd9Sstevel@tonic-gate
720ae115bc7Smrj	/*
721ae115bc7Smrj	 * switch in the hat context for the new thread
722ae115bc7Smrj	 */
7237c478bd9Sstevel@tonic-gate	GET_THREAD_HATP(%ecx, %edi, %ecx)
7247c478bd9Sstevel@tonic-gate	pushl	%ecx
7257c478bd9Sstevel@tonic-gate	call	hat_switch
7267c478bd9Sstevel@tonic-gate	addl	$4, %esp
727ae115bc7Smrj
7287c478bd9Sstevel@tonic-gate	/*
7297c478bd9Sstevel@tonic-gate	 * Put the zombie on death-row.
7307c478bd9Sstevel@tonic-gate	 */
7317c478bd9Sstevel@tonic-gate	pushl	%esi
7327c478bd9Sstevel@tonic-gate	call	reapq_add
7337c478bd9Sstevel@tonic-gate	addl	$4, %esp
7347c478bd9Sstevel@tonic-gate	jmp	_resume_from_idle	/* finish job of resume */
7357c478bd9Sstevel@tonic-gate
7367c478bd9Sstevel@tonic-gateresume_from_zombie_return:
7377c478bd9Sstevel@tonic-gate	RESTORE_REGS(%ecx)		/* restore non-volatile registers */
7387c478bd9Sstevel@tonic-gate	call	__dtrace_probe___sched_on__cpu
7397c478bd9Sstevel@tonic-gate
7407c478bd9Sstevel@tonic-gate	/*
7417c478bd9Sstevel@tonic-gate	 * Remove stack frame created in SAVE_REGS()
7427c478bd9Sstevel@tonic-gate	 */
7437c478bd9Sstevel@tonic-gate	addl	$CLONGSIZE, %esp
7447c478bd9Sstevel@tonic-gate	ret
7457c478bd9Sstevel@tonic-gate	SET_SIZE(resume_from_zombie)
7467c478bd9Sstevel@tonic-gate
7477c478bd9Sstevel@tonic-gate#endif	/* __amd64 */
7487c478bd9Sstevel@tonic-gate#endif	/* __lint */
7497c478bd9Sstevel@tonic-gate
7507c478bd9Sstevel@tonic-gate#if defined(__lint)
7517c478bd9Sstevel@tonic-gate
7527c478bd9Sstevel@tonic-gate/* ARGSUSED */
7537c478bd9Sstevel@tonic-gatevoid
7547c478bd9Sstevel@tonic-gateresume_from_intr(kthread_t *t)
7557c478bd9Sstevel@tonic-gate{}
7567c478bd9Sstevel@tonic-gate
7577c478bd9Sstevel@tonic-gate#else	/* __lint */
7587c478bd9Sstevel@tonic-gate
7597c478bd9Sstevel@tonic-gate#if defined(__amd64)
7607c478bd9Sstevel@tonic-gate
7617c478bd9Sstevel@tonic-gate	ENTRY(resume_from_intr)
7627c478bd9Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %rax
7637c478bd9Sstevel@tonic-gate	leaq	resume_from_intr_return(%rip), %r11
7647c478bd9Sstevel@tonic-gate
7657c478bd9Sstevel@tonic-gate	/*
7667c478bd9Sstevel@tonic-gate	 * Save non-volatile registers, and set return address for current
7677c478bd9Sstevel@tonic-gate	 * thread to resume_from_intr_return.
7687c478bd9Sstevel@tonic-gate	 *
7697c478bd9Sstevel@tonic-gate	 * %r12 = t (new thread) when done
7707c478bd9Sstevel@tonic-gate	 */
7717c478bd9Sstevel@tonic-gate	SAVE_REGS(%rax, %r11)
7727c478bd9Sstevel@tonic-gate
7737c478bd9Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */
7747c478bd9Sstevel@tonic-gate	movq	%r12, %gs:CPU_THREAD	/* set CPU's thread pointer */
775*49dc33e3SBryan Cantrill	mfence				/* synchronize with mutex_exit() */
7767c478bd9Sstevel@tonic-gate	movq	T_SP(%r12), %rsp	/* restore resuming thread's sp */
7777c478bd9Sstevel@tonic-gate	xorl	%ebp, %ebp		/* make $<threadlist behave better */
7787c478bd9Sstevel@tonic-gate
7797c478bd9Sstevel@tonic-gate	/*
7807c478bd9Sstevel@tonic-gate	 * Unlock outgoing thread's mutex dispatched by another processor.
7817c478bd9Sstevel@tonic-gate	 */
7827c478bd9Sstevel@tonic-gate	xorl	%eax, %eax
7837c478bd9Sstevel@tonic-gate	xchgb	%al, T_LOCK(%r13)
7847c478bd9Sstevel@tonic-gate
785843e1988Sjohnlev	STORE_INTR_START(%r12)
7867c478bd9Sstevel@tonic-gate
7877c478bd9Sstevel@tonic-gate	/*
7887c478bd9Sstevel@tonic-gate	 * Restore non-volatile registers, then have spl0 return to the
7897c478bd9Sstevel@tonic-gate	 * resuming thread's PC after first setting the priority as low as
7907c478bd9Sstevel@tonic-gate	 * possible and blocking all interrupt threads that may be active.
7917c478bd9Sstevel@tonic-gate	 */
7927c478bd9Sstevel@tonic-gate	movq	T_PC(%r12), %rax	/* saved return addr */
7937c478bd9Sstevel@tonic-gate	RESTORE_REGS(%r11);
7947c478bd9Sstevel@tonic-gate	pushq	%rax			/* push return address for spl0() */
7957c478bd9Sstevel@tonic-gate	call	__dtrace_probe___sched_on__cpu
7967c478bd9Sstevel@tonic-gate	jmp	spl0
7977c478bd9Sstevel@tonic-gate
7987c478bd9Sstevel@tonic-gateresume_from_intr_return:
7997c478bd9Sstevel@tonic-gate	/*
8007c478bd9Sstevel@tonic-gate	 * Remove stack frame created in SAVE_REGS()
8017c478bd9Sstevel@tonic-gate	 */
8027c478bd9Sstevel@tonic-gate	addq 	$CLONGSIZE, %rsp
8037c478bd9Sstevel@tonic-gate	ret
8047c478bd9Sstevel@tonic-gate	SET_SIZE(resume_from_intr)
8057c478bd9Sstevel@tonic-gate
8067c478bd9Sstevel@tonic-gate#elif defined (__i386)
8077c478bd9Sstevel@tonic-gate
8087c478bd9Sstevel@tonic-gate	ENTRY(resume_from_intr)
8097c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %eax
8107c478bd9Sstevel@tonic-gate	movl	$resume_from_intr_return, %ecx
8117c478bd9Sstevel@tonic-gate
8127c478bd9Sstevel@tonic-gate	/*
8137c478bd9Sstevel@tonic-gate	 * Save non-volatile registers, and set return address for current
8147c478bd9Sstevel@tonic-gate	 * thread to resume_return.
8157c478bd9Sstevel@tonic-gate	 *
8167c478bd9Sstevel@tonic-gate	 * %edi = t (new thread) when done.
8177c478bd9Sstevel@tonic-gate	 */
8187c478bd9Sstevel@tonic-gate	SAVE_REGS(%eax, %ecx)
8197c478bd9Sstevel@tonic-gate
820ae115bc7Smrj#ifdef DEBUG
821ae115bc7Smrj	call	assert_ints_enabled	/* panics if we are cli'd */
822ae115bc7Smrj#endif
8237c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */
8247c478bd9Sstevel@tonic-gate	movl	%edi, %gs:CPU_THREAD	/* set CPU's thread pointer */
825*49dc33e3SBryan Cantrill	mfence				/* synchronize with mutex_exit() */
8267c478bd9Sstevel@tonic-gate	movl	T_SP(%edi), %esp	/* restore resuming thread's sp */
8277c478bd9Sstevel@tonic-gate	xorl	%ebp, %ebp		/* make $<threadlist behave better */
8287c478bd9Sstevel@tonic-gate
8297c478bd9Sstevel@tonic-gate	/*
8307c478bd9Sstevel@tonic-gate	 * Unlock outgoing thread's mutex dispatched by another processor.
8317c478bd9Sstevel@tonic-gate	 */
8327c478bd9Sstevel@tonic-gate	xorl	%eax,%eax
8337c478bd9Sstevel@tonic-gate	xchgb	%al, T_LOCK(%esi)
8347c478bd9Sstevel@tonic-gate
835843e1988Sjohnlev	STORE_INTR_START(%edi)
836843e1988Sjohnlev
8377c478bd9Sstevel@tonic-gate	/*
8387c478bd9Sstevel@tonic-gate	 * Restore non-volatile registers, then have spl0 return to the
8397c478bd9Sstevel@tonic-gate	 * resuming thread's PC after first setting the priority as low as
8407c478bd9Sstevel@tonic-gate	 * possible and blocking all interrupt threads that may be active.
8417c478bd9Sstevel@tonic-gate	 */
8427c478bd9Sstevel@tonic-gate	movl	T_PC(%edi), %eax	/* saved return addr */
8437c478bd9Sstevel@tonic-gate	RESTORE_REGS(%ecx)
8447c478bd9Sstevel@tonic-gate	pushl	%eax			/* push return address for spl0() */
8457c478bd9Sstevel@tonic-gate	call	__dtrace_probe___sched_on__cpu
8467c478bd9Sstevel@tonic-gate	jmp	spl0
8477c478bd9Sstevel@tonic-gate
8487c478bd9Sstevel@tonic-gateresume_from_intr_return:
8497c478bd9Sstevel@tonic-gate	/*
8507c478bd9Sstevel@tonic-gate	 * Remove stack frame created in SAVE_REGS()
8517c478bd9Sstevel@tonic-gate	 */
8527c478bd9Sstevel@tonic-gate	addl	$CLONGSIZE, %esp
8537c478bd9Sstevel@tonic-gate	ret
8547c478bd9Sstevel@tonic-gate	SET_SIZE(resume_from_intr)
8557c478bd9Sstevel@tonic-gate
8567c478bd9Sstevel@tonic-gate#endif	/* __amd64 */
8577c478bd9Sstevel@tonic-gate#endif /* __lint */
8587c478bd9Sstevel@tonic-gate
8597c478bd9Sstevel@tonic-gate#if defined(__lint)
8607c478bd9Sstevel@tonic-gate
8617c478bd9Sstevel@tonic-gatevoid
8627c478bd9Sstevel@tonic-gatethread_start(void)
8637c478bd9Sstevel@tonic-gate{}
8647c478bd9Sstevel@tonic-gate
8657c478bd9Sstevel@tonic-gate#else   /* __lint */
8667c478bd9Sstevel@tonic-gate
8677c478bd9Sstevel@tonic-gate#if defined(__amd64)
8687c478bd9Sstevel@tonic-gate
8697c478bd9Sstevel@tonic-gate	ENTRY(thread_start)
8707c478bd9Sstevel@tonic-gate	popq	%rax		/* start() */
8717c478bd9Sstevel@tonic-gate	popq	%rdi		/* arg */
8727c478bd9Sstevel@tonic-gate	popq	%rsi		/* len */
8737c478bd9Sstevel@tonic-gate	movq	%rsp, %rbp
8747c478bd9Sstevel@tonic-gate	call	*%rax
8757c478bd9Sstevel@tonic-gate	call	thread_exit	/* destroy thread if it returns. */
8767c478bd9Sstevel@tonic-gate	/*NOTREACHED*/
8777c478bd9Sstevel@tonic-gate	SET_SIZE(thread_start)
8787c478bd9Sstevel@tonic-gate
8797c478bd9Sstevel@tonic-gate#elif defined(__i386)
8807c478bd9Sstevel@tonic-gate
8817c478bd9Sstevel@tonic-gate	ENTRY(thread_start)
8827c478bd9Sstevel@tonic-gate	popl	%eax
8837c478bd9Sstevel@tonic-gate	movl	%esp, %ebp
8847c478bd9Sstevel@tonic-gate	addl	$8, %ebp
8857c478bd9Sstevel@tonic-gate	call	*%eax
8867c478bd9Sstevel@tonic-gate	addl	$8, %esp
8877c478bd9Sstevel@tonic-gate	call	thread_exit	/* destroy thread if it returns. */
8887c478bd9Sstevel@tonic-gate	/*NOTREACHED*/
8897c478bd9Sstevel@tonic-gate	SET_SIZE(thread_start)
8907c478bd9Sstevel@tonic-gate
8917c478bd9Sstevel@tonic-gate#endif	/* __i386 */
8927c478bd9Sstevel@tonic-gate
8937c478bd9Sstevel@tonic-gate#endif  /* __lint */
894