/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Process switching routines.
 */

#if defined(__lint)
#include <sys/thread.h>
#include <sys/systm.h>
#include <sys/time.h>
#else	/* __lint */
#include "assym.h"
#endif	/* __lint */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/stack.h>
#include <sys/segments.h>

/*
 * resume(thread_id_t t);
 *
 * A thread can only run on one processor at a time.  On MPs there
 * exists a window where the current thread on one processor is
 * capable of being dispatched by another processor.  Some overlap
 * between the outgoing and incoming threads can happen when they
 * are the same thread; in that case, resume() on one processor will
 * spin on the incoming thread until resume() on the other processor
 * has finished with the outgoing thread.
 *
 * The MMU context changes when the resuming thread resides in a different
 * process.  Kernel threads are known by resume to reside in process 0.
 * The MMU context, therefore, only changes when resuming a thread in
 * a process different from curproc.
 *
 * resume_from_intr() is called when the thread being resumed was not
 * passivated by resume (e.g. it was interrupted).  This means that the
 * resume lock is already held and that a restore context is not needed.
 * Also, the MMU context is not changed on the resume in this case.
 *
 * resume_from_zombie() is the same as resume() except that the calling
 * thread is a zombie and must be put on the deathrow list after the CPU
 * is off its stack.
 */
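
/*
 * As a rough, illustrative C-level sketch (not compilable; the helper
 * names are the ones actually called from the assembly below), resume()
 * does the following:
 *
 *	void
 *	resume(kthread_t *t)
 *	{
 *		kthread_t *old = curthread;
 *
 *		save old's non-volatile registers, %sp and return %pc;
 *		if (old->t_ctx != NULL)
 *			savectx(old);
 *		if (old->t_procp->p_pctx != NULL)
 *			savepctx(old->t_procp);
 *		switch onto the idle thread's stack;
 *		hat_switch(hat of t's process);	// MMU context, if needed
 *		old->t_lock = 0;		// old may now be dispatched
 *		spin until t->t_lock is acquired;
 *		cpu->cpu_thread = t;		// also TSS sp0, cpu_lwp, ...
 *		if (t->t_ctx != NULL)
 *			restorectx(t);
 *		if (t->t_procp->p_pctx != NULL)
 *			restorepctx(t->t_procp);
 *		restore t's non-volatile registers;
 *		spl0();				// returns to t's saved %pc
 *	}
 */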

#if !defined(__lint)

#if LWP_PCB_FPU != 0
#error LWP_PCB_FPU MUST be defined as 0 for code in swtch.s to work
#endif	/* LWP_PCB_FPU != 0 */

#endif	/* !__lint */

#if defined(__amd64)

/*
 * Save non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
 *
 * The stack frame must be created before the save of %rsp so that tracebacks
 * of swtch()ed-out processes show the process as having last called swtch().
 */
#define	SAVE_REGS(thread_t, retaddr)			\
	movq	%rbp, T_RBP(thread_t);			\
	movq	%rbx, T_RBX(thread_t);			\
	movq	%r12, T_R12(thread_t);			\
	movq	%r13, T_R13(thread_t);			\
	movq	%r14, T_R14(thread_t);			\
	movq	%r15, T_R15(thread_t);			\
	pushq	%rbp;					\
	movq	%rsp, %rbp;				\
	movq	%rsp, T_SP(thread_t);			\
	movq	retaddr, T_PC(thread_t);		\
	movq	%rdi, %r12;				\
	call	__dtrace_probe___sched_off__cpu

/*
 * Restore non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
 *
 * We load up %rsp from the label_t as part of the context switch, so
 * we don't repeat that here.
 *
 * We don't do a 'leave,' because reloading %rsp/%rbp from the label_t
 * already has the effect of putting the stack back the way it was when
 * we came in.
 */
#define	RESTORE_REGS(scratch_reg)			\
	movq	%gs:CPU_THREAD, scratch_reg;		\
	movq	T_RBP(scratch_reg), %rbp;		\
	movq	T_RBX(scratch_reg), %rbx;		\
	movq	T_R12(scratch_reg), %r12;		\
	movq	T_R13(scratch_reg), %r13;		\
	movq	T_R14(scratch_reg), %r14;		\
	movq	T_R15(scratch_reg), %r15

/*
 * Get pointer to a thread's hat structure
 */
#define	GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movq	T_PROCP(thread_t), hatp;		\
	movq	P_AS(hatp), scratch_reg;		\
	movq	A_HAT(scratch_reg), hatp
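
/*
 * In C terms, GET_THREAD_HATP(hatp, t, scratch) above is simply
 *	hatp = t->t_procp->p_as->a_hat;
 * using the structure offsets generated in assym.h.
 */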

#elif defined (__i386)

/*
 * Save non-volatile registers (%ebp, %esi, %edi and %ebx)
 *
 * The stack frame must be created before the save of %esp so that tracebacks
 * of swtch()ed-out processes show the process as having last called swtch().
 */
#define	SAVE_REGS(thread_t, retaddr)			\
	movl	%ebp, T_EBP(thread_t);			\
	movl	%ebx, T_EBX(thread_t);			\
	movl	%esi, T_ESI(thread_t);			\
	movl	%edi, T_EDI(thread_t);			\
	pushl	%ebp;					\
	movl	%esp, %ebp;				\
	movl	%esp, T_SP(thread_t);			\
	movl	retaddr, T_PC(thread_t);		\
	movl	8(%ebp), %edi;				\
	pushl	%edi;					\
	call	__dtrace_probe___sched_off__cpu;	\
	addl	$CLONGSIZE, %esp

/*
 * Restore non-volatile registers (%ebp, %esi, %edi and %ebx)
 *
 * We don't do a 'leave,' because reloading %esp/%ebp from the label_t
 * already has the effect of putting the stack back the way it was when
 * we came in.
 */
#define	RESTORE_REGS(scratch_reg)			\
	movl	%gs:CPU_THREAD, scratch_reg;		\
	movl	T_EBP(scratch_reg), %ebp;		\
	movl	T_EBX(scratch_reg), %ebx;		\
	movl	T_ESI(scratch_reg), %esi;		\
	movl	T_EDI(scratch_reg), %edi

/*
 * Get pointer to a thread's hat structure
 */
#define	GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movl	T_PROCP(thread_t), hatp;		\
	movl	P_AS(hatp), scratch_reg;		\
	movl	A_HAT(scratch_reg), hatp

#endif	/* __amd64 */

#if defined(__lint)

/* ARGSUSED */
void
resume(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(resume)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	LOADCPU(%r15)				/* %r15 = CPU */
	movq	CPU_THREAD(%r15), %r13		/* %r13 = curthread */

	/*
	 * Call savectx if thread has installed context ops.
	 *
	 * Note that if we have floating point context, the save op
	 * (either fpsave_begin or fpxsave_begin) will issue the
	 * async save instruction (fnsave or fxsave respectively)
	 * that we fwait for below.
	 */
	cmpq	$0, T_CTX(%r13)		/* should current thread savectx? */
	je	.nosavectx		/* skip call when zero */

	movq	%r13, %rdi		/* arg = thread pointer */
	call	savectx			/* call ctx ops */
.nosavectx:

	/*
	 * Call savepctx if process has installed context ops.
	 */
	movq	T_PROCP(%r13), %r14	/* %r14 = proc */
	cmpq	$0, P_PCTX(%r14)	/* should current thread savepctx? */
	je	.nosavepctx		/* skip call when zero */

	movq	%r14, %rdi		/* arg = proc pointer */
	call	savepctx		/* call ctx ops */
.nosavepctx:

	/*
	 * Temporarily switch to the idle thread's stack
	 */
	movq	CPU_IDLE_THREAD(%r15), %rax	/* idle thread pointer */

	/*
	 * Set the idle thread as the current thread
	 */
	movq	T_SP(%rax), %rsp	/* It is safe to set rsp */
	movq	%rax, CPU_THREAD(%r15)

	/*
	 * Switch in the hat context for the new thread
	 */
	GET_THREAD_HATP(%rdi, %r12, %r11)
	call	hat_switch

	/*
	 * Clear and unlock previous thread's t_lock
	 * to allow it to be dispatched by another processor.
	 */
	movb	$0, T_LOCK(%r13)

	/*
	 * IMPORTANT: Registers at this point must be:
	 *       %r12 = new thread
	 *
	 * Here we are in the idle thread, have dropped the old thread.
	 */
	ALTENTRY(_resume_from_idle)
	/*
	 * spin until dispatched thread's mutex has
	 * been unlocked. this mutex is unlocked when
	 * it becomes safe for the thread to run.
	 */
.lock_thread_mutex:
	lock
	btsl	$0, T_LOCK(%r12)	/* attempt to lock new thread's mutex */
	jnc	.thread_mutex_locked	/* got it */

.spin_thread_mutex:
	pause
	cmpb	$0, T_LOCK(%r12)	/* check mutex status */
	jz	.lock_thread_mutex	/* clear, retry lock */
	jmp	.spin_thread_mutex	/* still locked, spin... */
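
	/*
	 * In effect (illustrative only), the lock/spin sequence above is:
	 *
	 *	while (atomic test-and-set of t->t_lock fails)
	 *		while (t->t_lock != 0)
	 *			pause;
	 */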

.thread_mutex_locked:
	/*
	 * Fix CPU structure to indicate new running thread.
	 * Set pointer in new thread to the CPU structure.
	 */
	LOADCPU(%r13)			/* load current CPU pointer */
	cmpq	%r13, T_CPU(%r12)
	je	.setup_cpu

	/* cp->cpu_stats.sys.cpumigrate++ */
	incq	CPU_STATS_SYS_CPUMIGRATE(%r13)
	movq	%r13, T_CPU(%r12)	/* set new thread's CPU pointer */

.setup_cpu:
	/*
	 * Set up rsp0 (kernel stack) in the TSS to point at curthread's stack.
	 * (Note: since we don't have a saved 'regs' structure for all
	 *	  threads, we can't easily determine whether we need to
	 *	  change rsp0.  So we simply set rsp0 to the bottom of the
	 *	  thread stack, which works for all cases.)
	 *
	 * XX64 - Is this correct?
	 */
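	/*
	 * (The processor reloads rsp0 from the TSS when it takes an
	 * interrupt or exception while running at user privilege, so
	 * rsp0 must always point at the current thread's kernel stack.)
	 */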
	movq	CPU_TSS(%r13), %r14
	movq	T_STACK(%r12), %rax
	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
	movq	%rax, TSS_RSP0(%r14)

	movq	%r12, CPU_THREAD(%r13)	/* set CPU's thread pointer */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	movq	T_LWP(%r12), %rax	/* set associated lwp to  */
	movq	%rax, CPU_LWP(%r13)	/* CPU's lwp ptr */

	movq	T_SP(%r12), %rsp	/* switch to resuming thread's stack */
	movq	T_PC(%r12), %r13	/* saved return addr */

	/*
	 * Call restorectx if context ops have been installed.
	 */
	cmpq	$0, T_CTX(%r12)		/* should resumed thread restorectx? */
	jz	.norestorectx		/* skip call when zero */
	movq	%r12, %rdi		/* arg = thread pointer */
	call	restorectx		/* call ctx ops */
.norestorectx:

	/*
	 * Call restorepctx if context ops have been installed for the proc.
	 */
	movq	T_PROCP(%r12), %rcx
	cmpq	$0, P_PCTX(%rcx)
	jz	.norestorepctx
	movq	%rcx, %rdi
	call	restorepctx
.norestorepctx:

	/*
	 * If we are resuming an interrupt thread, store a timestamp
	 * in the thread structure.
	 */
	testw	$T_INTR_THREAD, T_FLAGS(%r12)
	jz	1f

0:
	/*
	 * If an interrupt occurs between the rdtsc instruction and its
	 * subsequent store, the timestamp will be stale by the time it is
	 * stored. We can detect this by doing a compare-and-swap on the
	 * thread's timestamp, since any interrupt occurring in this window
	 * will put a new timestamp in the thread's t_intr_start field.
	 */
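	/*
	 * Illustrative C-level equivalent of the update below (the actual
	 * code uses cmpxchgq directly):
	 *
	 *	do {
	 *		old = t->t_intr_start;
	 *		new = rdtsc();
	 *	} while (compare-and-swap of t->t_intr_start
	 *	    from old to new fails);
	 */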
	movq	T_INTR_START(%r12), %rcx
	rdtsc

	/*
	 * After rdtsc:
	 *     High 32 bits of TC are in %edx
	 *     Low 32 bits of TC are in %eax
	 */
	shlq	$32, %rdx
	movl	%eax, %r14d
	orq	%rdx, %r14
	movq	%rcx, %rax
	cmpxchgq %r14, T_INTR_START(%r12)
	jnz	0b
1:
	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movq	%r13, %rax	/* save return address */
	RESTORE_REGS(%r11)
	pushq	%rax		/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

#elif defined (__i386)

	ENTRY(resume)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

	LOADCPU(%ebx)			/* %ebx = CPU */
	movl	CPU_THREAD(%ebx), %esi	/* %esi = curthread */

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	/*
	 * Call savectx if thread has installed context ops.
	 *
	 * Note that if we have floating point context, the save op
	 * (either fpsave_begin or fpxsave_begin) will issue the
	 * async save instruction (fnsave or fxsave respectively)
	 * that we fwait for below.
	 */
	movl	T_CTX(%esi), %eax	/* should current thread savectx? */
	testl	%eax, %eax
	jz	.nosavectx		/* skip call when zero */
	pushl	%esi			/* arg = thread pointer */
	call	savectx			/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.nosavectx:

	/*
	 * Call savepctx if process has installed context ops.
	 */
	movl	T_PROCP(%esi), %eax	/* %eax = proc */
	cmpl	$0, P_PCTX(%eax)	/* should current thread savepctx? */
	je	.nosavepctx		/* skip call when zero */
	pushl	%eax			/* arg = proc pointer */
	call	savepctx		/* call ctx ops */
	addl	$4, %esp
.nosavepctx:
427
428	/*
429	 * Temporarily switch to the idle thread's stack
430	 */
431	movl	CPU_IDLE_THREAD(%ebx), %eax 	/* idle thread pointer */
432
433	/*
434	 * Set the idle thread as the current thread
435	 */
436	movl	T_SP(%eax), %esp	/* It is safe to set esp */
437	movl	%eax, CPU_THREAD(%ebx)
438
439	/* switch in the hat context for the new thread */
440	GET_THREAD_HATP(%ecx, %edi, %ecx)
441	pushl	%ecx
442	call	hat_switch
443	addl	$4, %esp
444
445	/*
446	 * Clear and unlock previous thread's t_lock
447	 * to allow it to be dispatched by another processor.
448	 */
449	movb	$0, T_LOCK(%esi)
450
451	/*
452	 * IMPORTANT: Registers at this point must be:
453	 *       %edi = new thread
454	 *
455	 * Here we are in the idle thread, have dropped the old thread.
456	 */
457	ALTENTRY(_resume_from_idle)
458	/*
459	 * spin until dispatched thread's mutex has
460	 * been unlocked. this mutex is unlocked when
461	 * it becomes safe for the thread to run.
462	 */
463.L4:
464	lock
465	btsl	$0, T_LOCK(%edi) /* lock new thread's mutex */
466	jc	.L4_2			/* lock did not succeed */
467
468	/*
469	 * Fix CPU structure to indicate new running thread.
470	 * Set pointer in new thread to the CPU structure.
471	 */
472	LOADCPU(%esi)			/* load current CPU pointer */
	movl	T_STACK(%edi), %eax	/* loaded here to use the Pentium's */
					/* v pipeline; used a few lines below */
	cmpl	%esi, T_CPU(%edi)
	jne	.L5_2
.L5_1:
	/*
	 * Set up esp0 (kernel stack) in the TSS to point at curthread's stack.
	 * (Note: since we don't have a saved 'regs' structure for all
	 *	  threads, we can't easily determine whether we need to
	 *	  change esp0.  So we simply set esp0 to the bottom of the
	 *	  thread stack, which works for all cases.)
	 */
	movl	CPU_TSS(%esi), %ecx
	addl	$REGSIZE+MINFRAME, %eax	/* to the bottom of thread stack */
	movl	%eax, TSS_ESP0(%ecx)

	movl	%edi, CPU_THREAD(%esi)	/* set CPU's thread pointer */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	movl	T_LWP(%edi), %eax	/* set associated lwp to  */
	movl	%eax, CPU_LWP(%esi)	/* CPU's lwp ptr */

	movl	T_SP(%edi), %esp	/* switch to resuming thread's stack */
	movl	T_PC(%edi), %esi	/* saved return addr */

	/*
	 * Call restorectx if context ops have been installed.
	 */
	movl	T_CTX(%edi), %eax	/* should resumed thread restorectx? */
	testl	%eax, %eax
	jz	.norestorectx		/* skip call when zero */
	pushl	%edi			/* arg = thread pointer */
	call	restorectx		/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.norestorectx:

	/*
	 * Call restorepctx if context ops have been installed for the proc.
	 */
	movl	T_PROCP(%edi), %eax
	cmpl	$0, P_PCTX(%eax)
	je	.norestorepctx
	pushl	%eax			/* arg = proc pointer */
	call	restorepctx
	addl	$4, %esp		/* restore stack pointer */
.norestorepctx:

	/*
	 * If we are resuming an interrupt thread, store a timestamp
	 * in the thread structure.
	 */
	testw	$T_INTR_THREAD, T_FLAGS(%edi)
	jz	1f
	pushl	%ecx
0:
	/*
	 * If an interrupt occurs between the rdtsc instruction and its
	 * subsequent store, the timestamp will be stale by the time it is
	 * stored. We can detect this by doing a compare-and-swap on the
	 * thread's timestamp, since any interrupt occurring in this window
	 * will put a new timestamp in the thread's t_intr_start field.
	 */
	pushl	T_INTR_START(%edi)
	pushl	T_INTR_START+4(%edi)
	.globl	_tsc_patch15
_tsc_patch15:
	nop; nop			/* patched to rdtsc if available */
	movl	%eax, %ebx
	movl	%edx, %ecx
	popl	%edx
	popl	%eax
	cmpxchg8b T_INTR_START(%edi)
	jnz	0b
	popl	%ecx
1:
	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movl	%esi, %eax		/* save return address */
	RESTORE_REGS(%ecx)
	pushl	%eax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret

.L4_2:
	pause
	cmpb	$0, T_LOCK(%edi)
	je	.L4
	jmp	.L4_2

.L5_2:
	/* cp->cpu_stats.sys.cpumigrate++ */
	addl	$1, CPU_STATS_SYS_CPUMIGRATE(%esi)
	adcl	$0, CPU_STATS_SYS_CPUMIGRATE+4(%esi)
	movl	%esi, T_CPU(%edi)	/* set new thread's CPU pointer */
	jmp	.L5_1

	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

#endif	/* __amd64 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
void
resume_from_zombie(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(resume_from_zombie)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_from_zombie_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_zombie_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */

	/* clean up the fp unit. It might be left enabled */
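	/*
	 * (With CR0.TS set, the next FP instruction will trap with #NM,
	 * so any state the zombie left in the FPU cannot be used by
	 * accident.)
	 */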
	movq	%cr0, %rax
	testq	$CR0_TS, %rax
	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
	fninit				/* init fpu & discard pending error */
	orq	$CR0_TS, %rax
	movq	%rax, %cr0
.zfpu_disabled:

	/*
	 * Temporarily switch to the idle thread's stack so that the zombie
	 * thread's stack can be reclaimed by the reaper.
	 */
	movq	%gs:CPU_IDLE_THREAD, %rax /* idle thread pointer */
	movq	T_SP(%rax), %rsp	/* get onto idle thread stack */

	/*
	 * Sigh. If the idle thread has never run thread_start()
	 * then t_sp is mis-aligned by thread_load().
	 */
	andq	$_BITNOT(STACK_ALIGN-1), %rsp

	/*
	 * Set the idle thread as the current thread.
	 */
	movq	%rax, %gs:CPU_THREAD

	/* switch in the hat context for the new thread */
	GET_THREAD_HATP(%rdi, %r12, %r11)
	call	hat_switch

	/*
	 * Put the zombie on death-row.
	 */
	movq	%r13, %rdi
	call	reapq_add

	jmp	_resume_from_idle	/* finish job of resume */

resume_from_zombie_return:
	RESTORE_REGS(%r11)		/* restore non-volatile registers */
	call	__dtrace_probe___sched_on__cpu

	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(resume_from_zombie)

#elif defined (__i386)

	ENTRY(resume_from_zombie)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_from_zombie_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_zombie_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */

	/* clean up the fp unit. It might be left enabled */

	movl	%cr0, %eax
	testl	$CR0_TS, %eax
	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
	fninit				/* init fpu & discard pending error */
	orl	$CR0_TS, %eax
	movl	%eax, %cr0
.zfpu_disabled:

	/*
	 * Temporarily switch to the idle thread's stack so that the zombie
	 * thread's stack can be reclaimed by the reaper.
	 */
	movl	%gs:CPU_IDLE_THREAD, %eax /* idle thread pointer */
	movl	T_SP(%eax), %esp	/* get onto idle thread stack */

	/*
	 * Set the idle thread as the current thread.
	 */
	movl	%eax, %gs:CPU_THREAD

	/*
	 * switch in the hat context for the new thread
	 */
	GET_THREAD_HATP(%ecx, %edi, %ecx)
	pushl	%ecx
	call	hat_switch
	addl	$4, %esp

	/*
	 * Put the zombie on death-row.
	 */
	pushl	%esi
	call	reapq_add
	addl	$4, %esp
	jmp	_resume_from_idle	/* finish job of resume */

resume_from_zombie_return:
	RESTORE_REGS(%ecx)		/* restore non-volatile registers */
	call	__dtrace_probe___sched_on__cpu

	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
	SET_SIZE(resume_from_zombie)

#endif	/* __amd64 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
void
resume_from_intr(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(resume_from_intr)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_from_intr_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_intr_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */
	movq	%r12, %gs:CPU_THREAD	/* set CPU's thread pointer */
	movq	T_SP(%r12), %rsp	/* restore resuming thread's sp */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */

	/*
	 * Unlock the outgoing thread's mutex so that it can be dispatched
	 * by another processor.
	 */
	xorl	%eax, %eax
	xchgb	%al, T_LOCK(%r13)
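	/*
	 * (xchg with a memory operand is implicitly locked, so this both
	 * clears t_lock and acts as a memory barrier for the preceding
	 * stores.)
	 */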

	/*
	 * If we are resuming an interrupt thread, store a timestamp in
	 * the thread structure.
	 */
	testw	$T_INTR_THREAD, T_FLAGS(%r12)
	jz	1f
0:
	/*
	 * If an interrupt occurs between the rdtsc instruction and its
	 * subsequent store, the timestamp will be stale by the time it is
	 * stored. We can detect this by doing a compare-and-swap on the
	 * thread's timestamp, since any interrupt occurring in this window
	 * will put a new timestamp in the thread's t_intr_start field.
	 */
	movq	T_INTR_START(%r12), %rcx
	rdtsc

	/*
	 * After rdtsc:
	 *     High 32 bits of TC are in %edx
	 *     Low 32 bits of TC are in %eax
	 */
	shlq	$32, %rdx
	movl	%eax, %r14d
	orq	%rdx, %r14
	movq	%rcx, %rax
	cmpxchgq %r14, T_INTR_START(%r12)
	jnz	0b
1:
	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movq	T_PC(%r12), %rax	/* saved return addr */
	RESTORE_REGS(%r11)
	pushq	%rax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_from_intr_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(resume_from_intr)

#elif defined (__i386)

	ENTRY(resume_from_intr)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_from_intr_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_intr_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */
	movl	%edi, %gs:CPU_THREAD	/* set CPU's thread pointer */
	movl	T_SP(%edi), %esp	/* restore resuming thread's sp */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */

	/*
	 * Unlock the outgoing thread's mutex so that it can be dispatched
	 * by another processor.
	 */
	xorl	%eax, %eax
	xchgb	%al, T_LOCK(%esi)

	/*
	 * If we are resuming an interrupt thread, store a timestamp in
	 * the thread structure.
	 */
	testw	$T_INTR_THREAD, T_FLAGS(%edi)
	jz	1f
0:
	/*
	 * If an interrupt occurs between the rdtsc instruction and its
	 * subsequent store, the timestamp will be stale by the time it is
	 * stored. We can detect this by doing a compare-and-swap on the
	 * thread's timestamp, since any interrupt occurring in this window
	 * will put a new timestamp in the thread's t_intr_start field.
	 */
	pushl	T_INTR_START(%edi)
	pushl	T_INTR_START+4(%edi)
	.globl	_tsc_patch16
_tsc_patch16:
	nop; nop			/* patched to rdtsc if available */
	movl	%eax, %ebx
	movl	%edx, %ecx
	popl	%edx
	popl	%eax
	cmpxchg8b T_INTR_START(%edi)
	jnz	0b
1:
	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movl	T_PC(%edi), %eax	/* saved return addr */
	RESTORE_REGS(%ecx)
	pushl	%eax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_from_intr_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
	SET_SIZE(resume_from_intr)

#endif	/* __amd64 */
#endif	/* __lint */

#if defined(__lint)

void
thread_start(void)
{}

#else	/* __lint */

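/*
 * thread_start() is the first code a newly created kernel thread runs;
 * the stack it inherits (set up by thread_load()) carries the start
 * function and its two arguments.  Conceptually it is just
 * (*start)(arg, len), followed by thread_exit() if start() ever returns.
 */
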
#if defined(__amd64)

	ENTRY(thread_start)
	popq	%rax		/* start() */
	popq	%rdi		/* arg */
	popq	%rsi		/* len */
	movq	%rsp, %rbp
	call	*%rax
	call	thread_exit	/* destroy thread if it returns. */
	/*NOTREACHED*/
	SET_SIZE(thread_start)

#elif defined(__i386)

	ENTRY(thread_start)
	popl	%eax		/* start(); arg and len remain on the stack */
	movl	%esp, %ebp
	addl	$8, %ebp
	call	*%eax		/* start(arg, len) */
	addl	$8, %esp	/* pop arg and len */
	call	thread_exit	/* destroy thread if it returns. */
	/*NOTREACHED*/
	SET_SIZE(thread_start)

#endif	/* __i386 */

#endif	/* __lint */