/* xref: /titanic_41/usr/src/uts/sun4/ml/swtch.s (revision 45916cd2fec6e79bca5dee0421bd39e3c2910d1e) */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Process switching routines.
 */

#if !defined(lint)
#include "assym.h"
#else	/* lint */
#include <sys/thread.h>
#endif	/* lint */

#include <sys/param.h>
#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <sys/pcb.h>
#include <sys/machthread.h>
#include <sys/privregs.h>
#include <sys/vtrace.h>
#include <vm/hat_sfmmu.h>

/*
 * resume(kthread_id_t)
 *
 * a thread can only run on one processor at a time. there
 * exists a window on MPs where the current thread on one
 * processor is capable of being dispatched by another processor.
 * some overlap between outgoing and incoming threads can happen
 * when they are the same thread. in this case where the threads
 * are the same, resume() on one processor will spin on the incoming
 * thread until resume() on the other processor has finished with
 * the outgoing thread.
 *
 * The MMU context changes when the resuming thread resides in a different
 * process.  Kernel threads are known by resume to reside in process 0.
 * The MMU context, therefore, only changes when resuming a thread in
 * a process different from curproc.
 *
 * resume_from_intr() is called when the thread being resumed was not
 * passivated by resume (e.g. was interrupted).  This means that the
 * resume lock is already held and that a restore context is not needed.
 * Also, the MMU context is not changed on the resume in this case.
 *
 * resume_from_zombie() is the same as resume except the calling thread
 * is a zombie and must be put on the deathrow list after the CPU is
 * off the stack.
 */

#if defined(lint)

/* ARGSUSED */
void
resume(kthread_id_t t)
{}

#else	/* lint */

	ENTRY(resume)
	save	%sp, -SA(MINFRAME), %sp		! save ins and locals

	call	__dtrace_probe___sched_off__cpu	! DTrace probe
	mov	%i0, %o0			! arg for DTrace probe

	membar	#Sync				! flush writebuffers
	flushw					! flushes all but this window

	stn	%i7, [THREAD_REG + T_PC]	! save return address
	stn	%fp, [THREAD_REG + T_SP]	! save sp

	!
	! Save GSR (Graphics Status Register).
	!
	! Read fprs, call fp_save if FPRS_FEF set.
	! This handles floating-point state saving.
	! The fprs could be turned on by hw bcopy software,
	! *or* by fp_disabled. Handle it either way.
	!
	ldn	[THREAD_REG + T_LWP], %o4	! get lwp pointer
	rd	%fprs, %g4			! read fprs
	brnz,pt	%o4, 0f				! if user thread skip
	  ldn	[THREAD_REG + T_CPU], %i1	! get CPU pointer

	!
	! kernel thread
	!
	! we save fprs at the beginning of the stack so we know
	! where to check at resume time
	ldn	[THREAD_REG + T_STACK], %i2
	ldn	[THREAD_REG + T_CTX], %g3	! get ctx pointer
	andcc	%g4, FPRS_FEF, %g0		! is FPRS_FEF set
	bz,pt	%icc, 1f			! nope, skip
	  st	%g4, [%i2 + SA(MINFRAME) + FPU_FPRS]	! save fprs

	! save kernel fp state in stack
	add	%i2, SA(MINFRAME), %o0		! o0 = kfpu_t ptr
	rd	%gsr, %g5
	call	fp_save
	stx	%g5, [%o0 + FPU_GSR]		! store GSR
	ba,a,pt	%icc, 1f
	  nop

0:
	! user thread
	! o4 = lwp ptr
	! g4 = fprs
	! i1 = CPU ptr
	ldn	[%o4 + LWP_FPU], %o0		! fp pointer
	stn	%fp, [THREAD_REG + T_SP]	! save sp
	andcc	%g4, FPRS_FEF, %g0		! is FPRS_FEF set
	st	%g4, [%o0 + FPU_FPRS]		! store FPRS
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g5
	ld	[%g5 + %lo(fpu_exists)], %g5
	brz,pn	%g5, 1f
	  ldn	[THREAD_REG + T_CTX], %g3	! get ctx pointer
#endif
	bz,pt	%icc, 1f			! most apps don't use fp
	  ldn	[THREAD_REG + T_CTX], %g3	! get ctx pointer
	ldn	[%o4 + LWP_FPU], %o0		! fp pointer
	rd	%gsr, %g5
	call	fp_save				! doesn't touch globals
	stx	%g5, [%o0 + FPU_GSR]		! store GSR
1:
	!
	! Perform context switch callback if set.
	! This handles coprocessor state saving.
	! i1 = cpu ptr
	! g3 = ctx pointer
	!
	wr	%g0, %g0, %fprs			! disable fpu and clear fprs
	brz,pt	%g3, 2f				! skip call when zero
	ldn	[%i0 + T_PROCP], %i3		! delay slot - get proc pointer
	call	savectx
	mov	THREAD_REG, %o0			! delay - arg = thread pointer
2:
	ldn	[THREAD_REG + T_PROCP], %i2	! load old curproc - for mmu

	!
	! Temporarily switch to idle thread's stack
	!
	ldn	[%i1 + CPU_IDLE_THREAD], %o0	! idle thread pointer
	ldn	[%o0 + T_SP], %o1		! get onto idle thread stack
	sub	%o1, SA(MINFRAME), %sp		! save room for ins and locals
	clr	%fp

	!
	! Set the idle thread as the current thread
	!
	mov	THREAD_REG, %l3			! save %g7 (current thread)
	mov	%o0, THREAD_REG			! set %g7 to idle
	stn	%o0, [%i1 + CPU_THREAD]		! set CPU's thread to idle

	!
	! Clear and unlock previous thread's t_lock
	! to allow it to be dispatched by another processor.
	!
	clrb	[%l3 + T_LOCK]			! clear tp->t_lock

	!
	! IMPORTANT: Registers at this point must be:
	!	%i0 = new thread
	!	%i1 = cpu pointer
	!	%i2 = old proc pointer
	!	%i3 = new proc pointer
	!
	! Here we are in the idle thread, have dropped the old thread.
	!
	ALTENTRY(_resume_from_idle)

	! SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3)
	SET_KCONTEXTREG(%o0, %g1, %g2, %g3, %o3, l1, l2, l3)

	cmp 	%i2, %i3		! resuming the same process?
	be,pt	%xcc, 5f		! yes.
	  nop
	ldx	[%i3 + P_AS], %o0	! load p->p_as
	ldx	[%o0 + A_HAT], %o3	! load (p->p_as)->a_hat
	! %o3 is live until the call to sfmmu_setctx_sec below

	!
	! update cpusran field
	!
	ld	[%i1 + CPU_ID], %o4
	add	%o3, SFMMU_CPUSRAN, %o5
	CPU_INDEXTOSET(%o5, %o4, %g1)
	ldx	[%o5], %o2		! o2 = cpusran field
	mov	1, %g2
	sllx	%g2, %o4, %o4		! o4 = bit for this cpu
	andcc	%o4, %o2, %g0
	bnz,pn	%xcc, 4f
	  nop
3:
	or	%o2, %o4, %o1		! or in this cpu's bit mask
	casx	[%o5], %o2, %o1
	cmp	%o2, %o1
	bne,a,pn %xcc, 3b
	  ldx	[%o5], %o2		! o2 = cpusran field
	membar	#LoadLoad|#StoreLoad

	!
	! Switch to different address space.
	!
4:
	rdpr	%pstate, %i4
	wrpr	%i4, PSTATE_IE, %pstate		! disable interrupts

	call	sfmmu_setctx_sec		! switch to other ctx (maybe 0)
	  lduh	[%o3 + SFMMU_CNUM], %o0
	call	sfmmu_load_mmustate		! program MMU registers
	  mov	%o3, %o0

	wrpr	%g0, %i4, %pstate		! enable interrupts

5:
	!
	! spin until dispatched thread's mutex has
	! been unlocked. this mutex is unlocked when
	! it becomes safe for the thread to run.
	!
	ldstub	[%i0 + T_LOCK], %o0	! lock curthread's t_lock
6:
	brnz,pn	%o0, 7f			! lock failed
	  ldx	[%i0 + T_PC], %i7	! delay - restore resuming thread's pc

	!
	! Fix CPU structure to indicate new running thread.
	! Set pointer in new thread to the CPU structure.
	! XXX - Move migration statistic out of here
	!
        ldx	[%i0 + T_CPU], %g2	! last CPU to run the new thread
        cmp     %g2, %i1		! test for migration
        be,pt	%xcc, 4f		! no migration
          ldn	[%i0 + T_LWP], %o1	! delay - get associated lwp (if any)
        ldx	[%i1 + CPU_STATS_SYS_CPUMIGRATE], %g2
        inc     %g2
        stx	%g2, [%i1 + CPU_STATS_SYS_CPUMIGRATE]
	stx	%i1, [%i0 + T_CPU]	! set new thread's CPU pointer
4:
	stx	%i0, [%i1 + CPU_THREAD]	! set CPU's thread pointer
	membar	#StoreLoad		! synchronize with mutex_exit()
	mov	%i0, THREAD_REG		! update global thread register
	stx	%o1, [%i1 + CPU_LWP]	! set CPU's lwp ptr
	brz,a,pn %o1, 1f		! if no lwp, branch and clr mpcb
	  stx	%g0, [%i1 + CPU_MPCB]
	!
	! user thread
	! o1 = lwp
	! i0 = new thread
	!
	ldx	[%i0 + T_STACK], %o0
	stx	%o0, [%i1 + CPU_MPCB]	! set CPU's mpcb pointer
#ifdef CPU_MPCB_PA
	ldx	[%o0 + MPCB_PA], %o0
	stx	%o0, [%i1 + CPU_MPCB_PA]
#endif
	! Switch to new thread's stack
	ldx	[%i0 + T_SP], %o0	! restore resuming thread's sp
	sub	%o0, SA(MINFRAME), %sp	! in case of intr or trap before restore
	mov	%o0, %fp
	!
	! Restore resuming thread's GSR reg and floating-point regs
	! Note that the ld to the gsr register ensures that the loading of
	! the floating point saved state has completed without necessity
	! of a membar #Sync.
	!
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g3
	ld	[%g3 + %lo(fpu_exists)], %g3
	brz,pn	%g3, 2f
	  ldx	[%i0 + T_CTX], %i5	! should resumed thread restorectx?
#endif
	ldx	[%o1 + LWP_FPU], %o0		! fp pointer
	ld	[%o0 + FPU_FPRS], %g5		! get fpu_fprs
	andcc	%g5, FPRS_FEF, %g0		! is FPRS_FEF set?
	bz,a,pt	%icc, 9f			! no, skip fp_restore
	  wr	%g0, FPRS_FEF, %fprs		! enable fprs so fp_zero works

	ldx	[THREAD_REG + T_CPU], %o4	! cpu pointer
	call	fp_restore
	  wr	%g5, %g0, %fprs			! enable fpu and restore fprs

	ldx	[%o0 + FPU_GSR], %g5		! load saved GSR data
	wr	%g5, %g0, %gsr			! restore %gsr data
	ba,pt	%icc,2f
	  ldx	[%i0 + T_CTX], %i5	! should resumed thread restorectx?

9:
	!
	! Zero resuming thread's fp registers, for *all* non-fp program
	! Remove all possibility of using the fp regs as a "covert channel".
	!
	call	fp_zero
	  wr	%g0, %g0, %gsr
	ldx	[%i0 + T_CTX], %i5	! should resumed thread restorectx?
	ba,pt	%icc, 2f
	  wr	%g0, %g0, %fprs			! disable fprs

1:
#ifdef CPU_MPCB_PA
	mov	-1, %o1
	stx	%o1, [%i1 + CPU_MPCB_PA]
#endif
	!
	! kernel thread
	! i0 = new thread
	!
	! Switch to new thread's stack
	!
	ldx	[%i0 + T_SP], %o0	! restore resuming thread's sp
	sub	%o0, SA(MINFRAME), %sp	! in case of intr or trap before restore
	mov	%o0, %fp
	!
	! Restore resuming thread's GSR reg and floating-point regs
	! Note that the ld to the gsr register ensures that the loading of
	! the floating point saved state has completed without necessity
	! of a membar #Sync.
	!
	ldx	[%i0 + T_STACK], %o0
	ld	[%o0 + SA(MINFRAME) + FPU_FPRS], %g5	! load fprs
	ldx	[%i0 + T_CTX], %i5		! should thread restorectx?
	andcc	%g5, FPRS_FEF, %g0		! did we save fp in stack?
	bz,a,pt	%icc, 2f
	  wr	%g0, %g0, %fprs			! clr fprs

	wr	%g5, %g0, %fprs			! enable fpu and restore fprs
	call	fp_restore
	add	%o0, SA(MINFRAME), %o0		! o0 = kfpu_t ptr
	ldx	[%o0 + FPU_GSR], %g5		! load saved GSR data
	wr	%g5, %g0, %gsr			! restore %gsr data

2:
	!
	! Restore resuming thread's context
	! i5 = ctx ptr
	!
	brz,a,pt %i5, 8f		! skip restorectx() when zero
	  ld	[%i1 + CPU_BASE_SPL], %o0
	call	restorectx		! thread can not sleep on temp stack
	  mov	THREAD_REG, %o0		! delay slot - arg = thread pointer
	!
	! Set priority as low as possible, blocking all interrupt threads
	! that may be active.
	!
	ld	[%i1 + CPU_BASE_SPL], %o0
8:
	wrpr	%o0, 0, %pil
	wrpr	%g0, WSTATE_KERN, %wstate
	!
	! If we are resuming an interrupt thread, store a starting timestamp
	! in the thread structure.
	!
	lduh	[THREAD_REG + T_FLAGS], %o0
	andcc	%o0, T_INTR_THREAD, %g0
	bnz,pn	%xcc, 0f
	  nop
5:
	call	__dtrace_probe___sched_on__cpu	! DTrace probe
	nop

	ret				! resume curthread
	restore
0:
	add	THREAD_REG, T_INTR_START, %o2
1:
	ldx	[%o2], %o1
	rdpr	%tick, %o0
	sllx	%o0, 1, %o0
	srlx	%o0, 1, %o0			! shift off NPT bit
	casx	[%o2], %o1, %o0
	cmp	%o0, %o1
	be,pt	%xcc, 5b
	  nop
	! If an interrupt occurred while we were attempting to store
	! the timestamp, try again.
	ba,pt	%xcc, 1b
	  nop

	!
	! lock failed - spin with regular load to avoid cache-thrashing.
	!
7:
	brnz,a,pt %o0, 7b		! spin while locked
	  ldub	[%i0 + T_LOCK], %o0
	ba	%xcc, 6b
	  ldstub  [%i0 + T_LOCK], %o0	! delay - lock curthread's mutex
	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
resume_from_zombie(kthread_id_t t)
{}

#else	/* lint */

	ENTRY(resume_from_zombie)
	save	%sp, -SA(MINFRAME), %sp		! save ins and locals

	call	__dtrace_probe___sched_off__cpu	! DTrace probe
	mov	%i0, %o0			! arg for DTrace probe

	ldn	[THREAD_REG + T_CPU], %i1	! cpu pointer

	flushw					! flushes all but this window
	ldn	[THREAD_REG + T_PROCP], %i2	! old procp for mmu ctx

	!
	! Temporarily switch to the idle thread's stack so that
	! the zombie thread's stack can be reclaimed by the reaper.
	!
	ldn	[%i1 + CPU_IDLE_THREAD], %o2	! idle thread pointer
	ldn	[%o2 + T_SP], %o1		! get onto idle thread stack
	sub	%o1, SA(MINFRAME), %sp		! save room for ins and locals
	clr	%fp
	!
	! Set the idle thread as the current thread.
	! Put the zombie on death-row.
	!
	mov	THREAD_REG, %o0			! save %g7 = curthread for arg
	mov	%o2, THREAD_REG			! set %g7 to idle
	stn	%g0, [%i1 + CPU_MPCB]		! clear mpcb
#ifdef CPU_MPCB_PA
	mov	-1, %o1
	stx	%o1, [%i1 + CPU_MPCB_PA]
#endif
	call	reapq_add			! reapq_add(old_thread);
	stn	%o2, [%i1 + CPU_THREAD]		! delay - CPU's thread = idle

	!
	! resume_from_idle args:
	!	%i0 = new thread
	!	%i1 = cpu
	!	%i2 = old proc
	!	%i3 = new proc
	!
	b	_resume_from_idle		! finish job of resume
	ldn	[%i0 + T_PROCP], %i3		! delay - new process
	SET_SIZE(resume_from_zombie)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
resume_from_intr(kthread_id_t t)
{}

#else	/* lint */

	ENTRY(resume_from_intr)
	save	%sp, -SA(MINFRAME), %sp		! save ins and locals

	flushw					! flushes all but this window
	stn	%fp, [THREAD_REG + T_SP]	! save sp
	stn	%i7, [THREAD_REG + T_PC]	! save return address

	ldn	[%i0 + T_PC], %i7		! restore resuming thread's pc
	ldn	[THREAD_REG + T_CPU], %i1	! cpu pointer

	!
	! Fix CPU structure to indicate new running thread.
	! The pinned thread we're resuming already has the CPU pointer set.
	!
	mov	THREAD_REG, %l3		! save old thread
	stn	%i0, [%i1 + CPU_THREAD]	! set CPU's thread pointer
	membar	#StoreLoad		! synchronize with mutex_exit()
	mov	%i0, THREAD_REG		! update global thread register

	!
	! Switch to new thread's stack
	!
	ldn	[THREAD_REG + T_SP], %o0	! restore resuming thread's sp
	sub	%o0, SA(MINFRAME), %sp ! in case of intr or trap before restore
	mov	%o0, %fp
	clrb	[%l3 + T_LOCK]		! clear intr thread's tp->t_lock

	!
	! If we are resuming an interrupt thread, store a timestamp in the
	! thread structure.
	!
	lduh	[THREAD_REG + T_FLAGS], %o0
	andcc	%o0, T_INTR_THREAD, %g0
	bnz,pn	%xcc, 0f
	!
	! We're resuming a non-interrupt thread.
	! Clear CPU_INTRCNT and check if cpu_kprunrun set?
	!
	ldub	[%i1 + CPU_KPRUNRUN], %o5	! delay
	brnz,pn	%o5, 3f				! call kpreempt(KPREEMPT_SYNC);
	stub	%g0, [%i1 + CPU_INTRCNT]
1:
	ret				! resume curthread
	restore
0:
	!
	! We're an interrupt thread. Update t_intr_start and cpu_intrcnt
	!
	add	THREAD_REG, T_INTR_START, %o2
2:
	ldx	[%o2], %o1
	rdpr	%tick, %o0
	sllx	%o0, 1, %o0
	srlx	%o0, 1, %o0			! shift off NPT bit
	casx	[%o2], %o1, %o0
	cmp	%o0, %o1
	bne,pn	%xcc, 2b
	ldn	[THREAD_REG + T_INTR], %l1	! delay
	! Reset cpu_intrcnt if we aren't pinning anyone
	brz,a,pt %l1, 2f
	stub	%g0, [%i1 + CPU_INTRCNT]
2:
	ba,pt	%xcc, 1b
	nop
3:
	!
	! We're a non-interrupt thread and cpu_kprunrun is set. call kpreempt.
	!
	call	kpreempt
	mov	KPREEMPT_SYNC, %o0
	ba,pt	%xcc, 1b
	nop
	SET_SIZE(resume_from_intr)

#endif /* lint */


/*
 * thread_start()
 *
 * the current register window was crafted by thread_run() to contain
 * an address of a procedure (in register %i7), and its args in registers
 * %i0 through %i5. a stack trace of this thread will show the procedure
 * that thread_start() invoked at the bottom of the stack. an exit routine
 * is stored in %l0 and called when the started thread returns from its
 * called procedure.
 */

#if defined(lint)

void
thread_start(void)
{}

#else	/* lint */

	ENTRY(thread_start)
	mov	%i0, %o0
	jmpl 	%i7, %o7	! call thread_run()'s start() procedure.
	mov	%i1, %o1	! delay - second arg

	call	thread_exit	! destroy thread if it returns.
	nop
	unimp 0			! should never get here
	SET_SIZE(thread_start)

#endif	/* lint */
