/* xref: /illumos-gate/usr/src/uts/sun4/ml/swtch.S (revision 8ab441ef78c0aa3a5e87b38d2f5a0b5316b6da11) */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Process switching routines.
 */

#include "assym.h"

#include <sys/param.h>
#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <sys/pcb.h>
#include <sys/machthread.h>
#include <sys/machclock.h>
#include <sys/privregs.h>
#include <sys/vtrace.h>
#include <vm/hat_sfmmu.h>

/*
 * resume(kthread_id_t)
 *
 * A thread can only run on one processor at a time.  There
 * exists a window on MPs where the current thread on one
 * processor is capable of being dispatched by another processor.
 * Some overlap between outgoing and incoming threads can happen
 * when they are the same thread.  In this case, where the threads
 * are the same, resume() on one processor will spin on the incoming
 * thread until resume() on the other processor has finished with
 * the outgoing thread.
 *
 * The MMU context changes when the resuming thread resides in a different
 * process.  Kernel threads are known by resume to reside in process 0.
 * The MMU context, therefore, only changes when resuming a thread in
 * a process different from curproc.
 *
 * resume_from_intr() is called when the thread being resumed was not
 * passivated by resume (e.g. was interrupted).  This means that the
 * resume lock is already held and that a restore context is not needed.
 * Also, the MMU context is not changed on the resume in this case.
 *
 * resume_from_zombie() is the same as resume except the calling thread
 * is a zombie and must be put on the deathrow list after the CPU is
 * off the stack.
 */

!
	! resume(kthread_id_t t)
	!	%i0 = thread to resume
	!
	! Save the outgoing thread's PC/SP and FP state, switch to the
	! idle thread's stack, release the outgoing thread's t_lock,
	! switch the MMU context if the process changes, then acquire the
	! incoming thread's t_lock and restore its stack and FP state.
	!
	ENTRY(resume)
	save	%sp, -SA(MINFRAME), %sp		! save ins and locals

	call	__dtrace_probe___sched_off__cpu	! DTrace probe
	mov	%i0, %o0			! arg for DTrace probe

	membar	#Sync				! flush writebuffers
	flushw					! flushes all but this window

	stn	%i7, [THREAD_REG + T_PC]	! save return address
	stn	%fp, [THREAD_REG + T_SP]	! save sp

	!
	! Save GSR (Graphics Status Register).
	!
	! Read fprs, call fp_save if FPRS_FEF set.
	! This handles floating-point state saving.
	! The fprs could be turned on by hw bcopy software,
	! *or* by fp_disabled. Handle it either way.
	!
	ldn	[THREAD_REG + T_LWP], %o4	! get lwp pointer
	rd	%fprs, %g4			! read fprs
	brnz,pt	%o4, 0f				! if user thread skip
	  ldn	[THREAD_REG + T_CPU], %i1	! get CPU pointer

	!
	! kernel thread
	!
	! we save fprs at the beginning of the stack so we know
	! where to check at resume time
	ldn	[THREAD_REG + T_STACK], %i2
	ldn	[THREAD_REG + T_CTX], %g3	! get ctx pointer
	andcc	%g4, FPRS_FEF, %g0		! is FPRS_FEF set
	bz,pt	%icc, 1f			! nope, skip
	  st	%g4, [%i2 + SA(MINFRAME) + FPU_FPRS]	! save fprs

	! save kernel fp state in stack
	add	%i2, SA(MINFRAME), %o0		! o0 = kfpu_t ptr
	rd	%gsr, %g5
	call	fp_save
	stx	%g5, [%o0 + FPU_GSR]		! store GSR
	ba,a,pt	%icc, 1f
	  nop

0:
	! user thread
	! o4 = lwp ptr
	! g4 = fprs
	! i1 = CPU ptr
	ldn	[%o4 + LWP_FPU], %o0		! fp pointer
	stn	%fp, [THREAD_REG + T_SP]	! save sp
	andcc	%g4, FPRS_FEF, %g0		! is FPRS_FEF set
	st	%g4, [%o0 + FPU_FPRS]		! store FPRS
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g5
	ld	[%g5 + %lo(fpu_exists)], %g5
	brz,pn	%g5, 1f
	  ldn	[THREAD_REG + T_CTX], %g3	! get ctx pointer
#endif
	bz,pt	%icc, 1f			! most apps don't use fp
	  ldn	[THREAD_REG + T_CTX], %g3	! get ctx pointer
	ldn	[%o4 + LWP_FPU], %o0		! fp pointer
	rd	%gsr, %g5
	call	fp_save				! doesn't touch globals
	stx	%g5, [%o0 + FPU_GSR]		! store GSR
1:
	!
	! Perform context switch callback if set.
	! This handles coprocessor state saving.
	! i1 = cpu ptr
	! g3 = ctx pointer
	!
	wr	%g0, %g0, %fprs			! disable fpu and clear fprs
	brz,pt	%g3, 2f				! skip call when zero
	ldn	[%i0 + T_PROCP], %i3		! delay slot - get proc pointer
	call	savectx
	mov	THREAD_REG, %o0			! delay - arg = thread pointer
2:
	ldn	[THREAD_REG + T_PROCP], %i2	! load old curproc - for mmu

	!
	! Temporarily switch to idle thread's stack
	!
	ldn	[%i1 + CPU_IDLE_THREAD], %o0	! idle thread pointer
	ldn	[%o0 + T_SP], %o1		! get onto idle thread stack
	sub	%o1, SA(MINFRAME), %sp		! save room for ins and locals
	clr	%fp

	!
	! Set the idle thread as the current thread
	!
	mov	THREAD_REG, %l3			! save %g7 (current thread)
	mov	%o0, THREAD_REG			! set %g7 to idle
	stn	%o0, [%i1 + CPU_THREAD]		! set CPU's thread to idle

	!
	! Clear and unlock previous thread's t_lock
	! to allow it to be dispatched by another processor.
	!
	clrb	[%l3 + T_LOCK]			! clear tp->t_lock

	!
	! IMPORTANT: Registers at this point must be:
	!	%i0 = new thread
	!	%i1 = cpu pointer
	!	%i2 = old proc pointer
	!	%i3 = new proc pointer
	!
	! Here we are in the idle thread, have dropped the old thread.
	!
	ALTENTRY(_resume_from_idle)

	! SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3)
	SET_KCONTEXTREG(%o0, %g1, %g2, %g3, %o3, l1, l2, l3)

	cmp 	%i2, %i3		! resuming the same process?
	be,pt	%xcc, 5f		! yes.
	  nop

	ldx	[%i3 + P_AS], %o0	! load p->p_as
	ldx	[%o0 + A_HAT], %i5	! %i5 = new proc hat

	!
	! update cpusran field
	!
	ld	[%i1 + CPU_ID], %o4
	add	%i5, SFMMU_CPUSRAN, %o5
	CPU_INDEXTOSET(%o5, %o4, %g1)
	ldx	[%o5], %o2		! %o2 = cpusran field
	mov	1, %g2
	sllx	%g2, %o4, %o4		! %o4 = bit for this cpu
	andcc	%o4, %o2, %g0
	bnz,pn	%xcc, 0f		! bit already set, go to 0
	  nop
3:
	or	%o2, %o4, %o1		! or in this cpu's bit mask
	casx	[%o5], %o2, %o1
	cmp	%o2, %o1
	bne,a,pn %xcc, 3b
	  ldx	[%o5], %o2		! o2 = cpusran field
	membar	#LoadLoad|#StoreLoad

0:
	!
	! disable interrupts
	!
	! if resume from user to kernel thread
	!	call sfmmu_setctx_sec
	! if resume from kernel (or a different user) thread to user thread
	!	call sfmmu_alloc_ctx
	! sfmmu_load_mmustate
	!
	! enable interrupts
	!
	! %i5 = new proc hat
	!

	sethi	%hi(ksfmmup), %o2
        ldx	[%o2 + %lo(ksfmmup)], %o2

	rdpr	%pstate, %i4
        cmp	%i5, %o2		! new proc hat == ksfmmup ?
	bne,pt	%xcc, 3f		! new proc is not kernel as, go to 3
	  wrpr	%i4, PSTATE_IE, %pstate

	SET_KAS_CTXSEC_ARGS(%i5, %o0, %o1)

	! new proc is kernel as

	call	sfmmu_setctx_sec		! switch to kernel context
	  or	%o0, %o1, %o0

	ba,a,pt	%icc, 4f

	!
	! Switch to user address space.
	!
3:
	mov	%i5, %o0			! %o0 = sfmmup
	mov	%i1, %o2			! %o2 = CPU
	set	SFMMU_PRIVATE, %o3		! %o3 = sfmmu private flag
	call	sfmmu_alloc_ctx
	  mov	%g0, %o1			! %o1 = allocate flag = 0

	brz,a,pt %o0, 4f			! %o0 == 0, no private alloc'ed
          nop

        ldn     [%i5 + SFMMU_SCDP], %o0         ! using shared contexts?
        brz,a,pt %o0, 4f
          nop

	ldn   [%o0 + SCD_SFMMUP], %o0		! %o0 = scdp->scd_sfmmup
	mov	%i1, %o2			! %o2 = CPU
	set	SFMMU_SHARED, %o3		! %o3 = sfmmu shared flag
	call	sfmmu_alloc_ctx
	  mov	1, %o1				! %o1 = allocate flag = 1

4:
	call	sfmmu_load_mmustate		! program MMU registers
	  mov	%i5, %o0

	wrpr    %g0, %i4, %pstate               ! enable interrupts

5:
	!
	! spin until dispatched thread's mutex has
	! been unlocked. this mutex is unlocked when
	! it becomes safe for the thread to run.
	!
	ldstub	[%i0 + T_LOCK], %o0	! lock curthread's t_lock
6:
	brnz,pn	%o0, 7f			! lock failed
	  ldx	[%i0 + T_PC], %i7	! delay - restore resuming thread's pc

	!
	! Fix CPU structure to indicate new running thread.
	! Set pointer in new thread to the CPU structure.
	! XXX - Move migration statistic out of here
	!
        ldx	[%i0 + T_CPU], %g2	! last CPU to run the new thread
        cmp     %g2, %i1		! test for migration
        be,pt	%xcc, 4f		! no migration
          ldn	[%i0 + T_LWP], %o1	! delay - get associated lwp (if any)
        ldx	[%i1 + CPU_STATS_SYS_CPUMIGRATE], %g2
        inc     %g2
        stx	%g2, [%i1 + CPU_STATS_SYS_CPUMIGRATE]
	stx	%i1, [%i0 + T_CPU]	! set new thread's CPU pointer
4:
	stx	%i0, [%i1 + CPU_THREAD]	! set CPU's thread pointer
	membar	#StoreLoad		! synchronize with mutex_exit()
	mov	%i0, THREAD_REG		! update global thread register
	stx	%o1, [%i1 + CPU_LWP]	! set CPU's lwp ptr
	brz,a,pn %o1, 1f		! if no lwp, branch and clr mpcb
	  stx	%g0, [%i1 + CPU_MPCB]
	!
	! user thread
	! o1 = lwp
	! i0 = new thread
	!
	ldx	[%i0 + T_STACK], %o0
	stx	%o0, [%i1 + CPU_MPCB]	! set CPU's mpcb pointer
#ifdef CPU_MPCB_PA
	ldx	[%o0 + MPCB_PA], %o0
	stx	%o0, [%i1 + CPU_MPCB_PA]
#endif
	! Switch to new thread's stack
	ldx	[%i0 + T_SP], %o0	! restore resuming thread's sp
	sub	%o0, SA(MINFRAME), %sp	! in case of intr or trap before restore
	mov	%o0, %fp
	!
	! Restore resuming thread's GSR reg and floating-point regs
	! Note that the ld to the gsr register ensures that the loading of
	! the floating point saved state has completed without necessity
	! of a membar #Sync.
	!
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g3
	ld	[%g3 + %lo(fpu_exists)], %g3
	brz,pn	%g3, 2f
	  ldx	[%i0 + T_CTX], %i5	! should resumed thread restorectx?
#endif
	ldx	[%o1 + LWP_FPU], %o0		! fp pointer
	ld	[%o0 + FPU_FPRS], %g5		! get fpu_fprs
	andcc	%g5, FPRS_FEF, %g0		! is FPRS_FEF set?
	bz,a,pt	%icc, 9f			! no, skip fp_restore
	  wr	%g0, FPRS_FEF, %fprs		! enable fprs so fp_zero works

	ldx	[THREAD_REG + T_CPU], %o4	! cpu pointer
	call	fp_restore
	  wr	%g5, %g0, %fprs			! enable fpu and restore fprs

	ldx	[%o0 + FPU_GSR], %g5		! load saved GSR data
	wr	%g5, %g0, %gsr			! restore %gsr data
	ba,pt	%icc,2f
	  ldx	[%i0 + T_CTX], %i5	! should resumed thread restorectx?

9:
	!
	! Zero resuming thread's fp registers, for *all* non-fp programs
	! Remove all possibility of using the fp regs as a "covert channel".
	!
	call	fp_zero
	  wr	%g0, %g0, %gsr
	ldx	[%i0 + T_CTX], %i5	! should resumed thread restorectx?
	ba,pt	%icc, 2f
	  wr	%g0, %g0, %fprs			! disable fprs

1:
#ifdef CPU_MPCB_PA
	mov	-1, %o1
	stx	%o1, [%i1 + CPU_MPCB_PA]
#endif
	!
	! kernel thread
	! i0 = new thread
	!
	! Switch to new thread's stack
	!
	ldx	[%i0 + T_SP], %o0	! restore resuming thread's sp
	sub	%o0, SA(MINFRAME), %sp	! in case of intr or trap before restore
	mov	%o0, %fp
	!
	! Restore resuming thread's GSR reg and floating-point regs
	! Note that the ld to the gsr register ensures that the loading of
	! the floating point saved state has completed without necessity
	! of a membar #Sync.
	!
	ldx	[%i0 + T_STACK], %o0
	ld	[%o0 + SA(MINFRAME) + FPU_FPRS], %g5	! load fprs
	ldx	[%i0 + T_CTX], %i5		! should thread restorectx?
	andcc	%g5, FPRS_FEF, %g0		! did we save fp in stack?
	bz,a,pt	%icc, 2f
	  wr	%g0, %g0, %fprs			! clr fprs

	wr	%g5, %g0, %fprs			! enable fpu and restore fprs
	call	fp_restore
	add	%o0, SA(MINFRAME), %o0		! o0 = kfpu_t ptr
	ldx	[%o0 + FPU_GSR], %g5		! load saved GSR data
	wr	%g5, %g0, %gsr			! restore %gsr data

2:
	!
	! Restore resuming thread's context
	! i5 = ctx ptr
	!
	brz,a,pt %i5, 8f		! skip restorectx() when zero
	  ld	[%i1 + CPU_BASE_SPL], %o0
	call	restorectx		! thread can not sleep on temp stack
	  mov	THREAD_REG, %o0		! delay slot - arg = thread pointer
	!
	! Set priority as low as possible, blocking all interrupt threads
	! that may be active.
	!
	ld	[%i1 + CPU_BASE_SPL], %o0
8:
	wrpr	%o0, 0, %pil
	wrpr	%g0, WSTATE_KERN, %wstate
	!
	! If we are resuming an interrupt thread, store a starting timestamp
	! in the thread structure.
	!
	lduh	[THREAD_REG + T_FLAGS], %o0
	andcc	%o0, T_INTR_THREAD, %g0
	bnz,pn	%xcc, 0f
	  nop
5:
	call	__dtrace_probe___sched_on__cpu	! DTrace probe
	nop

	ret				! resume curthread
	restore
0:
	add	THREAD_REG, T_INTR_START, %o2
1:
	ldx	[%o2], %o1
	RD_CLOCK_TICK(%o0,%o3,%g5,__LINE__)
	casx	[%o2], %o1, %o0
	cmp	%o0, %o1
	be,pt	%xcc, 5b
	  nop
	! If an interrupt occurred while we were attempting to store
	! the timestamp, try again.
	ba,pt	%xcc, 1b
	  nop

	!
	! lock failed - spin with regular load to avoid cache-thrashing.
	!
7:
	brnz,a,pt %o0, 7b		! spin while locked
	  ldub	[%i0 + T_LOCK], %o0
	ba	%xcc, 6b
	  ldstub  [%i0 + T_LOCK], %o0	! delay - lock curthread's mutex
	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

!
	! resume_from_zombie(kthread_id_t t)
	!	%i0 = thread to resume
	!
	! The calling thread is a zombie: switch onto the idle thread's
	! stack, hand the zombie to the reaper via reapq_add(), then
	! finish the switch in _resume_from_idle.
	!
	ENTRY(resume_from_zombie)
	save	%sp, -SA(MINFRAME), %sp		! save ins and locals

	call	__dtrace_probe___sched_off__cpu	! DTrace probe
	mov	%i0, %o0			! arg for DTrace probe

	ldn	[THREAD_REG + T_CPU], %i1	! cpu pointer

	flushw					! flushes all but this window
	ldn	[THREAD_REG + T_PROCP], %i2	! old procp for mmu ctx

	!
	! Temporarily switch to the idle thread's stack so that
	! the zombie thread's stack can be reclaimed by the reaper.
	!
	ldn	[%i1 + CPU_IDLE_THREAD], %o2	! idle thread pointer
	ldn	[%o2 + T_SP], %o1		! get onto idle thread stack
	sub	%o1, SA(MINFRAME), %sp		! save room for ins and locals
	clr	%fp
	!
	! Set the idle thread as the current thread.
	! Put the zombie on death-row.
	!
	mov	THREAD_REG, %o0			! save %g7 = curthread for arg
	mov	%o2, THREAD_REG			! set %g7 to idle
	stn	%g0, [%i1 + CPU_MPCB]		! clear mpcb
#ifdef CPU_MPCB_PA
	mov	-1, %o1
	stx	%o1, [%i1 + CPU_MPCB_PA]
#endif
	call	reapq_add			! reapq_add(old_thread);
	stn	%o2, [%i1 + CPU_THREAD]		! delay - CPU's thread = idle

	!
	! resume_from_idle args:
	!	%i0 = new thread
	!	%i1 = cpu
	!	%i2 = old proc
	!	%i3 = new proc
	!
	b	_resume_from_idle		! finish job of resume
	ldn	[%i0 + T_PROCP], %i3		! new process
	SET_SIZE(resume_from_zombie)

!
	! resume_from_intr(kthread_id_t t)
	!	%i0 = interrupted thread to resume
	!
	! The resuming thread was interrupted rather than switched out
	! by resume(), so its t_lock is already held and no restorectx()
	! or MMU context change is needed here.
	!
	ENTRY(resume_from_intr)
	save	%sp, -SA(MINFRAME), %sp		! save ins and locals

	!
	! We read in the fprs and call fp_save if FPRS_FEF is set
	! to save the floating-point state if fprs has been
	! modified by operations such as hw bcopy or fp_disabled.
	! This is to resolve an issue where an interrupting thread
	! doesn't retain their floating-point registers when
	! switching out of the interrupt context.
	!
	rd	%fprs, %g4
	ldn	[THREAD_REG + T_STACK], %i2
	andcc	%g4, FPRS_FEF, %g0		! is FPRS_FEF set
	bz,pt	%icc, 4f
	  st	%g4, [%i2 + SA(MINFRAME) + FPU_FPRS]	! save fprs

	! save kernel fp state in stack
	add	%i2, SA(MINFRAME), %o0		! %o0 = kfpu_t ptr
	rd	%gsr, %g5
	call fp_save
	stx	%g5, [%o0 + FPU_GSR]		! store GSR

4:

	flushw					! flushes all but this window
	stn	%fp, [THREAD_REG + T_SP]	! delay - save sp
	stn	%i7, [THREAD_REG + T_PC]	! save return address

	ldn	[%i0 + T_PC], %i7		! restore resuming thread's pc
	ldn	[THREAD_REG + T_CPU], %i1	! cpu pointer

	!
	! Fix CPU structure to indicate new running thread.
	! The pinned thread we're resuming already has the CPU pointer set.
	!
	mov	THREAD_REG, %l3		! save old thread
	stn	%i0, [%i1 + CPU_THREAD]	! set CPU's thread pointer
	membar	#StoreLoad		! synchronize with mutex_exit()
	mov	%i0, THREAD_REG		! update global thread register

	!
	! Switch to new thread's stack
	!
	ldn	[THREAD_REG + T_SP], %o0	! restore resuming thread's sp
	sub	%o0, SA(MINFRAME), %sp ! in case of intr or trap before restore
	mov	%o0, %fp
	clrb	[%l3 + T_LOCK]		! clear intr thread's tp->t_lock

	!
	! If we are resuming an interrupt thread, store a timestamp in the
	! thread structure.
	!
	lduh	[THREAD_REG + T_FLAGS], %o0
	andcc	%o0, T_INTR_THREAD, %g0
	bnz,pn	%xcc, 0f
	!
	! We're resuming a non-interrupt thread.
	! Clear CPU_INTRCNT and check if cpu_kprunrun set?
	!
	ldub	[%i1 + CPU_KPRUNRUN], %o5	! delay
	brnz,pn	%o5, 3f				! call kpreempt(KPREEMPT_SYNC);
	stub	%g0, [%i1 + CPU_INTRCNT]
1:
	ret				! resume curthread
	restore
0:
	!
	! We're an interrupt thread. Update t_intr_start and cpu_intrcnt
	!
	add	THREAD_REG, T_INTR_START, %o2
2:
	ldx	[%o2], %o1
	RD_CLOCK_TICK(%o0,%o3,%l1,__LINE__)
	casx	[%o2], %o1, %o0
	cmp	%o0, %o1
	bne,pn	%xcc, 2b
	ldn	[THREAD_REG + T_INTR], %l1	! delay
	! Reset cpu_intrcnt if we aren't pinning anyone
	brz,a,pt %l1, 2f
	stub	%g0, [%i1 + CPU_INTRCNT]
2:
	ba,pt	%xcc, 1b
	nop
3:
	!
	! We're a non-interrupt thread and cpu_kprunrun is set. call kpreempt.
	!
	call	kpreempt
	mov	KPREEMPT_SYNC, %o0
	ba,pt	%xcc, 1b
	nop
	SET_SIZE(resume_from_intr)


/*
 * thread_start()
 *
 * The current register window was crafted by thread_run() to contain
 * an address of a procedure (in register %i7), and its args in registers
 * %i0 through %i5.  A stack trace of this thread will show the procedure
 * that thread_start() invoked at the bottom of the stack.  An exit routine
 * is stored in %l0 and called when the started thread returns from its
 * called procedure.
 */

! %i7 = procedure to call; %i0/%i1 become its first two args
	ENTRY(thread_start)
	mov	%i0, %o0
	jmpl 	%i7, %o7	! call thread_run()'s start() procedure.
	mov	%i1, %o1

	call	thread_exit	! destroy thread if it returns.
	nop
	unimp 0			! thread_exit() must not return; trap if it does
	SET_SIZE(thread_start)
