xref: /linux/arch/mips/kernel/r4k_switch.S (revision ba6e8564f459211117ce300eae2c7fdd23befe34)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1994, 1995, 1996, by Andreas Busse
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000 MIPS Technologies, Inc.
 *    written by Carsten Langgaard, carstenl@mips.com
 */
#include <asm/asm.h>
#include <asm/cachectl.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>

#include <asm/asmmacro.h>

/*
 * Offset to the current process status flags; the first 32 bytes of the
 * stack are not used.
 */
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)

/*
 * FPU context is saved iff the process has used its FPU in the current
 * time slice, as indicated by _TIF_USEDFPU.  In any case, the CU1 bit in the
 * user-space STATUS register should be 0, so that a process *always* starts
 * its userland with the FPU disabled after each context switch.
 *
 * The FPU will be re-enabled as soon as the process accesses it again,
 * through the do_cpu() trap.
 */

/*
 * task_struct *resume(task_struct *prev, task_struct *next,
 *                     struct thread_info *next_ti)
 */
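/*
 * resume() is the low-level half of the context switch: it saves the
 * callee-saved ("non-scratch") state of @prev, switches $28/$29 and
 * kernelsp over to @next, and returns @prev in v0.  It is normally
 * reached through the architecture's switch_to() macro; roughly (a
 * sketch of the calling convention, not the exact macro body):
 *
 *	last = resume(prev, next, task_thread_info(next));
 */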
	.align	5
	LEAF(resume)
#ifndef CONFIG_CPU_HAS_LLSC
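	/*
	 * Without hardware ll/sc the kernel emulates the pair in software
	 * and tracks it with ll_bit; clearing it here forces the sc of an
	 * interrupted emulated sequence to fail, preserving ll/sc
	 * semantics across the context switch.
	 */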
	sw	zero, ll_bit
#endif
	mfc0	t2, CP0_STATUS
	cpu_save_nonscratch a0
	LONG_S	ra, THREAD_REG31(a0)

	/*
	 * check if we need to save FPU registers
	 */
	PTR_L	t3, TASK_THREAD_INFO(a0)
	LONG_L	t0, TI_FLAGS(t3)
	li	t1, _TIF_USEDFPU
	and	t1, t0
	beqz	t1, 1f
	nor	t1, zero, t1

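	/*
	 * t1 = ~_TIF_USEDFPU (computed in the beqz delay slot above), so
	 * the and below clears the flag for the next time slice.
	 */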
	and	t0, t0, t1
	LONG_S	t0, TI_FLAGS(t3)

	/*
	 * clear saved user stack CU1 bit
	 */
	LONG_L	t0, ST_OFF(t3)
	li	t1, ~ST0_CU1
	and	t0, t0, t1
	LONG_S	t0, ST_OFF(t3)
	/* clear thread_struct CU1 bit */
	and	t2, t1

	fpu_save_double a0 t0 t1		# c0_status passed in t0
						# clobbers t1
1:
	LONG_S	t2, THREAD_STATUS(a0)

	/*
	 * The order in which the registers are restored takes care of
	 * the race when updating $28, $29 and kernelsp without having
	 * to disable interrupts.
	 */
	move	$28, a2
	cpu_restore_nonscratch a1

#if (_THREAD_SIZE - 32) < 0x10000
	PTR_ADDIU	t0, $28, _THREAD_SIZE - 32
#else
	PTR_LI		t0, _THREAD_SIZE - 32
	PTR_ADDU	t0, $28
#endif
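	/*
	 * t0 now points at the top of next's kernel stack (thread_info
	 * plus _THREAD_SIZE, minus the unused 32 bytes); set_saved_sp
	 * records it in kernelsp so that exceptions taken from user mode
	 * start out on the right kernel stack.
	 */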
	set_saved_sp	t0, t1, t2
#ifdef CONFIG_MIPS_MT_SMTC
	/* Read-modify-writes of Status must be atomic on a VPE */
	mfc0	t2, CP0_TCSTATUS
	ori	t1, t2, TCSTATUS_IXMT
	mtc0	t1, CP0_TCSTATUS
	andi	t2, t2, TCSTATUS_IXMT
	_ehb
	DMT	8				# dmt	t0
	move	t1, ra
	jal	mips_ihb
	move	ra, t1
#endif /* CONFIG_MIPS_MT_SMTC */
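	/*
	 * Build the Status value for next: the 0xff01 mask keeps the live
	 * interrupt mask (IM7..IM0, bits 15:8) and IE (bit 0) from the
	 * current Status; everything else (CU1, KSU, ...) comes from
	 * next's saved THREAD_STATUS.
	 */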
	mfc0	t1, CP0_STATUS		/* Do we really need this? */
	li	a3, 0xff01
	and	t1, a3
	LONG_L	a2, THREAD_STATUS(a1)
	nor	a3, $0, a3
	and	a2, a3
	or	a2, t1
	mtc0	a2, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
	_ehb
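	/*
	 * Re-enable multi-threading (emt) only if the TE bit saved into
	 * t0 by the dmt above shows it was enabled beforehand.
	 */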
	andi	t0, t0, VPECONTROL_TE
	beqz	t0, 1f
	emt
1:
	mfc0	t1, CP0_TCSTATUS
	xori	t1, t1, TCSTATUS_IXMT
	or	t1, t1, t2
	mtc0	t1, CP0_TCSTATUS
	_ehb
#endif /* CONFIG_MIPS_MT_SMTC */
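	/* Return prev so the caller knows which task it switched from. */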
	move	v0, a0
	jr	ra
	END(resume)

/*
 * Save a thread's fp context.
 */
LEAF(_save_fp)
#ifdef CONFIG_64BIT
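	/*
	 * fpu_save_double expects c0_status in t0 on 64-bit kernels so it
	 * can check the FR bit (16 vs 32 double registers).
	 */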
	mfc0	t0, CP0_STATUS
#endif
	fpu_save_double a0 t0 t1		# clobbers t1
	jr	ra
	END(_save_fp)

/*
 * Restore a thread's fp context.
 */
LEAF(_restore_fp)
#ifdef CONFIG_64BIT
	mfc0	t0, CP0_STATUS
#endif
	fpu_restore_double a0 t0 t1		# clobbers t1
	jr	ra
	END(_restore_fp)

/*
 * Load the FPU with signalling NaNs.  The bit pattern we use has the
 * property that it represents a signaling NaN no matter whether it is
 * interpreted as single or as double precision.
 *
 * We initialize fcr31 to round-to-nearest with no exceptions enabled.
 */

#define FPU_DEFAULT  0x00000000

LEAF(_init_fpu)
#ifdef CONFIG_MIPS_MT_SMTC
	/* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
	mfc0	t0, CP0_TCSTATUS
	/* Bit position is the same for Status, TCStatus */
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_TCSTATUS
#else /* Normal MIPS CU1 enable */
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
	fpu_enable_hazard

	li	t1, FPU_DEFAULT
	ctc1	t1, fcr31

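	/*
	 * -1 is the all-ones bit pattern; with the legacy (pre-NaN2008)
	 * MIPS NaN encoding a NaN whose mantissa MSB is set is signalling,
	 * so this is an sNaN whether read as single or as double precision.
	 */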
	li	t1, -1				# SNaN

#ifdef CONFIG_64BIT
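	/*
	 * Status.FR (bit 26) selects between 16 and 32 double registers.
	 * Shift it into the sign bit: if FR is clear only the 16
	 * even-numbered registers exist, so skip the odd ones below.
	 */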
	sll	t0, t0, 5
	bgez	t0, 1f				# 16 / 32 register mode?

	dmtc1	t1, $f1
	dmtc1	t1, $f3
	dmtc1	t1, $f5
	dmtc1	t1, $f7
	dmtc1	t1, $f9
	dmtc1	t1, $f11
	dmtc1	t1, $f13
	dmtc1	t1, $f15
	dmtc1	t1, $f17
	dmtc1	t1, $f19
	dmtc1	t1, $f21
	dmtc1	t1, $f23
	dmtc1	t1, $f25
	dmtc1	t1, $f27
	dmtc1	t1, $f29
	dmtc1	t1, $f31
1:
#endif

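	/*
	 * MIPS32 kernels write each of the 32 single-precision registers
	 * individually; otherwise doubleword moves cover the 16
	 * even-numbered registers (the odd ones were handled above when
	 * FR is set).
	 */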
#ifdef CONFIG_CPU_MIPS32
	mtc1	t1, $f0
	mtc1	t1, $f1
	mtc1	t1, $f2
	mtc1	t1, $f3
	mtc1	t1, $f4
	mtc1	t1, $f5
	mtc1	t1, $f6
	mtc1	t1, $f7
	mtc1	t1, $f8
	mtc1	t1, $f9
	mtc1	t1, $f10
	mtc1	t1, $f11
	mtc1	t1, $f12
	mtc1	t1, $f13
	mtc1	t1, $f14
	mtc1	t1, $f15
	mtc1	t1, $f16
	mtc1	t1, $f17
	mtc1	t1, $f18
	mtc1	t1, $f19
	mtc1	t1, $f20
	mtc1	t1, $f21
	mtc1	t1, $f22
	mtc1	t1, $f23
	mtc1	t1, $f24
	mtc1	t1, $f25
	mtc1	t1, $f26
	mtc1	t1, $f27
	mtc1	t1, $f28
	mtc1	t1, $f29
	mtc1	t1, $f30
	mtc1	t1, $f31
#else
	.set	mips3
	dmtc1	t1, $f0
	dmtc1	t1, $f2
	dmtc1	t1, $f4
	dmtc1	t1, $f6
	dmtc1	t1, $f8
	dmtc1	t1, $f10
	dmtc1	t1, $f12
	dmtc1	t1, $f14
	dmtc1	t1, $f16
	dmtc1	t1, $f18
	dmtc1	t1, $f20
	dmtc1	t1, $f22
	dmtc1	t1, $f24
	dmtc1	t1, $f26
	dmtc1	t1, $f28
	dmtc1	t1, $f30
#endif
	jr	ra
	END(_init_fpu)
