xref: /linux/arch/mips/kernel/r4k_switch.S (revision 7b12b9137930eb821b68e1bfa11e9de692208620)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1994, 1995, 1996, by Andreas Busse
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000 MIPS Technologies, Inc.
 *    written by Carsten Langgaard, carstenl@mips.com
 */
#include <linux/config.h>
#include <asm/asm.h>
#include <asm/cachectl.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>

#include <asm/asmmacro.h>

/*
 * Offset to the current process status flags; the first 32 bytes of the
 * stack are not used.
 */
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
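/*
 * Sketch of the stack layout this offset assumes: the thread_info sits at
 * the bottom of the _THREAD_SIZE kernel stack, the top 32 bytes are the
 * unused pad mentioned above, and the user's struct pt_regs lives just
 * below that pad.  ST_OFF(thread_info) therefore addresses the saved
 * CP0 Status word (PT_STATUS) inside those pt_regs.
 */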

/*
 * FPU context is saved iff the process has used the FPU in the current
 * time slice, as indicated by _TIF_USEDFPU.  In any case, the CU1 bit of
 * the saved user-space STATUS register is cleared, so that a process
 * *always* starts its userland with the FPU disabled after each context
 * switch.
 *
 * The FPU is re-enabled as soon as the process touches the FPU again,
 * via the do_cpu() trap.
 */
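
/*
 * Roughly, the FPU test in resume() below is equivalent to this C sketch
 * (helper names are illustrative, not the exact kernel API); the actual
 * save is done by the fpu_save_double macro:
 *
 *	if (task_thread_info(prev)->flags & _TIF_USEDFPU) {
 *		task_thread_info(prev)->flags &= ~_TIF_USEDFPU;
 *		prev_pt_regs->cp0_status &= ~ST0_CU1;
 *		save_fp(prev);
 *	}
 */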

/*
 * task_struct *resume(task_struct *prev, task_struct *next,
 *                     struct thread_info *next_ti)
 */
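/*
 * Per the MIPS calling convention: a0 = prev, a1 = next, a2 = next_ti,
 * and the previous task is returned in v0.  resume() is normally invoked
 * from the switch_to() macro, roughly as
 * (last) = resume(prev, next, task_thread_info(next)).
 */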
	.align	5
	LEAF(resume)
#ifndef CONFIG_CPU_HAS_LLSC
	sw	zero, ll_bit
#endif
	mfc0	t1, CP0_STATUS
	LONG_S	t1, THREAD_STATUS(a0)
	cpu_save_nonscratch a0
	LONG_S	ra, THREAD_REG31(a0)

	/*
	 * check if we need to save FPU registers
	 */
	PTR_L	t3, TASK_THREAD_INFO(a0)
	LONG_L	t0, TI_FLAGS(t3)
	li	t1, _TIF_USEDFPU
	and	t2, t0, t1
	beqz	t2, 1f
	nor	t1, zero, t1

	and	t0, t0, t1
	LONG_S	t0, TI_FLAGS(t3)

	/*
	 * clear saved user stack CU1 bit
	 */
	LONG_L	t0, ST_OFF(t3)
	li	t1, ~ST0_CU1
	and	t0, t0, t1
	LONG_S	t0, ST_OFF(t3)

	fpu_save_double a0 t1 t0 t2		# c0_status passed in t1
						# clobbers t0 and t2
1:

	/*
	 * The order in which the registers are restored takes care of
	 * the race when updating $28, $29 and kernelsp without disabling
	 * interrupts.
	 */
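	/*
	 * Concretely: $28 is pointed at the new thread_info first,
	 * cpu_restore_nonscratch then brings in the new sp ($29) and
	 * callee-saved registers, and only afterwards is the per-CPU
	 * kernelsp updated to the top of the new stack.
	 */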
	move	$28, a2
	cpu_restore_nonscratch a1

	PTR_ADDIU	t0, $28, _THREAD_SIZE - 32
	set_saved_sp	t0, t1, t2
#ifdef CONFIG_MIPS_MT_SMTC
	/* Read-modify-writes of Status must be atomic on a VPE */
	mfc0	t2, CP0_TCSTATUS
	ori	t1, t2, TCSTATUS_IXMT
	mtc0	t1, CP0_TCSTATUS
	andi	t2, t2, TCSTATUS_IXMT
	ehb
	DMT	8				# dmt	t0
	move	t1,ra
	jal	mips_ihb
	move	ra,t1
#endif /* CONFIG_MIPS_MT_SMTC */
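	/*
	 * Splice the saved thread Status with the live one: 0xff01 covers
	 * the IM7..IM0 interrupt mask bits and IE, which are kept from the
	 * current Status; all other bits come from the value saved in
	 * next's thread struct.
	 */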
	mfc0	t1, CP0_STATUS		/* Do we really need this? */
	li	a3, 0xff01
	and	t1, a3
	LONG_L	a2, THREAD_STATUS(a1)
	nor	a3, $0, a3
	and	a2, a3
	or	a2, t1
	mtc0	a2, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
	ehb
	andi	t0, t0, VPECONTROL_TE
	beqz	t0, 1f
	emt
1:
	mfc0	t1, CP0_TCSTATUS
	xori	t1, t1, TCSTATUS_IXMT
	or	t1, t1, t2
	mtc0	t1, CP0_TCSTATUS
	ehb
#endif /* CONFIG_MIPS_MT_SMTC */
	move	v0, a0
	jr	ra
	END(resume)

/*
 * Save a thread's fp context.
 */
LEAF(_save_fp)
#ifdef CONFIG_64BIT
	mfc0	t1, CP0_STATUS
#endif
	fpu_save_double a0 t1 t0 t2		# clobbers t1
	jr	ra
	END(_save_fp)

/*
 * Restore a thread's fp context.
 */
LEAF(_restore_fp)
	fpu_restore_double a0, t1		# clobbers t1
	jr	ra
	END(_restore_fp)

/*
 * Load the FPU with signalling NaNs.  The bit pattern we use has the
 * property that it represents a signalling NaN no matter whether it is
 * interpreted as a single or as a double precision value.
 *
 * We initialize fcr31 to rounding to nearest, no exceptions.
 */

#define FPU_DEFAULT  0x00000000
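
/*
 * FPU_DEFAULT == 0 leaves the fcr31 rounding-mode field at 0 (round to
 * nearest) with all exception enable, cause and flag bits clear.  The
 * register fill below uses -1 (all bits set): the exponent field is all
 * ones and the mantissa is non-zero, and with the legacy MIPS NaN
 * encoding a set mantissa MSB marks the NaN as signalling, in both the
 * single and the double interpretation.
 */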

LEAF(_init_fpu)
#ifdef CONFIG_MIPS_MT_SMTC
	/* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
	mfc0	t0, CP0_TCSTATUS
	/* Bit position is the same for Status, TCStatus */
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_TCSTATUS
#else /* Normal MIPS CU1 enable */
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
	fpu_enable_hazard

	li	t1, FPU_DEFAULT
	ctc1	t1, fcr31

	li	t1, -1				# SNaN

#ifdef CONFIG_64BIT
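	/*
	 * Status.FR (bit 26) selects between 16 double registers (FR=0,
	 * odd-numbered $f registers are not separate doubles) and 32
	 * 64-bit registers (FR=1).  Shifting Status left by 5 moves FR
	 * into the sign bit, so the bgez below skips the odd-register
	 * initialisation when FR is clear.
	 */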
	sll	t0, t0, 5
	bgez	t0, 1f				# 16 / 32 register mode?

	dmtc1	t1, $f1
	dmtc1	t1, $f3
	dmtc1	t1, $f5
	dmtc1	t1, $f7
	dmtc1	t1, $f9
	dmtc1	t1, $f11
	dmtc1	t1, $f13
	dmtc1	t1, $f15
	dmtc1	t1, $f17
	dmtc1	t1, $f19
	dmtc1	t1, $f21
	dmtc1	t1, $f23
	dmtc1	t1, $f25
	dmtc1	t1, $f27
	dmtc1	t1, $f29
	dmtc1	t1, $f31
1:
#endif

#ifdef CONFIG_CPU_MIPS32
	mtc1	t1, $f0
	mtc1	t1, $f1
	mtc1	t1, $f2
	mtc1	t1, $f3
	mtc1	t1, $f4
	mtc1	t1, $f5
	mtc1	t1, $f6
	mtc1	t1, $f7
	mtc1	t1, $f8
	mtc1	t1, $f9
	mtc1	t1, $f10
	mtc1	t1, $f11
	mtc1	t1, $f12
	mtc1	t1, $f13
	mtc1	t1, $f14
	mtc1	t1, $f15
	mtc1	t1, $f16
	mtc1	t1, $f17
	mtc1	t1, $f18
	mtc1	t1, $f19
	mtc1	t1, $f20
	mtc1	t1, $f21
	mtc1	t1, $f22
	mtc1	t1, $f23
	mtc1	t1, $f24
	mtc1	t1, $f25
	mtc1	t1, $f26
	mtc1	t1, $f27
	mtc1	t1, $f28
	mtc1	t1, $f29
	mtc1	t1, $f30
	mtc1	t1, $f31
#else
	.set	mips3
	dmtc1	t1, $f0
	dmtc1	t1, $f2
	dmtc1	t1, $f4
	dmtc1	t1, $f6
	dmtc1	t1, $f8
	dmtc1	t1, $f10
	dmtc1	t1, $f12
	dmtc1	t1, $f14
	dmtc1	t1, $f16
	dmtc1	t1, $f18
	dmtc1	t1, $f20
	dmtc1	t1, $f22
	dmtc1	t1, $f24
	dmtc1	t1, $f26
	dmtc1	t1, $f28
	dmtc1	t1, $f30
#endif
	jr	ra
	END(_init_fpu)