vector.S (old: 05668381140309088443bf5dc53add4104610fbb, new: e821ea70f3b4873b50056a1e0f74befed1014c09)
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>

/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 */
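/*
 * Note on entry conventions, inferred from the code below rather than
 * stated in the original comment: this routine is reached from the
 * AltiVec-unavailable exception, with the interrupted MSR image held in
 * r9 (32-bit) or r12 (64-bit).  The oris on that register further down
 * is what re-enables MSR_VEC for the interrupted context on return.
 */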
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here; that is done in the normal context
 * switch code.  Note that we could rely on the vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
	toreal(r3)
	PPC_LL	r4,ADDROFF(last_task_used_altivec)(r3)
	PPC_LCMPI	0,r4,0
	beq	1f

	/* Save VMX state to last_task_used_altivec's THREAD struct */
	toreal(r4)
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	PPC_LL	r5,PT_REGS(r4)
	toreal(r5)
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_VEC@h
	andc	r4,r4,r10
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
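/*
 * Rough C-level sketch of the !SMP block above (illustration only; the
 * save helper is shorthand for the SAVE_32VRS + VSCR store sequence, not
 * a function defined in this tree):
 *
 *	if (last_task_used_altivec) {
 *		struct thread_struct *t = &last_task_used_altivec->thread;
 *		save_vrs_and_vscr(t);		// hypothetical helper
 *		t->regs->msr &= ~MSR_VEC;	// previous owner loses VMX
 *	}
 */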

	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
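	/*
	 * Equivalent logic in C, for reference (sketch only):
	 *
	 *	if (mfspr(SPRN_VRSAVE) == 0)
	 *		mtspr(SPRN_VRSAVE, -1);	// treat "never set" as "all VRs live"
	 */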
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
	oris	r9,r9,MSR_VEC@h		/* r9 = MSR restored on exception exit */
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h	/* r12 = interrupted MSR image */
	std	r12,_MSR(r1)		/* write it back to the exception frame */
#endif
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)	/* mark the task as a VMX user */
	lvx	vr0,r10,r5		/* fetch the saved VSCR image */
	mtvscr	vr0			/* restore VSCR */
	REST_32VRS(0,r4,r5)		/* restore vr0-vr31 */
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	fromreal(r4)
	PPC_STL	r4,ADDROFF(last_task_used_altivec)(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	blr

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
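/*
 * Expected C-side usage, sketched under the assumption that the usual
 * powerpc helpers in process.c are the callers (not part of this file):
 * callers disable preemption and only bother when the task actually owns
 * the vector unit, e.g.
 *
 *	preempt_disable();
 *	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_VEC))
 *		giveup_altivec(tsk);
 *	preempt_enable();
 */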
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	MTMSRD(r5)			/* enable use of VMX now */
	isync
	PPC_LCMPI	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r5,0
	SAVE_32VRS(0,r4,r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	lis	r3,(MSR_VEC|MSR_VSX)@h	/* patched in at boot when CPU_FTR_VSX is set */
FTR_SECTION_ELSE
	lis	r3,MSR_VEC@h
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#else
	lis	r3,MSR_VEC@h
#endif
	andc	r4,r4,r3		/* disable VMX (and VSX, if present) for previous task */
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
	PPC_STL	r5,ADDROFF(last_task_used_altivec)(r4)
#endif /* CONFIG_SMP */
	blr

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuses the FP and AltiVec load routines (load_up_fpu/load_up_altivec),
 * after first checking whether those register sets have already been
 * loaded.
 */
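/*
 * As in load_up_altivec, r12 is assumed to carry the interrupted MSR
 * image (64-bit only here; see the #error above): the MSR_FP and MSR_VEC
 * tests below decide whether the FP and VMX halves of the VSX register
 * set still need to be loaded.
 */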
_GLOBAL(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

#ifndef CONFIG_SMP
	ld	r3,last_task_used_vsx@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Disable VSX for last_task_used_vsx */
	addi	r4,r4,THREAD
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VSX@h
	andc	r6,r4,r6
	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
#ifndef CONFIG_SMP
	/* Update last_task_used_vsx to 'current' */
	ld	r4,PACACURRENT(r13)
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	b	fast_exception_return

/*
 * __giveup_vsx(tsk)
 * Disable VSX for the task given as the argument.
 * Does NOT save vsx registers.
 * Enables the VSX for use in the kernel on return.
 */
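/*
 * Sketch of the assumed C-side wrapper (following the usual powerpc
 * process.c layout; shown only to explain why this routine does not save
 * any registers itself: the FP and VMX halves of the VSX state are saved
 * by their own giveup routines first):
 *
 *	giveup_fpu(tsk);
 *	giveup_altivec(tsk);
 *	__giveup_vsx(tsk);
 */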
_GLOBAL(__giveup_vsx)
	mfmsr	r5
	oris	r5,r5,MSR_VSX@h
	mtmsrd	r5			/* enable use of VSX now */
	isync

	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VSX@h
	andc	r4,r4,r3		/* disable VSX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_vsx@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_VSX */


/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers.  These routines must be called
 * with preempt disabled.
 */
#ifdef CONFIG_PPC32
	.data
fpzero:
	.long	0
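	/* Presumably the 0.0 constant used by the single-precision helper
	 * routines in the elided remainder of this file. */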

--- 184 unchanged lines hidden ---