/*
 * arch/powerpc/kernel/swsusp_32.S
 */
#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>


/*
 * Structure for storing the CPU registers in the save area.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 SPRGs */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_TB		0x60
#define SL_R2		0x68
#define SL_CR		0x6c
#define SL_LR		0x70
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)	/* r12..r31: 20 registers * 4 bytes */
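/* Each SL_[DI]BATn slot holds the 8-byte upper/lower register pair,
 * and SL_TB holds the 64-bit timebase (TBU, then TBL).
 */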

	.section .data
	.align	5

_GLOBAL(swsusp_save_area)
	.space	SL_SIZE
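	/* Note: the save area lives in .data, so it is part of the
	 * image restored by swsusp_arch_resume; the values stored
	 * here reappear in memory after the page copy.
	 */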


	.section .text
	.align	5

_GLOBAL(swsusp_arch_suspend)

	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l

	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)		/* store r12..r31 in one go */

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)

	/* Get a stable timebase and save it: if TBU changed while we
	 * read TBL, a carry occurred between the two reads, so retry
	 */
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)

	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)

#if 0
	/* Back up various bits of CPU setup state */
	bl	__save_cpu_setup
#endif
	/* Call the low-level suspend code (we should probably have
	 * set up a stack frame...)
	 */
	bl	swsusp_save

	/* Restore LR from the save area (the call clobbered it) */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

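	/* The return value of swsusp_save() is still in r3 and is
	 * passed back to our caller unchanged.
	 */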
	blr


/* Resume code */
_GLOBAL(swsusp_arch_resume)

	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	sync

	/* Disable MSR:DR so we don't take a TLB or hash miss during
	 * the copy, as our hash table will be unusable for a while.
	 * For .text, we assume we are covered by a BAT. This works
	 * only on non-G5 CPUs at this point; the G5 will need a
	 * better approach, possibly a small temporary hash table
	 * filled with large mappings, since disabling the MMU
	 * completely isn't a good option for performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 here; we should investigate moving the BATs for
	 * these CPUs.)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync

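	/* With MSR:DR off, all data accesses below use physical
	 * addresses, hence the tophys() translations.
	 */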
	/* Load the pointer to the list of pages to copy into r10
	 * (the low 16 bits of the address are unchanged by
	 * subtracting KERNELBASE, so a plain @l is fine for the ori)
	 */
	lis	r11,(pagedir_nosave - KERNELBASE)@h
	ori	r11,r11,pagedir_nosave@l
	lwz	r10,0(r11)

	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache-efficient */
1:
	tophys(r3,r10)
	li	r0,256			/* 256 * 16 bytes = one 4KB page */
	mtctr	r0
	lwz	r11,pbe_address(r3)	/* source */
	tophys(r5,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r6,r10)
2:
	lwz	r8,0(r5)
	lwz	r9,4(r5)
	lwz	r10,8(r5)
	lwz	r11,12(r5)
	addi	r5,r5,16
	stw	r8,0(r6)
	stw	r9,4(r6)
	stw	r10,8(r6)
	stw	r11,12(r6)
	addi	r6,r6,16
	bdnz	2b
	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b

	/* Do a very simple cache flush/invalidate of the L1 to
	 * ensure coherency of the icache
	 */
	lis	r3,0x0002		/* 0x20000 lines * 32 bytes = 4MB */
	mtctr	r3
	li	r3,0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync

	/* OK, we are now running with the kernel data of the old
	 * kernel fully restored. We can get at the save area easily
	 * now. The rest of the code assumes that the loading kernel
	 * and the one it resumes are exactly identical
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)		/* r11 = physical address of the save area */

#if 0
	/* Restore various bits of CPU setup state */
	bl	__restore_cpu_setup
#endif
	/* Restore the BATs and SDR1; then we can turn on the MMU.
	 * This is a bit hairy, as we are currently running out of
	 * those very BATs, but our code is probably in the icache,
	 * and we are writing the same values back, so it should be
	 * fine, though a better solution will have to be found
	 * long-term
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4

#if 0
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
#endif

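	/* Zero the high BATs (4..7) on CPUs that have them, so no
	 * stale mappings are left enabled.
	 */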
BEGIN_FTR_SECTION
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)

	/* Flush all TLBs: tlbie one address in every 4KB page of the
	 * low 256MB of effective address space, enough to cover all
	 * TLB sets
	 */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync

	/* Restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)		/* the save area pointer is virtual again */
	/* Restore TB: zero TBL first so no carry into TBU can occur
	 * while we load the saved pair
	 */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4

	/* Kick decrementer */
	li	r0,1
	mtdec	r0

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0

	/* XXX Note: we don't really need to call swsusp_resume */

	li	r3,0
	blr

/* FIXME: This construct is actually not useful, since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
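/* Set SRR0/SRR1 so that rfi "returns" to our caller with the MSR
 * value passed in r3, re-enabling translation atomically.
 */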
turn_on_mmu:
	mflr	r4
	mtsrr0	r4		/* return address */
	mtsrr1	r3		/* MSR to restore */
	sync
	isync
	rfi