xref: /linux/arch/powerpc/platforms/52xx/lite5200_sleep.S (revision 4b132aacb0768ac1e652cf517097ea6f237214b9)
1/* SPDX-License-Identifier: GPL-2.0 */
2#include <linux/linkage.h>
3
4#include <asm/reg.h>
5#include <asm/ppc_asm.h>
6#include <asm/processor.h>
7#include <asm/cache.h>
8
9
/*
 * Offsets from MBAR (chip register base, kept in r8 throughout the sleep
 * path) and bits within those registers.
 * NOTE(review): names/bits look like the MPC52xx SDRAM controller,
 * GPIO wakeup port and Clock Distribution Module — confirm offsets
 * against the MPC5200 reference manual.
 */
10#define SDRAM_CTRL	0x104
11#define SC_MODE_EN	(1<<31)
12#define SC_CKE		(1<<30)
13#define SC_REF_EN	(1<<28)
14#define SC_SOFT_PRE	(1<<1)
15
/* GPIO wakeup port: enable, data-direction and data-value-out registers,
   used below to signal the external QT power-control chip */
16#define GPIOW_GPIOE	0xc00
17#define GPIOW_DDR	0xc08
18#define GPIOW_DVO	0xc0c
19
/* Clock Distribution Module clock-enable register; CDM_SDRAM is the
   SDRAM clock-enable bit (cleared before power-off below) */
20#define CDM_CE		0x214
21#define CDM_SDRAM	(1<<3)
22
23
24/* helpers... beware: r10 and r4 are overwritten */
/*
 * SAVE_SPRN(reg, addr): read SPRN_<reg> and store it into 32-bit slot
 * <addr> of the save area pointed to by r4.  Clobbers r10.
 */
25#define SAVE_SPRN(reg, addr)		\
26	mfspr	r10, SPRN_##reg;	\
27	stw	r10, ((addr)*4)(r4);
28
/*
 * LOAD_SPRN(reg, addr): load 32-bit slot <addr> of the r4-based save
 * area back into SPRN_<reg>.  sync/isync ensure the SPR update is
 * complete and visible before the next instruction.  Clobbers r10.
 */
29#define LOAD_SPRN(reg, addr)		\
30	lwz	r10, ((addr)*4)(r4);	\
31	mtspr	SPRN_##reg, r10;	\
32	sync;				\
33	isync;
34
35
36	.data
/* 0x5c-word CPU-context save area; the slot assignments (GPRs, HIDs,
   MSR, LR, BATs, segment registers, SPRGs, IABR/DABR, timebase) are
   spelled out in save_regs/restore_regs below */
37registers:
38	.space 0x5c*4
39	.text
40
41/* ---------------------------------------------------------------------- */
42/* low-power mode with help of M68HLC908QT1 */
43
/*
 * lite5200_low_power(sram, mbar)
 *   r3 = virtual address of on-chip SRAM (the sleep code runs from there)
 *   r4 = virtual address of MBAR (chip register base)
 *
 * Saves CPU context into 'registers', publishes the resume address for
 * U-Boot, flushes and disables the caches, copies sram_code into SRAM
 * and jumps to it.  Never returns here; resume re-enters the kernel at
 * lite5200_wakeup.
 */
44	.globl lite5200_low_power
45lite5200_low_power:
46
47	mr	r7, r3	/* save SRAM va */
48	mr	r8, r4	/* save MBAR va */
49
50	/* setup wakeup address for u-boot at physical location 0x0 */
	/* r4 - r3 = physical address of lite5200_wakeup; it is stored
	   through the kernel's mapping of physical address 0 */
51	lis	r3, CONFIG_KERNEL_START@h
52	lis	r4, lite5200_wakeup@h
53	ori	r4, r4, lite5200_wakeup@l
54	sub	r4, r4, r3
55	stw	r4, 0(r3)
56
57
58	/*
59	 * save stuff BDI overwrites
60	 * 0xf0 (0xe0->0x100 gets overwritten when BDI connected;
61	 *   even when CONFIG_BDI_SWITCH is disabled and MMU XLAT commented; heisenbug?)
62	 * WARNING: self-refresh doesn't seem to work when BDI2000 is connected,
63	 *   possibly because BDI sets SDRAM registers before wakeup code does
64	 */
	/* word at kernel va 0xf0 goes into spare slot 0x1d of the save area */
65	lis	r4, registers@h
66	ori	r4, r4, registers@l
67	lwz	r10, 0xf0(r3)
68	stw	r10, (0x1d*4)(r4)
69
70	/* save registers to r4 [destroys r10] */
	/* LR must be saved before the bl below clobbers it */
71	SAVE_SPRN(LR, 0x1c)
72	bl	save_regs
73
74	/* flush caches [destroys r3, r4] */
75	bl	flush_data_cache
76
77
78	/* copy code to sram */
	/* word-by-word copy of sram_code..sram_code_end to SRAM (r7),
	   word count in CTR */
79	mr	r4, r7
80	li	r3, (sram_code_end - sram_code)/4
81	mtctr	r3
82	lis	r3, sram_code@h
83	ori	r3, r3, sram_code@l
841:
85	lwz	r5, 0(r3)
86	stw	r5, 0(r4)
87	addi	r3, r3, 4
88	addi	r4, r4, 4
89	bdnz	1b
90
91	/* get tb_ticks_per_usec */
	/* r11 stays live into sram_code; the local udelay there uses it */
92	lis	r3, tb_ticks_per_usec@h
93	lwz	r11, tb_ticks_per_usec@l(r3)
94
95	/* disable I and D caches */
	/* ori then xori with the same mask clears HID0_ICE|HID0_DCE
	   regardless of their previous state */
96	mfspr	r3, SPRN_HID0
97	ori	r3, r3, HID0_ICE | HID0_DCE
98	xori	r3, r3, HID0_ICE | HID0_DCE
99	sync; isync;
100	mtspr	SPRN_HID0, r3
101	sync; isync;
102
103	/* jump to sram */
	/* r8 (MBAR) and r11 (tb_ticks_per_usec) remain live for sram_code */
104	mtlr	r7
105	blrl
106	/* doesn't return */
107
108
/*
 * Sleep sequence, executed from on-chip SRAM so that SDRAM can be put
 * into self-refresh and its clock stopped while code keeps running.
 * Expects: r8 = MBAR va, r11 = tb_ticks_per_usec (for the local udelay,
 * which takes usecs in r12 and clobbers r12/r13).  r4 is scratch.
 * Ends by asking the external QT chip to cut power, then spins.
 */
109sram_code:
110	/* self refresh */
111	lwz	r4, SDRAM_CTRL(r8)
112
113	/* send NOP (precharge) */
114	oris	r4, r4, SC_MODE_EN@h	/* mode_en */
115	stw	r4, SDRAM_CTRL(r8)
116	sync
117
118	ori	r4, r4, SC_SOFT_PRE	/* soft_pre */
119	stw	r4, SDRAM_CTRL(r8)
120	sync
121	xori	r4, r4, SC_SOFT_PRE
122
123	xoris	r4, r4, SC_MODE_EN@h	/* !mode_en */
124	stw	r4, SDRAM_CTRL(r8)
125	sync
126
127	/* delay (for NOP to finish) */
128	li	r12, 1
129	bl	udelay
130
131	/*
132	 * mode_en must not be set when enabling self-refresh
133	 * send AR with CKE low (self-refresh)
134	 */
135	oris	r4, r4, (SC_REF_EN | SC_CKE)@h
136	xoris	r4, r4, (SC_CKE)@h	/* ref_en !cke */
137	stw	r4, SDRAM_CTRL(r8)
138	sync
139
140	/* delay (after !CKE there should be two cycles) */
141	li	r12, 1
142	bl	udelay
143
144	/* disable clock */
	/* ori+xori pair clears the CDM_SDRAM clock-enable bit */
145	lwz	r4, CDM_CE(r8)
146	ori	r4, r4, CDM_SDRAM
147	xori	r4, r4, CDM_SDRAM
148	stw	r4, CDM_CE(r8)
149	sync
150
151	/* delay a bit */
152	li	r12, 1
153	bl	udelay
154
155
156	/* turn off with QT chip */
157	li	r4, 0x02
158	stb	r4, GPIOW_GPIOE(r8)	/* enable gpio_wkup1 */
159	sync
160
161	stb	r4, GPIOW_DVO(r8)	/* "output" high */
162	sync
163	stb	r4, GPIOW_DDR(r8)	/* output */
164	sync
165	stb	r4, GPIOW_DVO(r8)	/* output high */
166	sync
167
168	/* 10uS delay */
169	li	r12, 10
170	bl	udelay
171
172	/* turn off */
173	li	r4, 0
174	stb	r4, GPIOW_DVO(r8)	/* output low */
175	sync
176
177	/* wait until we're offline */
	/* power is being cut; spin until it actually drops */
178  1:
179	b	1b
180
181
182	/* local udelay in sram is needed */
/*
 * Busy-wait for r12 microseconds using the timebase.
 * In:  r11 = tb_ticks_per_usec, r12 = usecs
 * Clobbers: r12, r13, cr0
 * NOTE(review): 'cmp' is a signed compare; this assumes start/end do not
 * straddle the timebase sign boundary — fine for the 1-10us delays used
 * here, but worth confirming.
 */
183SYM_FUNC_START_LOCAL(udelay)
184	/* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
185	mullw	r12, r12, r11
186	mftb	r13	/* start */
187	add	r12, r13, r12 /* end */
188    1:
189	mftb	r13	/* current */
190	cmp	cr0, r13, r12
191	blt	1b
192	blr
193SYM_FUNC_END(udelay)
194
/* end marker for the copy loop in lite5200_low_power */
195sram_code_end:
196
197
198
199/* uboot jumps here on resume */
/*
 * Resume entry point, reached via the physical address stored at 0x0 by
 * lite5200_low_power.  Runs with address translation OFF until the rfi
 * below.  restore_regs brings back most state and leaves r4 = physical
 * address of the 'registers' save area; HIDs, MSR and LR are restored
 * here, in this order, once translation is back on.
 */
200lite5200_wakeup:
201	bl	restore_regs
202
203
204	/* HIDs, MSR */
205	LOAD_SPRN(HID1, 0x19)
206	/* FIXME: Should this use HID2_G2_LE? */
207	LOAD_SPRN(HID2_750FX, 0x1a)
208
209
210	/* address translation is tricky (see turn_on_mmu) */
	/* turn on IR/DR by rfi-ing to mmu_on with the modified MSR in SRR1 */
211	mfmsr	r10
212	ori	r10, r10, MSR_DR | MSR_IR
213
214
215	mtspr	SPRN_SRR1, r10
216	lis	r10, mmu_on@h
217	ori	r10, r10, mmu_on@l
218	mtspr	SPRN_SRR0, r10
219	sync
220	rfi
mmu_on: /* from here on, address translation is enabled */
	/* NOTE: original label line preserved below via the addis comment */
221mmu_on:
222	/* kernel offset (r4 is still set from restore_registers) */
	/* convert r4 from physical back to a kernel virtual address */
223	addis	r4, r4, CONFIG_KERNEL_START@h
224
225
226	/* restore MSR */
227	lwz	r10, (4*0x1b)(r4)
228	mtmsr	r10
229	sync; isync;
230
231	/* invalidate caches */
	/* pulse HID0_ICFI|HID0_DCI, then restore the previous HID0 value */
232	mfspr	r10, SPRN_HID0
233	ori	r5, r10, HID0_ICFI | HID0_DCI
234	mtspr	SPRN_HID0, r5	/* invalidate caches */
235	sync; isync;
236	mtspr	SPRN_HID0, r10
237	sync; isync;
238
239	/* enable caches */
240	lwz	r10, (4*0x18)(r4)
241	mtspr	SPRN_HID0, r10	/* restore (enable caches, DPM) */
242	/* ^ this has to be after address translation set in MSR */
243	sync
244	isync
245
246
247	/* restore 0xf0 (BDI2000) */
248	lis	r3, CONFIG_KERNEL_START@h
249	lwz	r10, (0x1d*4)(r4)
250	stw	r10, 0xf0(r3)
251
	/* restored LR returns to the caller of lite5200_low_power */
252	LOAD_SPRN(LR, 0x1c)
253
254
255	blr
256_ASM_NOKPROBE_SYMBOL(lite5200_wakeup)
257
258
259/* ---------------------------------------------------------------------- */
260/* boring code: helpers */
261
262/* save registers */
/* SAVE_BAT(n, addr): save DBAT<n>L/U and IBAT<n>L/U into four
   consecutive slots starting at <addr>.  Clobbers r10 (via SAVE_SPRN). */
263#define SAVE_BAT(n, addr)		\
264	SAVE_SPRN(DBAT##n##L, addr);	\
265	SAVE_SPRN(DBAT##n##U, addr+1);	\
266	SAVE_SPRN(IBAT##n##L, addr+2);	\
267	SAVE_SPRN(IBAT##n##U, addr+3);
268
/* SAVE_SR(n, addr): store segment register <n> into slot <addr> of the
   r4-based save area.  Clobbers r10. */
269#define SAVE_SR(n, addr)		\
270	mfsr	r10, n;			\
271	stw	r10, ((addr)*4)(r4);
272
/* SAVE_4SR(n, addr): save segment registers n..n+3 into slots
   addr..addr+3 */
273#define SAVE_4SR(n, addr)	\
274	SAVE_SR(n, addr);	\
275	SAVE_SR(n+1, addr+1);	\
276	SAVE_SR(n+2, addr+2);	\
277	SAVE_SR(n+3, addr+3);
278
/*
 * Dump CPU context into the save area pointed to by r4 ('registers').
 * Saves r0-r2 and r11-r31 (stmw), HIDs, MSR, RPA/SDR1, all eight BAT
 * pairs, the 16 segment registers, SPRG0-7, IABR/DABR and the timebase.
 * LR is NOT saved here (the caller saves it to slot 0x1c before the bl);
 * slot 0x1d is reserved for the 0xf0 word (BDI workaround).
 * Clobbers r10.
 */
279SYM_FUNC_START_LOCAL(save_regs)
280	stw	r0, 0(r4)
281	stw	r1, 0x4(r4)
282	stw	r2, 0x8(r4)
283	stmw	r11, 0xc(r4) /* 0xc -> 0x5f, (0x18*4-1) */
284
285	SAVE_SPRN(HID0, 0x18)
286	SAVE_SPRN(HID1, 0x19)
287	/* FIXME: Should this use HID2_G2_LE? */
288	SAVE_SPRN(HID2_750FX, 0x1a)
289	mfmsr	r10
290	stw	r10, (4*0x1b)(r4)
291	/*SAVE_SPRN(LR, 0x1c) have to save it before the call */
292	/* 0x1d reserved by 0xf0 */
293	SAVE_SPRN(RPA,   0x1e)
294	SAVE_SPRN(SDR1,  0x1f)
295
296	/* save MMU regs */
297	SAVE_BAT(0, 0x20)
298	SAVE_BAT(1, 0x24)
299	SAVE_BAT(2, 0x28)
300	SAVE_BAT(3, 0x2c)
301	SAVE_BAT(4, 0x30)
302	SAVE_BAT(5, 0x34)
303	SAVE_BAT(6, 0x38)
304	SAVE_BAT(7, 0x3c)
305
306	SAVE_4SR(0, 0x40)
307	SAVE_4SR(4, 0x44)
308	SAVE_4SR(8, 0x48)
309	SAVE_4SR(12, 0x4c)
310
311	SAVE_SPRN(SPRG0, 0x50)
312	SAVE_SPRN(SPRG1, 0x51)
313	SAVE_SPRN(SPRG2, 0x52)
314	SAVE_SPRN(SPRG3, 0x53)
315	SAVE_SPRN(SPRG4, 0x54)
316	SAVE_SPRN(SPRG5, 0x55)
317	SAVE_SPRN(SPRG6, 0x56)
318	SAVE_SPRN(SPRG7, 0x57)
319
320	SAVE_SPRN(IABR,  0x58)
321	SAVE_SPRN(DABR,  0x59)
322	SAVE_SPRN(TBRL,  0x5a)
323	SAVE_SPRN(TBRU,  0x5b)
324
325	blr
326SYM_FUNC_END(save_regs)
327
328
329/* restore registers */
/* LOAD_BAT(n, addr): restore DBAT<n>L/U and IBAT<n>L/U from four
   consecutive slots starting at <addr>.  Clobbers r10 (via LOAD_SPRN). */
330#define LOAD_BAT(n, addr)		\
331	LOAD_SPRN(DBAT##n##L, addr);	\
332	LOAD_SPRN(DBAT##n##U, addr+1);	\
333	LOAD_SPRN(IBAT##n##L, addr+2);	\
334	LOAD_SPRN(IBAT##n##U, addr+3);
335
/* LOAD_SR(n, addr): restore segment register <n> from slot <addr> of
   the r4-based save area.  Clobbers r10. */
336#define LOAD_SR(n, addr)		\
337	lwz	r10, ((addr)*4)(r4);	\
338	mtsr	n, r10;
339
/* LOAD_4SR(n, addr): restore segment registers n..n+3 from slots
   addr..addr+3 */
340#define LOAD_4SR(n, addr)	\
341	LOAD_SR(n, addr);	\
342	LOAD_SR(n+1, addr+1);	\
343	LOAD_SR(n+2, addr+2);	\
344	LOAD_SR(n+3, addr+3);
345
/*
 * Restore CPU context from the 'registers' save area.  Called from
 * lite5200_wakeup with address translation OFF, so the area's virtual
 * address is converted to physical first.  Restores GPRs, RPA/SDR1,
 * BATs, segment registers, SPRGs, IABR/DABR and the timebase; HIDs,
 * MSR and LR are deliberately left for the caller (slots 0x18-0x1d).
 * Returns with r4 = physical address of the save area (the caller
 * relies on this at mmu_on).  Clobbers r10.
 */
346SYM_FUNC_START_LOCAL(restore_regs)
347	lis	r4, registers@h
348	ori	r4, r4, registers@l
349
350	/* MMU is not up yet */
351	subis	r4, r4, CONFIG_KERNEL_START@h
352
353	lwz	r0, 0(r4)
354	lwz	r1, 0x4(r4)
355	lwz	r2, 0x8(r4)
356	lmw	r11, 0xc(r4)
357
358	/*
359	 * these are a bit tricky
360	 *
361	 * 0x18 - HID0
362	 * 0x19 - HID1
363	 * 0x1a - HID2
364	 * 0x1b - MSR
365	 * 0x1c - LR
366	 * 0x1d - reserved by 0xf0 (BDI2000)
367	 */
368	LOAD_SPRN(RPA,   0x1e);
369	LOAD_SPRN(SDR1,  0x1f);
370
371	/* restore MMU regs */
372	LOAD_BAT(0, 0x20)
373	LOAD_BAT(1, 0x24)
374	LOAD_BAT(2, 0x28)
375	LOAD_BAT(3, 0x2c)
376	LOAD_BAT(4, 0x30)
377	LOAD_BAT(5, 0x34)
378	LOAD_BAT(6, 0x38)
379	LOAD_BAT(7, 0x3c)
380
381	LOAD_4SR(0, 0x40)
382	LOAD_4SR(4, 0x44)
383	LOAD_4SR(8, 0x48)
384	LOAD_4SR(12, 0x4c)
385
386	/* rest of regs */
387	LOAD_SPRN(SPRG0, 0x50);
388	LOAD_SPRN(SPRG1, 0x51);
389	LOAD_SPRN(SPRG2, 0x52);
390	LOAD_SPRN(SPRG3, 0x53);
391	LOAD_SPRN(SPRG4, 0x54);
392	LOAD_SPRN(SPRG5, 0x55);
393	LOAD_SPRN(SPRG6, 0x56);
394	LOAD_SPRN(SPRG7, 0x57);
395
396	LOAD_SPRN(IABR,  0x58);
397	LOAD_SPRN(DABR,  0x59);
398	LOAD_SPRN(TBWL,  0x5a);	/* these two have separate R/W regs */
399	LOAD_SPRN(TBWU,  0x5b);
400
401	blr
402_ASM_NOKPROBE_SYMBOL(restore_regs)
403SYM_FUNC_END(restore_regs)
404
405
406
407/* cache flushing code. copied from arch/ppc/boot/util.S */
/* 128*8 lines; NOTE(review): presumably sized to cover the whole L1
   data cache with margin — confirm against the core's cache geometry */
408#define NUM_CACHE_LINES (128*8)
409
410/*
411 * Flush data cache
412 * Do this by just reading lots of stuff into the cache.
413 */
/*
 * Displacement flush: reading NUM_CACHE_LINES consecutive cache lines
 * starting at the kernel base forces any dirty lines to be evicted
 * (written back).  Clobbers r3, r4 and CTR.
 */
414SYM_FUNC_START_LOCAL(flush_data_cache)
415	lis	r3,CONFIG_KERNEL_START@h
416	ori	r3,r3,CONFIG_KERNEL_START@l
417	li	r4,NUM_CACHE_LINES
418	mtctr	r4
4191:
420	lwz	r4,0(r3)
421	addi	r3,r3,L1_CACHE_BYTES	/* Next line, please */
422	bdnz	1b
423	blr
424SYM_FUNC_END(flush_data_cache)
425