/*
 *  This file contains the power_save function for 6xx & 7xxx CPUs
 *  rewritten in assembler
 *
 *  Warning! This code assumes that if your machine has a 750fx
 *  it will have PLL 1 set to low speed mode (used during NAP/DOZE).
 *  If this is not the case, some additional changes will have to
 *  be made to check a runtime variable (a bit like powersave-nap).
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/feature-fixups.h>

	.text

/*
 * Init idle, called at early CPU setup time from head.S for each CPU.
 * Make sure no trace of NAP mode remains in HID0, and save default
 * values for some CPU-specific registers. Called with r24 containing
 * the CPU number and r3 the reloc offset.
 */
_GLOBAL(init_idle_6xx)
BEGIN_FTR_SECTION
	mfspr	r4,SPRN_HID0
	rlwinm	r4,r4,0,10,8	/* Clear NAP */
	mtspr	SPRN_HID0, r4
	b	1f
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
	blr
1:
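	/* r5 = 4 * CPU number + reloc offset, used below to index the
	 * per-CPU nap_save_* arrays.
	 */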
	slwi	r5,r24,2
	add	r5,r5,r3
BEGIN_FTR_SECTION
	mfspr	r4,SPRN_MSSCR0
	addis	r6,r5,nap_save_msscr0@ha
	stw	r4,nap_save_msscr0@l(r6)
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
	mfspr	r4,SPRN_HID1
	addis	r6,r5,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
	blr

/*
 * Here is the ppc6xx_idle function. This could eventually be
 * split into several functions, with the function pointer changed
 * depending on the various features.
 */
_GLOBAL(ppc6xx_idle)
	/* Check if we can nap or doze, put HID0 mask in r3 */
	lis	r3, 0
BEGIN_FTR_SECTION
	lis	r3,HID0_DOZE@h
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	/* We must dynamically check for the NAP feature as it
	 * can be cleared by CPU init after the fixups are done
	 */
	lis	r4,cur_cpu_spec@ha
	lwz	r4,cur_cpu_spec@l(r4)
	lwz	r4,CPU_SPEC_FEATURES(r4)
	andi.	r0,r4,CPU_FTR_CAN_NAP
	beq	1f
	/* Now check if user or arch enabled NAP mode */
	lis	r4,powersave_nap@ha
	lwz	r4,powersave_nap@l(r4)
	cmpwi	0,r4,0
	beq	1f
	lis	r3,HID0_NAP@h
1:
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
	cmpwi	0,r3,0
	beqlr

	/* Some pre-nap cleanups needed on some CPUs */
	andis.	r0,r3,HID0_NAP@h
	beq	2f
BEGIN_FTR_SECTION
	/* Disable L2 prefetch on some 745x and try to ensure the
	 * L2 prefetch engines are idle. As explained by the errata
	 * text, we can't be sure they are; we just hope very hard
	 * that this will be enough. At least I noticed Apple doesn't
	 * even bother doing the dcbf's here...
	 */
	mfspr	r4,SPRN_MSSCR0
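	/* Clear the two low-order bits of MSSCR0 (presumably the L2
	 * prefetch enable field) before napping.
	 */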
	rlwinm	r4,r4,0,0,29
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	lis	r4,KERNELBASE@h
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
2:
BEGIN_FTR_SECTION
	/* Go to low speed mode on some 750FX */
	lis	r4,powersave_lowspeed@ha
	lwz	r4,powersave_lowspeed@l(r4)
	cmpwi	0,r4,0
	beq	1f
	mfspr	r4,SPRN_HID1
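	/* Setting 0x0001 in the upper half is assumed to be the 750FX
	 * PLL-select bit: switch to the second PLL, which the warning
	 * in the file header expects to be set up for low speed.
	 */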
	oris	r4,r4,0x0001
	mtspr	SPRN_HID1,r4
1:
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)

	/* Go to NAP or DOZE now */
	mfspr	r4,SPRN_HID0
	lis	r5,(HID0_NAP|HID0_SLEEP)@h
BEGIN_FTR_SECTION
	oris	r5,r5,HID0_DOZE@h
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
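	/* Clear any previously set power-saving mode bits, then set
	 * the one selected in r3.
	 */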
	andc	r4,r4,r5
	or	r4,r4,r3
BEGIN_FTR_SECTION
	oris	r4,r4,HID0_DPM@h	/* that should be done once and for all */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
	mtspr	SPRN_HID0,r4
BEGIN_FTR_SECTION
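	/* Stop all outstanding AltiVec data streams before napping */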
	DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	lwz	r8,TI_LOCAL_FLAGS(r2)	/* set napping bit */
	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
	stw	r8,TI_LOCAL_FLAGS(r2)	/* it will return to our caller */
	mfmsr	r7
	ori	r7,r7,MSR_EE
	oris	r7,r7,MSR_POW@h
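	/* With MSR[POW] set and EE enabled, the mtmsr below enters
	 * NAP/DOZE. Wakeup is via an exception, and _TLF_NAPPING makes
	 * that exception return to our caller; the branch back to 1b
	 * simply re-enters power saving if we ever fall through here.
	 */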
1:	sync
	mtmsr	r7
	isync
	b	1b

/*
 * Return from NAP/DOZE mode, restore some CPU-specific registers.
 * We are called with DR/IR still off and r2 containing the physical
 * address of current.  R11 points to the exception frame (physical
 * address).  We have to preserve r10.
 */
_GLOBAL(power_save_ppc32_restore)
	lwz	r9,_LINK(r11)		/* interrupted in ppc6xx_idle: */
	stw	r9,_NIP(r11)		/* make it do a blr */

#ifdef CONFIG_SMP
	lwz	r11,TASK_CPU(r2)	/* get cpu number * 4 */
	slwi	r11,r11,2
#else
	li	r11,0
#endif
	/* TODO: make sure all these are in the same page
	 * and load r11 (@ha part + CPU offset) only once
	 */
BEGIN_FTR_SECTION
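	/* MSSCR0 is only modified on the NAP path, so skip the
	 * restore unless HID0 still has NAP set.
	 */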
	mfspr	r9,SPRN_HID0
	andis.	r9,r9,HID0_NAP@h
	beq	1f
	addis	r9,r11,(nap_save_msscr0-KERNELBASE)@ha
	lwz	r9,nap_save_msscr0@l(r9)
	mtspr	SPRN_MSSCR0, r9
	sync
	isync
1:
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
	addis	r9,r11,(nap_save_hid1-KERNELBASE)@ha
	lwz	r9,nap_save_hid1@l(r9)
	mtspr	SPRN_HID1, r9
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
	b	transfer_to_handler_cont

	.data

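/* Per-CPU save slots (one word per CPU) for the registers saved by
 * init_idle_6xx and restored by power_save_ppc32_restore.
 */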
_GLOBAL(nap_save_msscr0)
	.space	4*NR_CPUS

_GLOBAL(nap_save_hid1)
	.space	4*NR_CPUS

_GLOBAL(powersave_lowspeed)
	.long	0
197