/*
 * MIPS idle loop and WAIT instruction support.
 *
 * Copyright (C) xxxx  the Anonymous
 * Copyright (C) 1994 - 2006 Ralf Baechle
 * Copyright (C) 2003, 2004  Maciej W. Rozycki
 * Copyright (C) 2001, 2004, 2011, 2012  MIPS Technologies, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/cpu-type.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>

/*
 * Not all MIPS CPUs have the "wait" instruction, and among those that do,
 * its behaviour differs between CPU families.  This pointer selects the
 * CPU-specific wait implementation.  The wait instruction stalls the
 * pipeline and greatly reduces the CPU's power consumption.
 */
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);
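
/*
 * A minimal sketch of how platform code could install its own idle routine
 * instead of relying on check_wait() below; my_soc_wait() is a hypothetical
 * name, not something defined in this file.  Like the routines below, a
 * custom implementation is expected to re-enable interrupts itself:
 *
 *	static void __cpuidle my_soc_wait(void)
 *	{
 *		// enter the SoC-specific low-power state here
 *		local_irq_enable();
 *	}
 *
 *	cpu_wait = my_soc_wait;		// e.g. from platform setup code
 */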

static void __cpuidle r3081_wait(void)
{
	unsigned long cfg = read_c0_conf();

	write_c0_conf(cfg | R30XX_CONF_HALT);
	local_irq_enable();
}

static void __cpuidle r39xx_wait(void)
{
	if (!need_resched())
		write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
	local_irq_enable();
}

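/*
 * __r4k_wait() (assembly, see genex.S) couples the need_resched() test and
 * the WAIT instruction in a small "rollback" region; if an interrupt lands
 * between the two, the interrupt handler rolls the PC back to the start of
 * the region so the test is redone.  That is why r4k_wait() can enable
 * interrupts before calling it.
 */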
void __cpuidle r4k_wait(void)
{
	local_irq_enable();
	__r4k_wait();
}

/*
 * This variant is preferable as it allows testing need_resched and going to
 * sleep depending on the outcome atomically.  Unfortunately the "It is
 * implementation-dependent whether the pipeline restarts when a non-enabled
 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
 * using this version a gamble.
 */
void __cpuidle r4k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push		\n"
		"	.set	arch=r4000	\n"
		"	wait			\n"
		"	.set	pop		\n");
	local_irq_enable();
}

/*
 * The RM7000 variant has to handle erratum 38.  The workaround is to not
 * have any pending stores when the WAIT instruction is executed.
 */
static void __cpuidle rm7k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"	.set	noat					\n"
		"	mfc0	$1, $12					\n"
		"	sync						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	wait						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	.set	pop					\n");
	local_irq_enable();
}

/*
 * The Au1 'wait' is only useful when the 32kHz counter is used as the timer,
 * since the core clock (and the cp0 counter) stops while executing it.  Only
 * an interrupt can wake the core, so interrupts must be enabled before
 * entering idle modes.
 */
static void __cpuidle au1k_wait(void)
{
	unsigned long c0status = read_c0_status() | 1;	/* irqs on */

	__asm__(
	"	.set	push			\n"
	"	.set	arch=r4000		\n"
	/* Fill_I cacheops: pull this routine into the I-cache first */
	"	cache	0x14, 0(%0)		\n"
	"	cache	0x14, 32(%0)		\n"
	"	sync				\n"
	"	mtc0	%1, $12			\n" /* wr c0status */
	"	wait				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	.set	pop			\n"
	: : "r" (au1k_wait), "r" (c0status));
}

static int __initdata nowait;

static int __init wait_disable(char *s)
{
	nowait = 1;

	return 1;
}

__setup("nowait", wait_disable);
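
/*
 * With "nowait" on the kernel command line, check_wait() leaves cpu_wait
 * unset, so arch_cpu_idle() falls back to simply re-enabling interrupts.
 */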

void __init check_wait(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	if (nowait) {
		printk("Wait instruction disabled.\n");
		return;
	}

	/*
	 * MIPSr6 specifies that masked interrupts should unblock an executing
	 * wait instruction, and thus that it is safe for us to use
	 * r4k_wait_irqoff. Yippee!
	 */
	if (cpu_has_mips_r6) {
		cpu_wait = r4k_wait_irqoff;
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R3081:
	case CPU_R3081E:
		cpu_wait = r3081_wait;
		break;
	case CPU_TX3927:
		cpu_wait = r39xx_wait;
		break;
	case CPU_R4200:
/*	case CPU_R4300: */
	case CPU_R4600:
	case CPU_R4640:
	case CPU_R4650:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5500:
	case CPU_NEVADA:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_4KSC:
	case CPU_5KC:
	case CPU_5KE:
	case CPU_25KF:
	case CPU_PR4450:
	case CPU_BMIPS3300:
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
	case CPU_JZRISC:
	case CPU_LOONGSON1:
	case CPU_XLR:
	case CPU_XLP:
		cpu_wait = r4k_wait;
		break;
	case CPU_LOONGSON3:
		if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0)
			cpu_wait = r4k_wait;
		break;

	case CPU_BMIPS5000:
		cpu_wait = r4k_wait_irqoff;
		break;
	case CPU_RM7000:
		cpu_wait = rm7k_wait_irqoff;
		break;

	case CPU_PROAPTIV:
	case CPU_P5600:
		/*
		 * Incoming Fast Debug Channel (FDC) data during a wait
		 * instruction causes the wait never to resume, even if an
		 * interrupt is received. Avoid using wait at all if FDC data is
		 * likely to be received.
		 */
		if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY))
			break;
		/* fall through */
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_M5150:
	case CPU_QEMU_GENERIC:
		cpu_wait = r4k_wait;
		if (read_c0_config7() & MIPS_CONF7_WII)
			cpu_wait = r4k_wait_irqoff;
		break;

	case CPU_74K:
		cpu_wait = r4k_wait;
		if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
			cpu_wait = r4k_wait_irqoff;
		break;

	case CPU_TX49XX:
		cpu_wait = r4k_wait_irqoff;
		break;
	case CPU_ALCHEMY:
		cpu_wait = au1k_wait;
		break;
	case CPU_20KC:
		/*
		 * WAIT on Rev1.0 is affected by errata E1, E2, E3 and E16.
		 * WAIT on Rev2.0 and Rev3.0 is affected by E16.
		 * On Rev3.1 WAIT is a nop, so why bother.
		 */
		if ((c->processor_id & 0xff) <= 0x64)
			break;

		/*
		 * Another rev is incrementing c0_count at a reduced clock
		 * rate while in WAIT mode.  So we basically have the choice
		 * between using the cp0 timer as clocksource or avoiding
		 * the WAIT instruction.  Until more details are known,
		 * disable the use of WAIT for 20Kc entirely.
		   cpu_wait = r4k_wait;
		 */
		break;
	default:
		break;
	}
}

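/*
 * Invoked from the generic idle loop with interrupts disabled; every
 * cpu_wait implementation above is expected to re-enable them (note the
 * local_irq_enable() calls), as does the fallback path below.
 */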
void arch_cpu_idle(void)
{
	if (cpu_wait)
		cpu_wait();
	else
		local_irq_enable();
}

#ifdef CONFIG_CPU_IDLE

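/*
 * Exposed for MIPS cpuidle drivers to use as the .enter callback of their
 * shallowest state.  A rough sketch of such a state entry (field values
 * illustrative only, not taken from any particular driver):
 *
 *	.states[0] = {
 *		.enter		= mips_cpuidle_wait_enter,
 *		.exit_latency	= 1,
 *		.target_residency = 1,
 *		.name		= "wait",
 *		.desc		= "MIPS wait",
 *	},
 */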
int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	arch_cpu_idle();
	return index;
}

#endif