/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/systm.h>
#include <sys/membar.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <sys/platform_module.h>
#include <sys/cpuvar.h>
#include <sys/cpu_module.h>
#include <sys/cmp.h>

#include <sys/cpu_sgnblk_defs.h>

static cpuset_t cpu_idle_set;		/* cpus idled by idle_other_cpus() */
static kmutex_t cpu_idle_lock;		/* held from idle through resume */
typedef const char *fn_t;

/*
 * flags to determine if the PROM routines
 * should be used to idle/resume/stop cpus
 */
static int kern_idle[NCPU];		/* kernel's idle loop */
static int cpu_are_paused;		/* set once stop_other_cpus() has run */
extern void debug_flush_windows();

/*
 * Initialize the idlestop mutex, a spin lock that raises the
 * interrupt level to PIL 15 while held.
 */
void
idlestop_init(void)
{
	mutex_init(&cpu_idle_lock, NULL, MUTEX_SPIN, (void *)ipltospl(PIL_15));
}

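/*
 * Idle the executing cpu: flag it as being "in the PROM" and spin at
 * high PIL until resume_other_cpus() clears this cpu's kern_idle entry.
 */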
static void
cpu_idle_self(void)
{
	uint_t s;
	label_t save;

	s = spl8();
	debug_flush_windows();

	/* let the rest of the system treat this cpu as if it were in the PROM */
	CPU->cpu_m.in_prom = 1;
	membar_stld();

	/* save the current pcb and establish a local resume point */
	save = curthread->t_pcb;
	(void) setjmp(&curthread->t_pcb);

	kern_idle[CPU->cpu_id] = 1;
	while (kern_idle[CPU->cpu_id])
		/* SPIN */;

	CPU->cpu_m.in_prom = 0;
	membar_stld();

	curthread->t_pcb = save;
	splx(s);
}

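/*
 * Cross-call every other ready cpu into cpu_idle_self() and wait for
 * each to report in via cpu_m.in_prom; panic if any cpu fails to idle,
 * since its state is then unknown.
 *
 * Note that cpu_idle_lock is acquired here and remains held until the
 * matching resume_other_cpus() call releases it:
 *
 *	idle_other_cpus();
 *	...operate while other cpus spin...
 *	resume_other_cpus();
 */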
void
idle_other_cpus(void)
{
	int i, cpuid, ntries;
	int failed = 0;

	if (ncpus == 1)
		return;

	mutex_enter(&cpu_idle_lock);

	cpuid = CPU->cpu_id;
	ASSERT(cpuid < NCPU);

	cpu_idle_set = cpu_ready_set;
	CPUSET_DEL(cpu_idle_set, cpuid);

	/* return with cpu_idle_lock held; resume_other_cpus() releases it */
	if (CPUSET_ISNULL(cpu_idle_set))
		return;

	xt_some(cpu_idle_set, (xcfunc_t *)idle_stop_xcall,
	    (uint64_t)cpu_idle_self, NULL);

	for (i = 0; i < NCPU; i++) {
		if (!CPU_IN_SET(cpu_idle_set, i))
			continue;

		ntries = 0x10000;
		while (!cpu[i]->cpu_m.in_prom && ntries) {
			DELAY(50);
			ntries--;
		}

		/*
		 * A cpu failing to idle is an error condition, since
		 * we can no longer be sure of its state.
		 */
		if (!cpu[i]->cpu_m.in_prom) {
			cmn_err(CE_WARN, "cpuid 0x%x failed to idle", i);
			failed++;
		}
	}

	if (failed) {
		mutex_exit(&cpu_idle_lock);
		cmn_err(CE_PANIC, "idle_other_cpus: not all cpus idled");
	}
}

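/*
 * Release every cpu idled by idle_other_cpus(): clear each kern_idle
 * entry to break the spin in cpu_idle_self(), then wait for the cpu to
 * drop cpu_m.in_prom. A cpu that fails to resume is fatal, because
 * interrupts may already be directed at it. Also drops the
 * cpu_idle_lock acquired by idle_other_cpus().
 */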
void
resume_other_cpus(void)
{
	int i, ntries;
	int cpuid = CPU->cpu_id;
	boolean_t failed = B_FALSE;

	if (ncpus == 1)
		return;

	ASSERT(cpuid < NCPU);
	ASSERT(MUTEX_HELD(&cpu_idle_lock));

	for (i = 0; i < NCPU; i++) {
		if (!CPU_IN_SET(cpu_idle_set, i))
			continue;

		kern_idle[i] = 0;
		membar_stld();
	}

	for (i = 0; i < NCPU; i++) {
		if (!CPU_IN_SET(cpu_idle_set, i))
			continue;

		ntries = 0x10000;
		while (cpu[i]->cpu_m.in_prom && ntries) {
			DELAY(50);
			ntries--;
		}

		/*
		 * A cpu failing to resume is an error condition, since
		 * interrupts may have been directed there.
		 */
		if (cpu[i]->cpu_m.in_prom) {
			cmn_err(CE_WARN, "cpuid 0x%x failed to resume", i);
			continue;
		}
		CPUSET_DEL(cpu_idle_set, i);
	}

	failed = !CPUSET_ISNULL(cpu_idle_set);

	mutex_exit(&cpu_idle_lock);

	/*
	 * True if any cpu failed to resume
	 */
	if (failed)
		cmn_err(CE_PANIC, "resume_other_cpus: not all cpus resumed");
}

/*
 * Stop all other cpus before halting or rebooting. We pause the cpus
 * instead of sending a cross call.
 */
void
stop_other_cpus(void)
{
	mutex_enter(&cpu_lock);
	if (cpu_are_paused) {
		mutex_exit(&cpu_lock);
		return;
	}

	if (ncpus > 1)
		intr_redist_all_cpus_shutdown();

	pause_cpus(NULL);
	cpu_are_paused = 1;

	mutex_exit(&cpu_lock);
}

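/*
 * Tunable upper bound, in microseconds, on how long mp_cpu_quiesce()
 * will wait for a cpu to quiesce (60 seconds by default).
 */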
int cpu_quiesce_microsecond_sanity_limit = 60 * 1000000;

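/*
 * Declare the given cpu no longer READY, remove it from cpu_ready_set,
 * and wait for it to quiesce: it must drain any active interrupt
 * levels and settle on its idle (or startup) thread. Panics if the
 * cpu is still busy after cpu_quiesce_microsecond_sanity_limit
 * microseconds.
 */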
void
mp_cpu_quiesce(cpu_t *cp0)
{
	volatile cpu_t	*cp = (volatile cpu_t *)cp0;
	int i, sanity_limit = cpu_quiesce_microsecond_sanity_limit;
	int		cpuid = cp->cpu_id;
	int		found_intr = 1;
	static fn_t	f = "mp_cpu_quiesce";

	ASSERT(CPU->cpu_id != cpuid);
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cp->cpu_flags & CPU_QUIESCED);

	/*
	 * Declare CPU as no longer being READY to process interrupts and
	 * wait for them to stop. A CPU that is not READY can no longer
	 * participate in x-calls or x-traps.
	 */
	cp->cpu_flags &= ~CPU_READY;
	CPUSET_DEL(cpu_ready_set, cpuid);
	membar_sync();

	for (i = 0; i < sanity_limit; i++) {
		if (cp->cpu_intr_actv == 0 &&
		    (cp->cpu_thread == cp->cpu_idle_thread ||
		    cp->cpu_thread == cp->cpu_startup_thread)) {
			found_intr = 0;
			break;
		}
		DELAY(1);
	}

	if (found_intr) {
		if (cp->cpu_intr_actv) {
			cmn_err(CE_PANIC, "%s: cpu_intr_actv != 0", f);
		} else if (cp->cpu_thread != cp->cpu_idle_thread &&
		    cp->cpu_thread != cp->cpu_startup_thread) {
			cmn_err(CE_PANIC, "%s: CPU %d is not quiesced",
			    f, cpuid);
		}
	}
}

/*
 * Start CPU on user request.
 */
/* ARGSUSED */
int
mp_cpu_start(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	/*
	 * Platforms that use CPU signatures require the signature
	 * block update to indicate that this CPU is in the OS now.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, cp->cpu_id);

	cmp_error_resteer(cp->cpu_id);

	return (0);			/* nothing special to do on this arch */
}

/*
 * Stop CPU on user request.
 */
/* ARGSUSED */
int
mp_cpu_stop(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	cmp_error_resteer(cp->cpu_id);

	/*
	 * Platforms that use CPU signatures require the signature
	 * block update to indicate that this CPU is offlined now.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_OFFLINE, SIGSUBST_NULL, cp->cpu_id);
	return (0);			/* nothing special to do on this arch */
}

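/*
 * plat_cpu_poweron() and plat_cpu_poweroff() are optional hooks
 * supplied by platform modules that support cpu power management;
 * taking the address of each tells us whether the running platform
 * provides the hook, and we return ENOTSUP when it does not.
 */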
/*
 * Power on CPU.
 */
int
mp_cpu_poweron(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	if (&plat_cpu_poweron)
		return (plat_cpu_poweron(cp));	/* platform-dependent hook */

	return (ENOTSUP);
}

/*
 * Power off CPU.
 */
int
mp_cpu_poweroff(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	if (&plat_cpu_poweroff)
		return (plat_cpu_poweroff(cp));	/* platform-dependent hook */

	return (ENOTSUP);
}

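/*
 * Thin wrappers around the common-code faulted-state transitions.
 */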
void
mp_cpu_faulted_enter(struct cpu *cp)
{
	cpu_faulted_enter(cp);
}

void
mp_cpu_faulted_exit(struct cpu *cp)
{
	cpu_faulted_exit(cp);
}