xref: /titanic_50/usr/src/uts/sun4v/os/mach_mp_states.c (revision f273041ff6419d6156c10c02bb1a527bfcfdc457)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/cpuvar.h>
#include <sys/cpu_module.h>
#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/prom_plat.h>
#include <sys/hypervisor_api.h>
#include <sys/hsvc.h>
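
/*
 * Cross-call timeout limits, in %tick units, defined by the cross-call
 * support code.  They bound how long xt_cpu_unreg_powerdown() below
 * waits for a remote CPU to respond to a cross trap.
 */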
extern uint64_t xc_tick_limit;
extern uint64_t xc_tick_jump_limit;
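
/*
 * Target-side handler for the powerdown cross trap sent below.  It
 * unregisters the target CPU's interrupt and error queues, clears the
 * flag at doneflag_va, and then stays in a safe place without returning
 * from the trap until the CPU is stopped.
 */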
extern void cpu_intrq_unregister_powerdown(uint64_t doneflag_va);

/*
 * set_idle_cpu is called from idle() when a CPU becomes idle.
 */
/*ARGSUSED*/
void
set_idle_cpu(int cpun)
{
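	/* nothing to do on sun4v */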
}

/*
 * unset_idle_cpu is called from idle() when a CPU is no longer idle.
 */
/*ARGSUSED*/
void
unset_idle_cpu(int cpun)
{
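	/* nothing to do on sun4v */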
}

/*
 * Stop a CPU based on its cpuid, using the cpu_stop hypervisor call.
 * Since this requires that the hypervisor force a remote CPU to stop,
 * the assumption is made that this should take roughly the same amount
 * of time as executing a cross-call.  Consequently, the xcall
 * timeout is used to determine when to give up waiting for the CPU to
 * stop.
 *
 * Attempts to stop a CPU already in the stopped or error state will
 * silently succeed. Zero is returned on success and a positive
 * errno value is returned on failure.
 */
int
stopcpu_bycpuid(int cpuid)
{
	uint64_t	loop_cnt;
	uint64_t	state;
	uint64_t	rv;
	uint64_t	major = 0;
	uint64_t	minor = 0;
	uint64_t	cpu_stop_time_limit;
	extern uint64_t	xc_func_time_limit;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Check the state of the CPU up front to see if an
	 * attempt to stop it is even necessary.
	 */
	if (hv_cpu_state(cpuid, &state) != H_EOK)
		return (EINVAL);

	/* treat stopped and error state the same */
	if (state != CPU_STATE_RUNNING) {
		/* nothing to do */
		return (0);
	}

	/*
	 * The HV API to stop a CPU is only supported in
	 * version 1.1 and later of the core group. If an
	 * older version of the HV is in use, return not
	 * supported.
	 */
	if (hsvc_version(HSVC_GROUP_CORE, &major, &minor) != 0)
		return (EINVAL);

	ASSERT(major != 0);

	if ((major == 1) && (minor < 1))
		return (ENOTSUP);

	/* use the mondo timeout if it has been initialized */
	cpu_stop_time_limit = xc_func_time_limit;

	/*
	 * If called early in boot before the mondo time limit
	 * is set, use a reasonable timeout based on the
	 * clock frequency of the current CPU.
	 */
	if (cpu_stop_time_limit == 0)
		cpu_stop_time_limit = cpunodes[CPU->cpu_id].clock_freq;

	/* should only fail if called too early in boot */
	ASSERT(cpu_stop_time_limit > 0);

	loop_cnt = 0;

	/*
	 * Attempt to stop the CPU, retrying if it is busy.
	 */
	while (loop_cnt++ < cpu_stop_time_limit) {
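		/* H_EWOULDBLOCK means the CPU is busy; keep retrying */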
		if ((rv = hv_cpu_stop(cpuid)) != H_EWOULDBLOCK)
			break;
	}

	if (loop_cnt == cpu_stop_time_limit)
		return (ETIMEDOUT);

	if (rv != H_EOK)
		return (EINVAL);

	/*
	 * Verify that the CPU has reached the stopped state.
	 */
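	/* loop_cnt is not reset; both loops share one overall time budget */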
	while (loop_cnt++ < cpu_stop_time_limit) {

		if (hv_cpu_state(cpuid, &state) != H_EOK)
			return (EINVAL);

		/* treat stopped and error state the same */
		if (state != CPU_STATE_RUNNING)
			break;
	}

	return ((loop_cnt == cpu_stop_time_limit) ? ETIMEDOUT : 0);
}

/*
 * X-trap to the target to unregister its interrupt and error queues
 * and put it in a safe place just before the CPU is stopped. After
 * unregistering its queues, the target CPU must not return from the
 * trap to priv or user context. Wait here until the target confirms
 * that the queue unregister succeeded.
 */
void
xt_cpu_unreg_powerdown(struct cpu *cpup)
{
	uint8_t volatile not_done;
	uint64_t starttick, endtick, tick, lasttick;
	processorid_t cpuid = cpup->cpu_id;

	kpreempt_disable();

	/*
	 * Sun4v uses a queue for receiving mondos. Successful
	 * transmission of a mondo only indicates that the mondo
	 * has been written into the queue.
	 *
	 * Set the not_done flag to 1 before sending the cross
	 * trap and wait until the other cpu resets it to 0.
	 */

	not_done = 1;

	xt_one_unchecked(cpuid, (xcfunc_t *)cpu_intrq_unregister_powerdown,
	    (uint64_t)&not_done, 0);
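
	/*
	 * Wait for the target to clear not_done.  endtick marks the
	 * point, in %tick units, at which the wait is considered to
	 * have timed out.
	 */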
	starttick = lasttick = gettick();
	endtick = starttick + xc_tick_limit;

	while (not_done) {

		tick = gettick();

		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a break
		 * point. Adjust endtick accordingly to avoid a
		 * false timeout.
		 */
		if (tick > (lasttick + xc_tick_jump_limit)) {
			endtick += (tick - lasttick);
		}

		lasttick = tick;
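		/* past the deadline: warn, but keep waiting for the target */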
		if (tick > endtick) {
			cmn_err(CE_CONT, "Cross trap timeout at cpu id %x\n",
			    cpuid);
			cmn_err(CE_WARN, "xt_intrq_unreg_powerdown: timeout");
		}
	}

	kpreempt_enable();
}

int
plat_cpu_poweroff(struct cpu *cp)
{
	int		rv = 0;
	int		status;
	processorid_t	cpuid = cp->cpu_id;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Capture all CPUs (except for the detaching proc) to prevent
	 * crosscalls to the detaching proc until it has cleared its
	 * bit in cpu_ready_set.
	 *
	 * The CPUs remain paused and the prom_mutex is known to be free.
	 * This prevents the x-trap victim from blocking when doing prom
	 * IEEE-1275 calls at a high PIL level.
	 */
	promsafe_pause_cpus();

	/*
	 * Quiesce interrupts on the target CPU. We do this by setting
	 * the CPU 'not ready' (i.e. removing the CPU from cpu_ready_set)
	 * to prevent it from receiving cross calls and cross traps. This
	 * prevents the processor from receiving any new soft interrupts.
	 */
	mp_cpu_quiesce(cp);

	/*
	 * Send a cross trap to the cpu to unregister its interrupt
	 * and error queues.
	 */
	xt_cpu_unreg_powerdown(cp);
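
	/* mark the CPU offline and powered off before stopping it */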
	cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;

	/* call into the Hypervisor to stop the CPU */
	if ((status = stopcpu_bycpuid(cpuid)) != 0) {
		rv = -1;
	}
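
	/* resume the CPUs paused above */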
	start_cpus();

	if (rv != 0) {
		cmn_err(CE_WARN, "failed to stop cpu %d (%d)", cpuid, status);
		/* mark the CPU faulted so that it cannot be onlined */
		cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_FAULTED;
	}

	return (rv);
}

int
plat_cpu_poweron(struct cpu *cp)
{
	extern void	restart_other_cpu(int);

	ASSERT(MUTEX_HELD(&cpu_lock));

	cp->cpu_flags &= ~CPU_POWEROFF;
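
	/* bring the CPU back up now that it is no longer marked powered off */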
	restart_other_cpu(cp->cpu_id);

	return (0);
}
281