xref: /titanic_44/usr/src/uts/i86pc/os/cpupm/speedstep.c (revision ad09f8b827db90c9a0093f0b6382803fa64a5fd1)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 /*
26  * Copyright (c) 2009,  Intel Corporation.
27  * All Rights Reserved.
28  */
29 
30 #include <sys/x86_archext.h>
31 #include <sys/machsystm.h>
32 #include <sys/archsystm.h>
33 #include <sys/x_call.h>
34 #include <sys/acpi/acpi.h>
35 #include <sys/acpica.h>
36 #include <sys/speedstep.h>
37 #include <sys/cpu_acpi.h>
38 #include <sys/cpupm.h>
39 #include <sys/dtrace.h>
40 #include <sys/sdt.h>
41 
/*
 * Turbo-mode bookkeeping structures.
 *
 * cpupm_turbo_info_t is allocated per CPU in speedstep_init() and hung
 * off mach_state->ms_vendor; turbo_kstat_t is the named-kstat template
 * shared by all CPUs (see turbo_kstat below, protected by turbo_mutex).
 */
typedef struct cpupm_turbo_info {
	kstat_t		*turbo_ksp;		/* exported "turbo" kstat */
	int		in_turbo;		/* CPU_IN_TURBO while at P0 */
	int		turbo_supported;	/* nonzero if turbo available */
	uint64_t	t_mcnt;			/* accumulated IA32_MPERF count */
	uint64_t	t_acnt;			/* accumulated IA32_APERF count */
} cpupm_turbo_info_t;

typedef struct turbo_kstat_s {
	struct kstat_named	turbo_supported;	/* turbo flag */
	struct kstat_named	t_mcnt;			/* IA32_MPERF_MSR */
	struct kstat_named	t_acnt;			/* IA32_APERF_MSR */
} turbo_kstat_t;
58 
/* Forward declarations for the SpeedStep state operations and helpers. */
static int speedstep_init(cpu_t *);
static void speedstep_fini(cpu_t *);
static void speedstep_power(cpuset_t, uint32_t);
static void speedstep_stop(cpu_t *);
static boolean_t turbo_supported(void);
static int turbo_kstat_update(kstat_t *, int);
static void get_turbo_info(cpupm_turbo_info_t *);
static void reset_turbo_info(void);
static void record_turbo_info(cpupm_turbo_info_t *, uint32_t, uint32_t);
static void update_turbo_info(cpupm_turbo_info_t *);

/*
 * Interfaces for modules implementing Intel's Enhanced SpeedStep.
 * The ops vector is consumed by the cpupm framework; member order is
 * dictated by cpupm_state_ops_t.
 */
cpupm_state_ops_t speedstep_ops = {
	"Enhanced SpeedStep Technology",
	speedstep_init,
	speedstep_fini,
	speedstep_power,
	speedstep_stop
};
80 
/*
 * Error returns
 */
#define	ESS_RET_SUCCESS		0x00
#define	ESS_RET_NO_PM		0x01
#define	ESS_RET_UNSUP_STATE	0x02

/*
 * MSR registers for changing and reading processor power state.
 */
#define	IA32_PERF_STAT_MSR		0x198
#define	IA32_PERF_CTL_MSR		0x199

#define	IA32_CPUID_TSC_CONSTANT		0xF30
#define	IA32_MISC_ENABLE_MSR		0x1A0
#define	IA32_MISC_ENABLE_EST		(1<<16)
#define	IA32_MISC_ENABLE_CXE		(1<<25)

/* CPUID.6:EAX bit 1 advertises turbo-boost support. */
#define	CPUID_TURBO_SUPPORT		(1 << 1)
#define	CPU_ACPI_P0			0
#define	CPU_IN_TURBO			1

/*
 * MSR for hardware coordination feedback mechanism
 *   - IA32_MPERF: increments in proportion to a fixed frequency
 *   - IA32_APERF: increments in proportion to actual performance
 */
#define	IA32_MPERF_MSR			0xE7
#define	IA32_APERF_MSR			0xE8

/*
 * Debugging support
 */
#ifdef	DEBUG
volatile int ess_debug = 0;
#define	ESSDEBUG(arglist) if (ess_debug) printf arglist;
#else
#define	ESSDEBUG(arglist)
#endif

/* Serializes access to turbo_kstat (installed as ks_lock). */
static kmutex_t turbo_mutex;

/* Shared named-kstat template; per-CPU values are filled in on update. */
turbo_kstat_t turbo_kstat = {
	{ "turbo_supported",	KSTAT_DATA_UINT32 },
	{ "turbo_mcnt",		KSTAT_DATA_UINT64 },
	{ "turbo_acnt",		KSTAT_DATA_UINT64 },
};
128 
129 /*
130  * kstat update function of the turbo mode info
131  */
132 static int
133 turbo_kstat_update(kstat_t *ksp, int flag)
134 {
135 	cpupm_turbo_info_t *turbo_info = ksp->ks_private;
136 
137 	if (flag == KSTAT_WRITE) {
138 		return (EACCES);
139 	}
140 
141 	/*
142 	 * update the count in case CPU is in the turbo
143 	 * mode for a long time
144 	 */
145 	if (turbo_info->in_turbo == CPU_IN_TURBO)
146 		update_turbo_info(turbo_info);
147 
148 	turbo_kstat.turbo_supported.value.ui32 =
149 	    turbo_info->turbo_supported;
150 	turbo_kstat.t_mcnt.value.ui64 = turbo_info->t_mcnt;
151 	turbo_kstat.t_acnt.value.ui64 = turbo_info->t_acnt;
152 
153 	return (0);
154 }
155 
156 /*
157  * Get count of MPERF/APERF MSR
158  */
159 static void
160 get_turbo_info(cpupm_turbo_info_t *turbo_info)
161 {
162 	ulong_t		iflag;
163 	uint64_t	mcnt, acnt;
164 
165 	iflag = intr_clear();
166 	mcnt = rdmsr(IA32_MPERF_MSR);
167 	acnt = rdmsr(IA32_APERF_MSR);
168 	turbo_info->t_mcnt += mcnt;
169 	turbo_info->t_acnt += acnt;
170 	intr_restore(iflag);
171 }
172 
173 /*
174  * Clear MPERF/APERF MSR
175  */
176 static void
177 reset_turbo_info(void)
178 {
179 	ulong_t		iflag;
180 
181 	iflag = intr_clear();
182 	wrmsr(IA32_MPERF_MSR, 0);
183 	wrmsr(IA32_APERF_MSR, 0);
184 	intr_restore(iflag);
185 }
186 
187 /*
188  * sum up the count of one CPU_ACPI_P0 transition
189  */
190 static void
191 record_turbo_info(cpupm_turbo_info_t *turbo_info,
192     uint32_t cur_state, uint32_t req_state)
193 {
194 	if (!turbo_info->turbo_supported)
195 		return;
196 	/*
197 	 * enter P0 state
198 	 */
199 	if (req_state == CPU_ACPI_P0) {
200 		reset_turbo_info();
201 		turbo_info->in_turbo = CPU_IN_TURBO;
202 	}
203 	/*
204 	 * Leave P0 state
205 	 */
206 	else if (cur_state == CPU_ACPI_P0) {
207 		turbo_info->in_turbo = 0;
208 		get_turbo_info(turbo_info);
209 	}
210 }
211 
212 /*
213  * update the sum of counts and clear MSRs
214  */
215 static void
216 update_turbo_info(cpupm_turbo_info_t *turbo_info)
217 {
218 	ulong_t		iflag;
219 	uint64_t	mcnt, acnt;
220 
221 	iflag = intr_clear();
222 	mcnt = rdmsr(IA32_MPERF_MSR);
223 	acnt = rdmsr(IA32_APERF_MSR);
224 	wrmsr(IA32_MPERF_MSR, 0);
225 	wrmsr(IA32_APERF_MSR, 0);
226 	turbo_info->t_mcnt += mcnt;
227 	turbo_info->t_acnt += acnt;
228 	intr_restore(iflag);
229 }
230 
231 /*
232  * Write the ctrl register. How it is written, depends upon the _PCT
233  * APCI object value.
234  */
235 static void
236 write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
237 {
238 	cpu_acpi_pct_t *pct_ctrl;
239 	uint64_t reg;
240 
241 	pct_ctrl = CPU_ACPI_PCT_CTRL(handle);
242 
243 	switch (pct_ctrl->cr_addrspace_id) {
244 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
245 		/*
246 		 * Read current power state because reserved bits must be
247 		 * preserved, compose new value, and write it.
248 		 */
249 		reg = rdmsr(IA32_PERF_CTL_MSR);
250 		reg &= ~((uint64_t)0xFFFF);
251 		reg |= ctrl;
252 		wrmsr(IA32_PERF_CTL_MSR, reg);
253 		break;
254 
255 	case ACPI_ADR_SPACE_SYSTEM_IO:
256 		(void) cpu_acpi_write_port(pct_ctrl->cr_address, ctrl,
257 		    pct_ctrl->cr_width);
258 		break;
259 
260 	default:
261 		DTRACE_PROBE1(ess_ctrl_unsupported_type, uint8_t,
262 		    pct_ctrl->cr_addrspace_id);
263 		return;
264 	}
265 
266 	DTRACE_PROBE1(ess_ctrl_write, uint32_t, ctrl);
267 }
268 
269 /*
270  * Transition the current processor to the requested state.
271  */
272 void
273 speedstep_pstate_transition(uint32_t req_state)
274 {
275 	cpupm_mach_state_t *mach_state =
276 	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
277 	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
278 	cpu_acpi_pstate_t *req_pstate;
279 	uint32_t ctrl;
280 	cpupm_turbo_info_t *turbo_info =
281 	    (cpupm_turbo_info_t *)(mach_state->ms_vendor);
282 
283 	req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
284 	req_pstate += req_state;
285 
286 	DTRACE_PROBE1(ess_transition, uint32_t, CPU_ACPI_FREQ(req_pstate));
287 
288 	/*
289 	 * Initiate the processor p-state change.
290 	 */
291 	ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
292 	write_ctrl(handle, ctrl);
293 
294 	if (turbo_info)
295 		record_turbo_info(turbo_info,
296 		    mach_state->ms_pstate.cma_state.pstate, req_state);
297 
298 
299 	mach_state->ms_pstate.cma_state.pstate = req_state;
300 	cpu_set_curr_clock(((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000));
301 }
302 
303 static void
304 speedstep_power(cpuset_t set, uint32_t req_state)
305 {
306 	/*
307 	 * If thread is already running on target CPU then just
308 	 * make the transition request. Otherwise, we'll need to
309 	 * make a cross-call.
310 	 */
311 	kpreempt_disable();
312 	if (CPU_IN_SET(set, CPU->cpu_id)) {
313 		speedstep_pstate_transition(req_state);
314 		CPUSET_DEL(set, CPU->cpu_id);
315 	}
316 	if (!CPUSET_ISNULL(set)) {
317 		xc_call((xc_arg_t)req_state, NULL, NULL, CPUSET2BV(set),
318 		    (xc_func_t)speedstep_pstate_transition);
319 	}
320 	kpreempt_enable();
321 }
322 
323 /*
324  * Validate that this processor supports Speedstep and if so,
325  * get the P-state data from ACPI and cache it.
326  */
327 static int
328 speedstep_init(cpu_t *cp)
329 {
330 	cpupm_mach_state_t *mach_state =
331 	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
332 	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
333 	cpu_acpi_pct_t *pct_stat;
334 	cpupm_turbo_info_t *turbo_info;
335 
336 	ESSDEBUG(("speedstep_init: processor %d\n", cp->cpu_id));
337 
338 	/*
339 	 * Cache the P-state specific ACPI data.
340 	 */
341 	if (cpu_acpi_cache_pstate_data(handle) != 0) {
342 		cmn_err(CE_NOTE, "!SpeedStep support is being "
343 		    "disabled due to errors parsing ACPI P-state objects "
344 		    "exported by BIOS.");
345 		speedstep_fini(cp);
346 		return (ESS_RET_NO_PM);
347 	}
348 
349 	pct_stat = CPU_ACPI_PCT_STATUS(handle);
350 	switch (pct_stat->cr_addrspace_id) {
351 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
352 		ESSDEBUG(("Transitions will use fixed hardware\n"));
353 		break;
354 	case ACPI_ADR_SPACE_SYSTEM_IO:
355 		ESSDEBUG(("Transitions will use system IO\n"));
356 		break;
357 	default:
358 		cmn_err(CE_WARN, "!_PCT conifgured for unsupported "
359 		    "addrspace = %d.", pct_stat->cr_addrspace_id);
360 		cmn_err(CE_NOTE, "!CPU power management will not function.");
361 		speedstep_fini(cp);
362 		return (ESS_RET_NO_PM);
363 	}
364 
365 	cpupm_alloc_domains(cp, CPUPM_P_STATES);
366 
367 	if (!turbo_supported()) {
368 		mach_state->ms_vendor = NULL;
369 		goto ess_ret_success;
370 	}
371 	/*
372 	 * turbo mode supported
373 	 */
374 	turbo_info = mach_state->ms_vendor =
375 	    kmem_zalloc(sizeof (cpupm_turbo_info_t), KM_SLEEP);
376 	turbo_info->turbo_supported = 1;
377 	turbo_info->turbo_ksp = kstat_create("turbo", cp->cpu_id,
378 	    "turbo", "misc", KSTAT_TYPE_NAMED,
379 	    sizeof (turbo_kstat) / sizeof (kstat_named_t),
380 	    KSTAT_FLAG_VIRTUAL);
381 
382 	if (turbo_info->turbo_ksp == NULL) {
383 		cmn_err(CE_NOTE, "kstat_create(turbo) fail");
384 	} else {
385 		turbo_info->turbo_ksp->ks_data = &turbo_kstat;
386 		turbo_info->turbo_ksp->ks_lock = &turbo_mutex;
387 		turbo_info->turbo_ksp->ks_update = turbo_kstat_update;
388 		turbo_info->turbo_ksp->ks_data_size += MAXNAMELEN;
389 		turbo_info->turbo_ksp->ks_private = turbo_info;
390 
391 		kstat_install(turbo_info->turbo_ksp);
392 	}
393 
394 ess_ret_success:
395 
396 	ESSDEBUG(("Processor %d succeeded.\n", cp->cpu_id))
397 	return (ESS_RET_SUCCESS);
398 }
399 
400 /*
401  * Free resources allocated by speedstep_init().
402  */
403 static void
404 speedstep_fini(cpu_t *cp)
405 {
406 	cpupm_mach_state_t *mach_state =
407 	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
408 	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
409 	cpupm_turbo_info_t *turbo_info =
410 	    (cpupm_turbo_info_t *)(mach_state->ms_vendor);
411 
412 	cpupm_free_domains(&cpupm_pstate_domains);
413 	cpu_acpi_free_pstate_data(handle);
414 
415 	if (turbo_info) {
416 		if (turbo_info->turbo_ksp != NULL)
417 			kstat_delete(turbo_info->turbo_ksp);
418 		kmem_free(turbo_info, sizeof (cpupm_turbo_info_t));
419 	}
420 }
421 
422 static void
423 speedstep_stop(cpu_t *cp)
424 {
425 	cpupm_mach_state_t *mach_state =
426 	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
427 	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
428 	cpupm_turbo_info_t *turbo_info =
429 	    (cpupm_turbo_info_t *)(mach_state->ms_vendor);
430 
431 	cpupm_remove_domains(cp, CPUPM_P_STATES, &cpupm_pstate_domains);
432 	cpu_acpi_free_pstate_data(handle);
433 
434 	if (turbo_info) {
435 		if (turbo_info->turbo_ksp != NULL)
436 			kstat_delete(turbo_info->turbo_ksp);
437 		kmem_free(turbo_info, sizeof (cpupm_turbo_info_t));
438 	}
439 }
440 
441 boolean_t
442 speedstep_supported(uint_t family, uint_t model)
443 {
444 	struct cpuid_regs cpu_regs;
445 
446 	/* Required features */
447 	if (!(x86_feature & X86_CPUID) ||
448 	    !(x86_feature & X86_MSR)) {
449 		return (B_FALSE);
450 	}
451 
452 	/*
453 	 * We only support family/model combinations which
454 	 * are P-state TSC invariant.
455 	 */
456 	if (!((family == 0xf && model >= 0x3) ||
457 	    (family == 0x6 && model >= 0xe))) {
458 		return (B_FALSE);
459 	}
460 
461 	/*
462 	 * Enhanced SpeedStep supported?
463 	 */
464 	cpu_regs.cp_eax = 0x1;
465 	(void) __cpuid_insn(&cpu_regs);
466 	if (!(cpu_regs.cp_ecx & CPUID_INTC_ECX_EST)) {
467 		return (B_FALSE);
468 	}
469 
470 	return (B_TRUE);
471 }
472 
473 boolean_t
474 turbo_supported(void)
475 {
476 	struct cpuid_regs cpu_regs;
477 
478 	/* Required features */
479 	if (!(x86_feature & X86_CPUID) ||
480 	    !(x86_feature & X86_MSR)) {
481 		return (B_FALSE);
482 	}
483 
484 	/*
485 	 * turbo mode supported?
486 	 */
487 	cpu_regs.cp_eax = 0x6;
488 	(void) __cpuid_insn(&cpu_regs);
489 	if (!(cpu_regs.cp_eax & CPUID_TURBO_SUPPORT)) {
490 		return (B_FALSE);
491 	}
492 
493 	return (B_TRUE);
494 }
495