xref: /freebsd/sys/x86/cpufreq/hwpstate_intel.c (revision e92ffd9b626833ebdbf2742c8ffddc6cd94b963e)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2018 Intel Corporation
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted providing that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
19  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
24  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/types.h>
32 #include <sys/param.h>
33 #include <sys/sbuf.h>
34 #include <sys/module.h>
35 #include <sys/systm.h>
36 #include <sys/errno.h>
37 #include <sys/param.h>
38 #include <sys/kernel.h>
39 #include <sys/bus.h>
40 #include <sys/cpu.h>
41 #include <sys/smp.h>
42 #include <sys/proc.h>
43 #include <sys/sched.h>
44 
45 #include <machine/cpu.h>
46 #include <machine/md_var.h>
47 #include <machine/cputypes.h>
48 #include <machine/specialreg.h>
49 
50 #include <contrib/dev/acpica/include/acpi.h>
51 
52 #include <dev/acpica/acpivar.h>
53 
54 #include <x86/cpufreq/hwpstate_intel_internal.h>
55 
56 #include "acpi_if.h"
57 #include "cpufreq_if.h"
58 
59 extern uint64_t	tsc_freq;
60 
61 static int	intel_hwpstate_probe(device_t dev);
62 static int	intel_hwpstate_attach(device_t dev);
63 static int	intel_hwpstate_detach(device_t dev);
64 static int	intel_hwpstate_suspend(device_t dev);
65 static int	intel_hwpstate_resume(device_t dev);
66 
67 static int      intel_hwpstate_get(device_t dev, struct cf_setting *cf);
68 static int      intel_hwpstate_type(device_t dev, int *type);
69 
70 static device_method_t intel_hwpstate_methods[] = {
71 	/* Device interface */
72 	DEVMETHOD(device_identify,	intel_hwpstate_identify),
73 	DEVMETHOD(device_probe,		intel_hwpstate_probe),
74 	DEVMETHOD(device_attach,	intel_hwpstate_attach),
75 	DEVMETHOD(device_detach,	intel_hwpstate_detach),
76 	DEVMETHOD(device_suspend,	intel_hwpstate_suspend),
77 	DEVMETHOD(device_resume,	intel_hwpstate_resume),
78 
79 	/* cpufreq interface */
80 	DEVMETHOD(cpufreq_drv_get,      intel_hwpstate_get),
81 	DEVMETHOD(cpufreq_drv_type,     intel_hwpstate_type),
82 
83 	DEVMETHOD_END
84 };
85 
/*
 * Per-CPU (per cpufreq child device) driver state.  Capability booleans are
 * latched from CPUID in attach; the request/bias words are cached copies of
 * the corresponding MSRs so sysctl writers can do read-modify-write without
 * re-reading hardware.
 */
struct hwp_softc {
	device_t		dev;
	bool 			hwp_notifications;	/* CPUTPM1_HWP_NOTIFICATION */
	bool			hwp_activity_window;	/* CPUTPM1_HWP_ACTIVITY_WINDOW */
	bool			hwp_pref_ctrl;		/* EPP supported (CPUTPM1_HWP_PERF_PREF) */
	bool			hwp_pkg_ctrl;		/* package-level control supported */
	bool			hwp_pkg_ctrl_en;	/* ... and enabled via tunable */
	bool			hwp_perf_bias;		/* IA32_ENERGY_PERF_BIAS present */
	bool			hwp_perf_bias_cached;	/* hwp_energy_perf_bias is valid */

	uint64_t		req; /* Cached copy of HWP_REQUEST */
	uint64_t		hwp_energy_perf_bias;	/* Cache PERF_BIAS */

	/* Performance levels from IA32_HWP_CAPABILITIES (raw 0-255 scale). */
	uint8_t			high;
	uint8_t			guaranteed;
	uint8_t			efficient;
	uint8_t			low;
};
104 
105 static devclass_t hwpstate_intel_devclass;
106 static driver_t hwpstate_intel_driver = {
107 	"hwpstate_intel",
108 	intel_hwpstate_methods,
109 	sizeof(struct hwp_softc),
110 };
111 
112 DRIVER_MODULE(hwpstate_intel, cpu, hwpstate_intel_driver,
113     hwpstate_intel_devclass, NULL, NULL);
114 MODULE_VERSION(hwpstate_intel, 1);
115 
116 static bool hwpstate_pkg_ctrl_enable = true;
117 SYSCTL_BOOL(_machdep, OID_AUTO, hwpstate_pkg_ctrl, CTLFLAG_RDTUN,
118     &hwpstate_pkg_ctrl_enable, 0,
119     "Set 1 (default) to enable package-level control, 0 to disable");
120 
121 static int
122 intel_hwp_dump_sysctl_handler(SYSCTL_HANDLER_ARGS)
123 {
124 	device_t dev;
125 	struct pcpu *pc;
126 	struct sbuf *sb;
127 	struct hwp_softc *sc;
128 	uint64_t data, data2;
129 	int ret;
130 
131 	sc = (struct hwp_softc *)arg1;
132 	dev = sc->dev;
133 
134 	pc = cpu_get_pcpu(dev);
135 	if (pc == NULL)
136 		return (ENXIO);
137 
138 	sb = sbuf_new(NULL, NULL, 1024, SBUF_FIXEDLEN | SBUF_INCLUDENUL);
139 	sbuf_putc(sb, '\n');
140 	thread_lock(curthread);
141 	sched_bind(curthread, pc->pc_cpuid);
142 	thread_unlock(curthread);
143 
144 	rdmsr_safe(MSR_IA32_PM_ENABLE, &data);
145 	sbuf_printf(sb, "CPU%d: HWP %sabled\n", pc->pc_cpuid,
146 	    ((data & 1) ? "En" : "Dis"));
147 
148 	if (data == 0) {
149 		ret = 0;
150 		goto out;
151 	}
152 
153 	rdmsr_safe(MSR_IA32_HWP_CAPABILITIES, &data);
154 	sbuf_printf(sb, "\tHighest Performance: %03ju\n", data & 0xff);
155 	sbuf_printf(sb, "\tGuaranteed Performance: %03ju\n", (data >> 8) & 0xff);
156 	sbuf_printf(sb, "\tEfficient Performance: %03ju\n", (data >> 16) & 0xff);
157 	sbuf_printf(sb, "\tLowest Performance: %03ju\n", (data >> 24) & 0xff);
158 
159 	rdmsr_safe(MSR_IA32_HWP_REQUEST, &data);
160 	data2 = 0;
161 	if (sc->hwp_pkg_ctrl && (data & IA32_HWP_REQUEST_PACKAGE_CONTROL))
162 		rdmsr_safe(MSR_IA32_HWP_REQUEST_PKG, &data2);
163 
164 	sbuf_putc(sb, '\n');
165 
166 #define pkg_print(x, name, offset) do {					\
167 	if (!sc->hwp_pkg_ctrl || (data & x) != 0) 			\
168 		sbuf_printf(sb, "\t%s: %03u\n", name,			\
169 		    (unsigned)(data >> offset) & 0xff);			\
170 	else								\
171 		sbuf_printf(sb, "\t%s: %03u\n", name,			\
172 		    (unsigned)(data2 >> offset) & 0xff);		\
173 } while (0)
174 
175 	pkg_print(IA32_HWP_REQUEST_EPP_VALID,
176 	    "Requested Efficiency Performance Preference", 24);
177 	pkg_print(IA32_HWP_REQUEST_DESIRED_VALID,
178 	    "Requested Desired Performance", 16);
179 	pkg_print(IA32_HWP_REQUEST_MAXIMUM_VALID,
180 	    "Requested Maximum Performance", 8);
181 	pkg_print(IA32_HWP_REQUEST_MINIMUM_VALID,
182 	    "Requested Minimum Performance", 0);
183 #undef pkg_print
184 
185 	sbuf_putc(sb, '\n');
186 
187 out:
188 	thread_lock(curthread);
189 	sched_unbind(curthread);
190 	thread_unlock(curthread);
191 
192 	ret = sbuf_finish(sb);
193 	if (ret == 0)
194 		ret = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));
195 	sbuf_delete(sb);
196 
197 	return (ret);
198 }
199 
/*
 * Map a percentage in [0, 100] onto the raw 8-bit performance scale
 * ([0, 0xff]) used by the HWP request fields.
 */
static inline int
percent_to_raw(int x)
{

	MPASS(x >= 0 && x <= 100);
	return ((255 * x) / 100);
}
207 
/*
 * Round x*10 (given in [0, 1000]) to the integer nearest x.
 *
 * This keeps human-friendly percentages stable across the raw<->percent
 * round trip; without it, values such as 25, 50, 75 would decay to
 * 24, 49, and 74.
 */
static inline int
round10(int xtimes10)
{
	int biased;

	biased = xtimes10 + 5;	/* round half up */
	return (biased / 10);
}
220 
/* Inverse of percent_to_raw(): raw [0, 0xff] to a rounded percentage. */
static inline int
raw_to_percent(int x)
{
	MPASS(x >= 0 && x <= 0xff);
	return (round10((x * 1000) / 0xff));
}
227 
/* Range of MSR_IA32_ENERGY_PERF_BIAS is more limited: 0-0xf. */
static inline int
percent_to_raw_perf_bias(int x)
{
	MPASS(x >= 0 && x <= 100);
	/*
	 * Round half up so that the raw values present as nice round human
	 * numbers and also round-trip back to the same raw value.
	 */
	return ((0xf * x + 50) / 100);
}
239 
/*
 * Inverse of percent_to_raw_perf_bias().  The hardware step interval is
 * 100/15 ~= 6.67%, so snap the result to multiples of 5 for readability.
 */
static inline int
raw_to_percent_perf_bias(int x)
{
	MPASS(x >= 0 && x <= 0xf);
	return (5 * ((20 * x) / 0xf));
}
247 
/*
 * Sysctl handler for dev.hwpstate_intel.N.epp: read or set this CPU's
 * energy/performance preference as a percentage, where 0 is most
 * performant and 100 is most energy-efficient.
 *
 * Uses the EPP field of IA32_HWP_REQUEST when CPUID advertises EPP
 * support; otherwise falls back to the legacy IA32_ENERGY_PERF_BIAS MSR.
 * Returns ENODEV if neither mechanism exists, ENXIO if the CPU's pcpu
 * cannot be found, EINVAL for out-of-range input, or an MSR access error.
 */
static int
sysctl_epp_select(SYSCTL_HANDLER_ARGS)
{
	struct hwp_softc *sc;
	device_t dev;
	struct pcpu *pc;
	uint64_t epb;
	uint32_t val;
	int ret;

	/* arg1 is the cpufreq child device for this CPU. */
	dev = oidp->oid_arg1;
	sc = device_get_softc(dev);
	if (!sc->hwp_pref_ctrl && !sc->hwp_perf_bias)
		return (ENODEV);

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	/* Bind to the target CPU so the MSR accesses hit the right core. */
	thread_lock(curthread);
	sched_bind(curthread, pc->pc_cpuid);
	thread_unlock(curthread);

	if (sc->hwp_pref_ctrl) {
		/* EPP is bits 31:24 of the cached HWP_REQUEST word. */
		val = (sc->req & IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE) >> 24;
		val = raw_to_percent(val);
	} else {
		/*
		 * If cpuid indicates EPP is not supported, the HWP controller
		 * uses MSR_IA32_ENERGY_PERF_BIAS instead (Intel SDM §14.4.4).
		 * This register is per-core (but not HT).
		 */
		if (!sc->hwp_perf_bias_cached) {
			ret = rdmsr_safe(MSR_IA32_ENERGY_PERF_BIAS, &epb);
			if (ret)
				goto out;
			sc->hwp_energy_perf_bias = epb;
			sc->hwp_perf_bias_cached = true;
		}
		val = sc->hwp_energy_perf_bias &
		    IA32_ENERGY_PERF_BIAS_POLICY_HINT_MASK;
		val = raw_to_percent_perf_bias(val);
	}

	MPASS(val >= 0 && val <= 100);

	ret = sysctl_handle_int(oidp, &val, 0, req);
	/* Error, or a read-only access: nothing left to do. */
	if (ret || req->newptr == NULL)
		goto out;

	if (val > 100) {
		ret = EINVAL;
		goto out;
	}

	if (sc->hwp_pref_ctrl) {
		val = percent_to_raw(val);

		/* Splice the new EPP value into the cached request word. */
		sc->req =
		    ((sc->req & ~IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE)
		    | (val << 24u));

		/* Under package-level control, the PKG MSR is authoritative. */
		if (sc->hwp_pkg_ctrl_en)
			ret = wrmsr_safe(MSR_IA32_HWP_REQUEST_PKG, sc->req);
		else
			ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
	} else {
		val = percent_to_raw_perf_bias(val);
		MPASS((val & ~IA32_ENERGY_PERF_BIAS_POLICY_HINT_MASK) == 0);

		/* Update the cached copy first, then write it back. */
		sc->hwp_energy_perf_bias =
		    ((sc->hwp_energy_perf_bias &
		    ~IA32_ENERGY_PERF_BIAS_POLICY_HINT_MASK) | val);
		ret = wrmsr_safe(MSR_IA32_ENERGY_PERF_BIAS,
		    sc->hwp_energy_perf_bias);
	}

out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (ret);
}
332 
333 void
334 intel_hwpstate_identify(driver_t *driver, device_t parent)
335 {
336 	if (device_find_child(parent, "hwpstate_intel", -1) != NULL)
337 		return;
338 
339 	if (cpu_vendor_id != CPU_VENDOR_INTEL)
340 		return;
341 
342 	if (resource_disabled("hwpstate_intel", 0))
343 		return;
344 
345 	/*
346 	 * Intel SDM 14.4.1 (HWP Programming Interfaces):
347 	 *   Availability of HWP baseline resource and capability,
348 	 *   CPUID.06H:EAX[bit 7]: If this bit is set, HWP provides several new
349 	 *   architectural MSRs: IA32_PM_ENABLE, IA32_HWP_CAPABILITIES,
350 	 *   IA32_HWP_REQUEST, IA32_HWP_STATUS.
351 	 */
352 	if ((cpu_power_eax & CPUTPM1_HWP) == 0)
353 		return;
354 
355 	if (BUS_ADD_CHILD(parent, 10, "hwpstate_intel", device_get_unit(parent))
356 	    == NULL)
357 		device_printf(parent, "hwpstate_intel: add child failed\n");
358 }
359 
360 static int
361 intel_hwpstate_probe(device_t dev)
362 {
363 
364 	device_set_desc(dev, "Intel Speed Shift");
365 	return (BUS_PROBE_NOWILDCARD);
366 }
367 
/*
 * Enable HWP on the CPU backing sc->dev and configure fully-autonomous
 * operation: hardware selects the performance level within the full
 * [lowest, highest] capability range, with the desired-performance and
 * activity-window request fields cleared.  Caches the HWP_REQUEST word
 * and the four capability levels in the softc.  Runs bound to the target
 * CPU because all the MSRs involved are per-logical-CPU.
 */
static int
set_autonomous_hwp(struct hwp_softc *sc)
{
	struct pcpu *pc;
	device_t dev;
	uint64_t caps;
	int ret;

	dev = sc->dev;

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	thread_lock(curthread);
	sched_bind(curthread, pc->pc_cpuid);
	thread_unlock(curthread);

	/* XXX: Many MSRs aren't readable until feature is enabled */
	ret = wrmsr_safe(MSR_IA32_PM_ENABLE, 1);
	if (ret) {
		/*
		 * This is actually a package-level MSR, and only the first
		 * write is not ignored.  So it is harmless to enable it across
		 * all devices, and this allows us not to care especially in
		 * which order cores (and packages) are probed.  This error
		 * condition should not happen given we gate on the HWP CPUID
		 * feature flag, if the Intel SDM is correct.
		 */
		device_printf(dev, "Failed to enable HWP for cpu%d (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	/* Snapshot the hardware's initial request word as our baseline. */
	ret = rdmsr_safe(MSR_IA32_HWP_REQUEST, &sc->req);
	if (ret) {
		device_printf(dev,
		    "Failed to read HWP request MSR for cpu%d (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	ret = rdmsr_safe(MSR_IA32_HWP_CAPABILITIES, &caps);
	if (ret) {
		device_printf(dev,
		    "Failed to read HWP capabilities MSR for cpu%d (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	/*
	 * High and low are static; "guaranteed" is dynamic; and efficient is
	 * also dynamic.
	 */
	sc->high = IA32_HWP_CAPABILITIES_HIGHEST_PERFORMANCE(caps);
	sc->guaranteed = IA32_HWP_CAPABILITIES_GUARANTEED_PERFORMANCE(caps);
	sc->efficient = IA32_HWP_CAPABILITIES_EFFICIENT_PERFORMANCE(caps);
	sc->low = IA32_HWP_CAPABILITIES_LOWEST_PERFORMANCE(caps);

	/* hardware autonomous selection determines the performance target */
	sc->req &= ~IA32_HWP_DESIRED_PERFORMANCE;

	/* enable HW dynamic selection of window size */
	sc->req &= ~IA32_HWP_ACTIVITY_WINDOW;

	/* IA32_HWP_REQUEST.Minimum_Performance = IA32_HWP_CAPABILITIES.Lowest_Performance */
	sc->req &= ~IA32_HWP_MINIMUM_PERFORMANCE;
	sc->req |= sc->low;

	/* IA32_HWP_REQUEST.Maximum_Performance = IA32_HWP_CAPABILITIES.Highest_Performance. */
	sc->req &= ~IA32_HWP_REQUEST_MAXIMUM_PERFORMANCE;
	sc->req |= sc->high << 8;

	/* If supported, request package-level control for this CPU. */
	if (sc->hwp_pkg_ctrl_en)
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req |
		    IA32_HWP_REQUEST_PACKAGE_CONTROL);
	else
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
	if (ret) {
		device_printf(dev,
		    "Failed to setup%s autonomous HWP for cpu%d\n",
		    sc->hwp_pkg_ctrl_en ? " PKG" : "", pc->pc_cpuid);
		goto out;
	}

	/* If supported, write the PKG-wide control MSR. */
	if (sc->hwp_pkg_ctrl_en) {
		/*
		 * "The structure of the IA32_HWP_REQUEST_PKG MSR
		 * (package-level) is identical to the IA32_HWP_REQUEST MSR
		 * with the exception of the Package Control field, which does
		 * not exist." (Intel SDM §14.4.4)
		 */
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST_PKG, sc->req);
		if (ret) {
			device_printf(dev,
			    "Failed to set autonomous HWP for package\n");
		}
	}

out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (ret);
}
476 
477 static int
478 intel_hwpstate_attach(device_t dev)
479 {
480 	struct hwp_softc *sc;
481 	int ret;
482 
483 	sc = device_get_softc(dev);
484 	sc->dev = dev;
485 
486 	/* eax */
487 	if (cpu_power_eax & CPUTPM1_HWP_NOTIFICATION)
488 		sc->hwp_notifications = true;
489 	if (cpu_power_eax & CPUTPM1_HWP_ACTIVITY_WINDOW)
490 		sc->hwp_activity_window = true;
491 	if (cpu_power_eax & CPUTPM1_HWP_PERF_PREF)
492 		sc->hwp_pref_ctrl = true;
493 	if (cpu_power_eax & CPUTPM1_HWP_PKG)
494 		sc->hwp_pkg_ctrl = true;
495 
496 	/* Allow administrators to disable pkg-level control. */
497 	sc->hwp_pkg_ctrl_en = (sc->hwp_pkg_ctrl && hwpstate_pkg_ctrl_enable);
498 
499 	/* ecx */
500 	if (cpu_power_ecx & CPUID_PERF_BIAS)
501 		sc->hwp_perf_bias = true;
502 
503 	ret = set_autonomous_hwp(sc);
504 	if (ret)
505 		return (ret);
506 
507 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
508 	    SYSCTL_STATIC_CHILDREN(_debug), OID_AUTO, device_get_nameunit(dev),
509 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
510 	    sc, 0, intel_hwp_dump_sysctl_handler, "A", "");
511 
512 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
513 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
514 	    "epp", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, dev, 0,
515 	    sysctl_epp_select, "I",
516 	    "Efficiency/Performance Preference "
517 	    "(range from 0, most performant, through 100, most efficient)");
518 
519 	return (cpufreq_register(dev));
520 }
521 
522 static int
523 intel_hwpstate_detach(device_t dev)
524 {
525 
526 	return (cpufreq_unregister(dev));
527 }
528 
529 static int
530 intel_hwpstate_get(device_t dev, struct cf_setting *set)
531 {
532 	struct pcpu *pc;
533 	uint64_t rate;
534 	int ret;
535 
536 	if (set == NULL)
537 		return (EINVAL);
538 
539 	pc = cpu_get_pcpu(dev);
540 	if (pc == NULL)
541 		return (ENXIO);
542 
543 	memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set));
544 	set->dev = dev;
545 
546 	ret = cpu_est_clockrate(pc->pc_cpuid, &rate);
547 	if (ret == 0)
548 		set->freq = rate / 1000000;
549 
550 	set->volts = CPUFREQ_VAL_UNKNOWN;
551 	set->power = CPUFREQ_VAL_UNKNOWN;
552 	set->lat = CPUFREQ_VAL_UNKNOWN;
553 
554 	return (0);
555 }
556 
557 static int
558 intel_hwpstate_type(device_t dev, int *type)
559 {
560 	if (type == NULL)
561 		return (EINVAL);
562 	*type = CPUFREQ_TYPE_ABSOLUTE | CPUFREQ_FLAG_INFO_ONLY | CPUFREQ_FLAG_UNCACHED;
563 
564 	return (0);
565 }
566 
567 static int
568 intel_hwpstate_suspend(device_t dev)
569 {
570 	return (0);
571 }
572 
/*
 * Redo a subset of set_autonomous_hwp on resume; untested.  Without this,
 * testers observed that on resume MSR_IA32_HWP_REQUEST was bogus.
 *
 * Re-enables HWP, replays the cached HWP_REQUEST (and, when enabled, the
 * package-wide request), and restores the cached energy/perf bias if that
 * fallback is in use.  Bound to the target CPU for the MSR writes.
 */
static int
intel_hwpstate_resume(device_t dev)
{
	struct hwp_softc *sc;
	struct pcpu *pc;
	int ret;

	sc = device_get_softc(dev);

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	thread_lock(curthread);
	sched_bind(curthread, pc->pc_cpuid);
	thread_unlock(curthread);

	/* HWP must be re-enabled before its other MSRs are writable. */
	ret = wrmsr_safe(MSR_IA32_PM_ENABLE, 1);
	if (ret) {
		device_printf(dev,
		    "Failed to enable HWP for cpu%d after suspend (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	/* Replay the cached request, with pkg-control bit if configured. */
	if (sc->hwp_pkg_ctrl_en)
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req |
		    IA32_HWP_REQUEST_PACKAGE_CONTROL);
	else
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
	if (ret) {
		device_printf(dev,
		    "Failed to set%s autonomous HWP for cpu%d after suspend\n",
		    sc->hwp_pkg_ctrl_en ? " PKG" : "", pc->pc_cpuid);
		goto out;
	}
	if (sc->hwp_pkg_ctrl_en) {
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST_PKG, sc->req);
		if (ret) {
			device_printf(dev,
			    "Failed to set autonomous HWP for package after "
			    "suspend\n");
			goto out;
		}
	}
	/* Restore the legacy perf-bias MSR only if we ever cached it. */
	if (!sc->hwp_pref_ctrl && sc->hwp_perf_bias_cached) {
		ret = wrmsr_safe(MSR_IA32_ENERGY_PERF_BIAS,
		    sc->hwp_energy_perf_bias);
		if (ret) {
			device_printf(dev,
			    "Failed to set energy perf bias for cpu%d after "
			    "suspend\n", pc->pc_cpuid);
		}
	}

out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (ret);
}
639