/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/sbuf.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/smp.h>
#include <sys/proc.h>
#include <sys/sched.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>

#include <contrib/dev/acpica/include/acpi.h>

#include <dev/acpica/acpivar.h>

#include <x86/cpufreq/hwpstate_intel_internal.h>

#include "acpi_if.h"
#include "cpufreq_if.h"

extern uint64_t	tsc_freq;

static int	intel_hwpstate_probe(device_t dev);
static int	intel_hwpstate_attach(device_t dev);
static int	intel_hwpstate_detach(device_t dev);
static int	intel_hwpstate_suspend(device_t dev);
static int	intel_hwpstate_resume(device_t dev);

static int	intel_hwpstate_get(device_t dev, struct cf_setting *cf);
static int	intel_hwpstate_type(device_t dev, int *type);

static device_method_t intel_hwpstate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	intel_hwpstate_identify),
	DEVMETHOD(device_probe,		intel_hwpstate_probe),
	DEVMETHOD(device_attach,	intel_hwpstate_attach),
	DEVMETHOD(device_detach,	intel_hwpstate_detach),
	DEVMETHOD(device_suspend,	intel_hwpstate_suspend),
	DEVMETHOD(device_resume,	intel_hwpstate_resume),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_get,	intel_hwpstate_get),
	DEVMETHOD(cpufreq_drv_type,	intel_hwpstate_type),

	DEVMETHOD_END
};

struct hwp_softc {
	device_t		dev;
	bool			hwp_notifications;
	bool			hwp_activity_window;
	bool			hwp_pref_ctrl;
	bool			hwp_pkg_ctrl;
	bool			hwp_pkg_ctrl_en;
	bool			hwp_perf_bias;
	bool			hwp_perf_bias_cached;

	uint64_t		req; /* Cached copy of HWP_REQUEST */
	uint64_t		hwp_energy_perf_bias;	/* Cache PERF_BIAS */

	uint8_t			high;
	uint8_t			guaranteed;
	uint8_t			efficient;
	uint8_t			low;
};

static devclass_t hwpstate_intel_devclass;
static driver_t hwpstate_intel_driver = {
	"hwpstate_intel",
	intel_hwpstate_methods,
	sizeof(struct hwp_softc),
};

DRIVER_MODULE(hwpstate_intel, cpu, hwpstate_intel_driver,
    hwpstate_intel_devclass, NULL, NULL);
MODULE_VERSION(hwpstate_intel, 1);

static bool hwpstate_pkg_ctrl_enable = true;
SYSCTL_BOOL(_machdep, OID_AUTO, hwpstate_pkg_ctrl, CTLFLAG_RDTUN,
    &hwpstate_pkg_ctrl_enable, 0,
    "Set to 1 (default) to enable package-level control, 0 to disable");

static int
intel_hwp_dump_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	device_t dev;
	struct pcpu *pc;
	struct sbuf *sb;
	struct hwp_softc *sc;
	uint64_t data, data2;
	int ret;

	sc = (struct hwp_softc *)arg1;
	dev = sc->dev;

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	sb = sbuf_new(NULL, NULL, 1024, SBUF_FIXEDLEN | SBUF_INCLUDENUL);
	sbuf_putc(sb, '\n');
	thread_lock(curthread);
	sched_bind(curthread, pc->pc_cpuid);
	thread_unlock(curthread);

	rdmsr_safe(MSR_IA32_PM_ENABLE, &data);
	sbuf_printf(sb, "CPU%d: HWP %sabled\n", pc->pc_cpuid,
	    ((data & 1) ? "En" : "Dis"));

	if (data == 0) {
		ret = 0;
		goto out;
	}

	rdmsr_safe(MSR_IA32_HWP_CAPABILITIES, &data);
	sbuf_printf(sb, "\tHighest Performance: %03ju\n", data & 0xff);
	sbuf_printf(sb, "\tGuaranteed Performance: %03ju\n", (data >> 8) & 0xff);
	sbuf_printf(sb, "\tEfficient Performance: %03ju\n", (data >> 16) & 0xff);
	sbuf_printf(sb, "\tLowest Performance: %03ju\n", (data >> 24) & 0xff);

	rdmsr_safe(MSR_IA32_HWP_REQUEST, &data);
	data2 = 0;
	if (sc->hwp_pkg_ctrl && (data & IA32_HWP_REQUEST_PACKAGE_CONTROL))
		rdmsr_safe(MSR_IA32_HWP_REQUEST_PKG, &data2);

	sbuf_putc(sb, '\n');

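/*
 * With package-level control, a field's per-CPU VALID bit selects the
 * per-CPU value (data); when it is clear, the package-wide value from
 * IA32_HWP_REQUEST_PKG (data2) is in effect instead.
 */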
#define pkg_print(x, name, offset) do {					\
	if (!sc->hwp_pkg_ctrl || (data & x) != 0) 			\
		sbuf_printf(sb, "\t%s: %03u\n", name,			\
		    (unsigned)(data >> offset) & 0xff);			\
	else								\
		sbuf_printf(sb, "\t%s: %03u\n", name,			\
		    (unsigned)(data2 >> offset) & 0xff);		\
} while (0)

	pkg_print(IA32_HWP_REQUEST_EPP_VALID,
	    "Requested Efficiency Performance Preference", 24);
	pkg_print(IA32_HWP_REQUEST_DESIRED_VALID,
	    "Requested Desired Performance", 16);
	pkg_print(IA32_HWP_REQUEST_MAXIMUM_VALID,
	    "Requested Maximum Performance", 8);
	pkg_print(IA32_HWP_REQUEST_MINIMUM_VALID,
	    "Requested Minimum Performance", 0);
#undef pkg_print

	sbuf_putc(sb, '\n');

out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	ret = sbuf_finish(sb);
	if (ret == 0)
		ret = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));
	sbuf_delete(sb);

	return (ret);
}

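/*
 * Convert a percentage in [0, 100] to the raw [0, 0xff] scale used by the
 * IA32_HWP_REQUEST fields (truncating division).
 */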
static inline int
percent_to_raw(int x)
{

	MPASS(x <= 100 && x >= 0);
	return (0xff * x / 100);
}

/*
 * Given x * 10 in [0, 1000], round to the integer nearest x.
 *
 * This allows round-tripping nice human readable numbers through this
 * interface.  Otherwise, user-provided percentages such as 25, 50, 75 get
 * rounded down to 24, 49, and 74, which is a bit ugly.
 */
static inline int
round10(int xtimes10)
{
	return ((xtimes10 + 5) / 10);
}

static inline int
raw_to_percent(int x)
{
	MPASS(x <= 0xff && x >= 0);
	return (round10(x * 1000 / 0xff));
}
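
/*
 * Worked example of the round trip: percent_to_raw(25) = 0xff * 25 / 100 =
 * 63, and raw_to_percent(63) = round10(63 * 1000 / 0xff) = round10(247) =
 * 25.  Without round10(), the result would truncate to 24.
 */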

/* Range of MSR_IA32_ENERGY_PERF_BIAS is more limited: 0-0xf. */
static inline int
percent_to_raw_perf_bias(int x)
{
	/*
	 * Round up so that raw values present as nice round human numbers and
	 * also round-trip to the same raw value.
	 */
	MPASS(x <= 100 && x >= 0);
	return (((0xf * x) + 50) / 100);
}

static inline int
raw_to_percent_perf_bias(int x)
{
	/* Rounding to nice human numbers despite a step interval of 6.67%. */
	MPASS(x <= 0xf && x >= 0);
	return (((x * 20) / 0xf) * 5);
}
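
/*
 * Worked example: percent_to_raw_perf_bias(50) = (0xf * 50 + 50) / 100 = 8,
 * and raw_to_percent_perf_bias(8) = ((8 * 20) / 0xf) * 5 = 10 * 5 = 50, so
 * 50% survives the round trip despite the ~6.67% hardware step.
 */
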
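/*
 * sysctl(8) handler for dev.hwpstate_intel.<n>.epp: report the current
 * energy/performance preference as a percentage and, on write, program
 * either the HWP_REQUEST EPP field or, on CPUs without EPP support, the
 * IA32_ENERGY_PERF_BIAS hint.
 */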
static int
sysctl_epp_select(SYSCTL_HANDLER_ARGS)
{
	struct hwp_softc *sc;
	device_t dev;
	struct pcpu *pc;
	uint64_t epb;
	uint32_t val;
	int ret;

	dev = oidp->oid_arg1;
	sc = device_get_softc(dev);
	if (!sc->hwp_pref_ctrl && !sc->hwp_perf_bias)
		return (ENODEV);

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	thread_lock(curthread);
	sched_bind(curthread, pc->pc_cpuid);
	thread_unlock(curthread);

	if (sc->hwp_pref_ctrl) {
		val = (sc->req & IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE) >> 24;
		val = raw_to_percent(val);
	} else {
		/*
		 * If cpuid indicates EPP is not supported, the HWP controller
		 * uses MSR_IA32_ENERGY_PERF_BIAS instead (Intel SDM §14.4.4).
		 * This register is per-core (but not HT).
		 */
		if (!sc->hwp_perf_bias_cached) {
			ret = rdmsr_safe(MSR_IA32_ENERGY_PERF_BIAS, &epb);
			if (ret)
				goto out;
			sc->hwp_energy_perf_bias = epb;
			sc->hwp_perf_bias_cached = true;
		}
		val = sc->hwp_energy_perf_bias &
		    IA32_ENERGY_PERF_BIAS_POLICY_HINT_MASK;
		val = raw_to_percent_perf_bias(val);
	}

	MPASS(val >= 0 && val <= 100);

	ret = sysctl_handle_int(oidp, &val, 0, req);
	if (ret || req->newptr == NULL)
		goto out;

	if (val > 100) {
		ret = EINVAL;
		goto out;
	}

	if (sc->hwp_pref_ctrl) {
		val = percent_to_raw(val);

		sc->req =
		    ((sc->req & ~IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE)
		    | (val << 24u));

		if (sc->hwp_pkg_ctrl_en)
			ret = wrmsr_safe(MSR_IA32_HWP_REQUEST_PKG, sc->req);
		else
			ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
	} else {
		val = percent_to_raw_perf_bias(val);
		MPASS((val & ~IA32_ENERGY_PERF_BIAS_POLICY_HINT_MASK) == 0);

		sc->hwp_energy_perf_bias =
		    ((sc->hwp_energy_perf_bias &
		    ~IA32_ENERGY_PERF_BIAS_POLICY_HINT_MASK) | val);
		ret = wrmsr_safe(MSR_IA32_ENERGY_PERF_BIAS,
		    sc->hwp_energy_perf_bias);
	}

out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (ret);
}

void
intel_hwpstate_identify(driver_t *driver, device_t parent)
{
	if (device_find_child(parent, "hwpstate_intel", -1) != NULL)
		return;

	if (cpu_vendor_id != CPU_VENDOR_INTEL)
		return;

	if (resource_disabled("hwpstate_intel", 0))
		return;

	/*
	 * Intel SDM 14.4.1 (HWP Programming Interfaces):
	 *   Availability of HWP baseline resource and capability,
	 *   CPUID.06H:EAX[bit 7]: If this bit is set, HWP provides several new
	 *   architectural MSRs: IA32_PM_ENABLE, IA32_HWP_CAPABILITIES,
	 *   IA32_HWP_REQUEST, IA32_HWP_STATUS.
	 */
	if ((cpu_power_eax & CPUTPM1_HWP) == 0)
		return;

	if (BUS_ADD_CHILD(parent, 10, "hwpstate_intel", -1) == NULL)
		return;

	if (bootverbose)
		device_printf(parent, "hwpstate registered\n");
}

static int
intel_hwpstate_probe(device_t dev)
{

	device_set_desc(dev, "Intel Speed Shift");
	return (BUS_PROBE_NOWILDCARD);
}

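/*
 * Enable HWP and hand operating-point selection to the hardware: clear the
 * Desired_Performance and Activity_Window fields so the autonomous logic
 * chooses the target, and open the Minimum/Maximum range to the full span
 * reported by IA32_HWP_CAPABILITIES.
 */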
static int
set_autonomous_hwp(struct hwp_softc *sc)
{
	struct pcpu *pc;
	device_t dev;
	uint64_t caps;
	int ret;

	dev = sc->dev;

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	thread_lock(curthread);
	sched_bind(curthread, pc->pc_cpuid);
	thread_unlock(curthread);

	/* XXX: Many MSRs aren't readable until feature is enabled */
	ret = wrmsr_safe(MSR_IA32_PM_ENABLE, 1);
	if (ret) {
		/*
		 * This is actually a package-level MSR, and only the first
		 * write is not ignored.  So it is harmless to enable it across
		 * all devices, and this allows us not to care especially in
		 * which order cores (and packages) are probed.  This error
		 * condition should not happen given we gate on the HWP CPUID
		 * feature flag, if the Intel SDM is correct.
		 */
		device_printf(dev, "Failed to enable HWP for cpu%d (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	ret = rdmsr_safe(MSR_IA32_HWP_REQUEST, &sc->req);
	if (ret) {
		device_printf(dev,
		    "Failed to read HWP request MSR for cpu%d (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	ret = rdmsr_safe(MSR_IA32_HWP_CAPABILITIES, &caps);
	if (ret) {
		device_printf(dev,
		    "Failed to read HWP capabilities MSR for cpu%d (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	/*
	 * High and low are static; guaranteed and efficient can change at
	 * run time.
	 */
	sc->high = IA32_HWP_CAPABILITIES_HIGHEST_PERFORMANCE(caps);
	sc->guaranteed = IA32_HWP_CAPABILITIES_GUARANTEED_PERFORMANCE(caps);
	sc->efficient = IA32_HWP_CAPABILITIES_EFFICIENT_PERFORMANCE(caps);
	sc->low = IA32_HWP_CAPABILITIES_LOWEST_PERFORMANCE(caps);

	/* hardware autonomous selection determines the performance target */
	sc->req &= ~IA32_HWP_DESIRED_PERFORMANCE;

	/* enable HW dynamic selection of window size */
	sc->req &= ~IA32_HWP_ACTIVITY_WINDOW;

	/* IA32_HWP_REQUEST.Minimum_Performance = IA32_HWP_CAPABILITIES.Lowest_Performance */
	sc->req &= ~IA32_HWP_MINIMUM_PERFORMANCE;
	sc->req |= sc->low;

	/* IA32_HWP_REQUEST.Maximum_Performance = IA32_HWP_CAPABILITIES.Highest_Performance. */
	sc->req &= ~IA32_HWP_REQUEST_MAXIMUM_PERFORMANCE;
	sc->req |= sc->high << 8;

	/* If supported, request package-level control for this CPU. */
	if (sc->hwp_pkg_ctrl_en)
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req |
		    IA32_HWP_REQUEST_PACKAGE_CONTROL);
	else
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
	if (ret) {
		device_printf(dev,
		    "Failed to set%s autonomous HWP for cpu%d\n",
		    sc->hwp_pkg_ctrl_en ? " PKG" : "", pc->pc_cpuid);
		goto out;
	}

	/* If supported, write the PKG-wide control MSR. */
	if (sc->hwp_pkg_ctrl_en) {
		/*
		 * "The structure of the IA32_HWP_REQUEST_PKG MSR
		 * (package-level) is identical to the IA32_HWP_REQUEST MSR
		 * with the exception of the Package Control field, which does
		 * not exist." (Intel SDM §14.4.4)
		 */
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST_PKG, sc->req);
		if (ret) {
			device_printf(dev,
			    "Failed to set autonomous HWP for package\n");
		}
	}

out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (ret);
}

static int
intel_hwpstate_attach(device_t dev)
{
	struct hwp_softc *sc;
	int ret;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* eax */
	if (cpu_power_eax & CPUTPM1_HWP_NOTIFICATION)
		sc->hwp_notifications = true;
	if (cpu_power_eax & CPUTPM1_HWP_ACTIVITY_WINDOW)
		sc->hwp_activity_window = true;
	if (cpu_power_eax & CPUTPM1_HWP_PERF_PREF)
		sc->hwp_pref_ctrl = true;
	if (cpu_power_eax & CPUTPM1_HWP_PKG)
		sc->hwp_pkg_ctrl = true;

	/* Allow administrators to disable pkg-level control. */
	sc->hwp_pkg_ctrl_en = (sc->hwp_pkg_ctrl && hwpstate_pkg_ctrl_enable);

	/* ecx */
	if (cpu_power_ecx & CPUID_PERF_BIAS)
		sc->hwp_perf_bias = true;

	ret = set_autonomous_hwp(sc);
	if (ret)
		return (ret);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_STATIC_CHILDREN(_debug), OID_AUTO, device_get_nameunit(dev),
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_NEEDGIANT,
	    sc, 0, intel_hwp_dump_sysctl_handler, "A", "");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "epp", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, dev, 0,
	    sysctl_epp_select, "I",
	    "Efficiency/Performance Preference "
	    "(range from 0, most performant, through 100, most efficient)");

	return (cpufreq_register(dev));
}

static int
intel_hwpstate_detach(device_t dev)
{

	return (cpufreq_unregister(dev));
}

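/*
 * cpufreq "get" method.  With HWP the hardware picks the operating point
 * autonomously, so report the estimated current clock rate rather than a
 * driver-programmed setting.
 */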
static int
intel_hwpstate_get(device_t dev, struct cf_setting *set)
{
	struct pcpu *pc;
	uint64_t rate;
	int ret;

	if (set == NULL)
		return (EINVAL);

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set));
	set->dev = dev;

	ret = cpu_est_clockrate(pc->pc_cpuid, &rate);
	if (ret == 0)
		set->freq = rate / 1000000;

	set->volts = CPUFREQ_VAL_UNKNOWN;
	set->power = CPUFREQ_VAL_UNKNOWN;
	set->lat = CPUFREQ_VAL_UNKNOWN;

	return (0);
}

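/*
 * CPUFREQ_FLAG_INFO_ONLY tells the cpufreq framework not to try to drive
 * levels through this device; CPUFREQ_FLAG_UNCACHED forces a fresh read on
 * each query, since the hardware changes frequency behind our back.
 */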
static int
intel_hwpstate_type(device_t dev, int *type)
{
	if (type == NULL)
		return (EINVAL);
	*type = CPUFREQ_TYPE_ABSOLUTE | CPUFREQ_FLAG_INFO_ONLY |
	    CPUFREQ_FLAG_UNCACHED;

	return (0);
}

static int
intel_hwpstate_suspend(device_t dev)
{
	return (0);
}

/*
 * Redo a subset of set_autonomous_hwp on resume; untested.  Without this,
 * testers observed that on resume MSR_IA32_HWP_REQUEST was bogus.
 */
static int
intel_hwpstate_resume(device_t dev)
{
	struct hwp_softc *sc;
	struct pcpu *pc;
	int ret;

	sc = device_get_softc(dev);

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	thread_lock(curthread);
	sched_bind(curthread, pc->pc_cpuid);
	thread_unlock(curthread);

	ret = wrmsr_safe(MSR_IA32_PM_ENABLE, 1);
	if (ret) {
		device_printf(dev,
		    "Failed to enable HWP for cpu%d after suspend (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	if (sc->hwp_pkg_ctrl_en)
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req |
		    IA32_HWP_REQUEST_PACKAGE_CONTROL);
	else
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
	if (ret) {
		device_printf(dev,
		    "Failed to set%s autonomous HWP for cpu%d after suspend\n",
		    sc->hwp_pkg_ctrl_en ? " PKG" : "", pc->pc_cpuid);
		goto out;
	}
	if (sc->hwp_pkg_ctrl_en) {
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST_PKG, sc->req);
		if (ret) {
			device_printf(dev,
			    "Failed to set autonomous HWP for package after "
			    "suspend\n");
			goto out;
		}
	}
	if (!sc->hwp_pref_ctrl && sc->hwp_perf_bias_cached) {
		ret = wrmsr_safe(MSR_IA32_ENERGY_PERF_BIAS,
		    sc->hwp_energy_perf_bias);
		if (ret) {
			device_printf(dev,
			    "Failed to set energy perf bias for cpu%d after "
			    "suspend\n", pc->pc_cpuid);
		}
	}

out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (ret);
}