xref: /freebsd/sys/x86/cpufreq/hwpstate_intel.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018 Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sbuf.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/smp.h>
#include <sys/proc.h>
#include <sys/sched.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>

#include <contrib/dev/acpica/include/acpi.h>

#include <dev/acpica/acpivar.h>

#include <x86/cpufreq/hwpstate_intel_internal.h>

#include "acpi_if.h"
#include "cpufreq_if.h"

extern uint64_t	tsc_freq;

static int	intel_hwpstate_probe(device_t dev);
static int	intel_hwpstate_attach(device_t dev);
static int	intel_hwpstate_detach(device_t dev);
static int	intel_hwpstate_suspend(device_t dev);
static int	intel_hwpstate_resume(device_t dev);

static int	intel_hwpstate_get(device_t dev, struct cf_setting *cf);
static int	intel_hwpstate_type(device_t dev, int *type);

static device_method_t intel_hwpstate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	intel_hwpstate_identify),
	DEVMETHOD(device_probe,		intel_hwpstate_probe),
	DEVMETHOD(device_attach,	intel_hwpstate_attach),
	DEVMETHOD(device_detach,	intel_hwpstate_detach),
	DEVMETHOD(device_suspend,	intel_hwpstate_suspend),
	DEVMETHOD(device_resume,	intel_hwpstate_resume),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_get,	intel_hwpstate_get),
	DEVMETHOD(cpufreq_drv_type,	intel_hwpstate_type),

	DEVMETHOD_END
};

struct hwp_softc {
	device_t		dev;
	bool			hwp_notifications;
	bool			hwp_activity_window;
	bool			hwp_pref_ctrl;
	bool			hwp_pkg_ctrl;
	bool			hwp_pkg_ctrl_en;
	bool			hwp_perf_bias;
	bool			hwp_perf_bias_cached;

	uint64_t		req; /* Cached copy of HWP_REQUEST */
	uint64_t		hwp_energy_perf_bias;	/* Cache PERF_BIAS */

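	/* Snapshot of IA32_HWP_CAPABILITIES; one byte per field. */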
	uint8_t			high;
	uint8_t			guaranteed;
	uint8_t			efficient;
	uint8_t			low;
};

static driver_t hwpstate_intel_driver = {
	"hwpstate_intel",
	intel_hwpstate_methods,
	sizeof(struct hwp_softc),
};

DRIVER_MODULE(hwpstate_intel, cpu, hwpstate_intel_driver, NULL, NULL);
MODULE_VERSION(hwpstate_intel, 1);

static bool hwpstate_pkg_ctrl_enable = true;
SYSCTL_BOOL(_machdep, OID_AUTO, hwpstate_pkg_ctrl, CTLFLAG_RDTUN,
    &hwpstate_pkg_ctrl_enable, 0,
    "Set 1 (default) to enable package-level control, 0 to disable");
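
/*
 * Because the knob above is CTLFLAG_RDTUN, it cannot be changed at runtime;
 * it is set as a boot-time tunable, e.g. in /boot/loader.conf:
 *
 *	machdep.hwpstate_pkg_ctrl=0
 */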

static int
intel_hwp_dump_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	device_t dev;
	struct pcpu *pc;
	struct sbuf *sb;
	struct hwp_softc *sc;
	uint64_t data, data2;
	int ret;

	sc = (struct hwp_softc *)arg1;
	dev = sc->dev;

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	sb = sbuf_new(NULL, NULL, 1024, SBUF_FIXEDLEN | SBUF_INCLUDENUL);
	sbuf_putc(sb, '\n');
	thread_lock(curthread);
	sched_bind(curthread, pc->pc_cpuid);
	thread_unlock(curthread);

	rdmsr_safe(MSR_IA32_PM_ENABLE, &data);
	sbuf_printf(sb, "CPU%d: HWP %sabled\n", pc->pc_cpuid,
	    ((data & 1) ? "En" : "Dis"));

	if (data == 0) {
		ret = 0;
		goto out;
	}

	rdmsr_safe(MSR_IA32_HWP_CAPABILITIES, &data);
	sbuf_printf(sb, "\tHighest Performance: %03ju\n", data & 0xff);
	sbuf_printf(sb, "\tGuaranteed Performance: %03ju\n", (data >> 8) & 0xff);
	sbuf_printf(sb, "\tEfficient Performance: %03ju\n", (data >> 16) & 0xff);
	sbuf_printf(sb, "\tLowest Performance: %03ju\n", (data >> 24) & 0xff);

	rdmsr_safe(MSR_IA32_HWP_REQUEST, &data);
	data2 = 0;
	if (sc->hwp_pkg_ctrl && (data & IA32_HWP_REQUEST_PACKAGE_CONTROL))
		rdmsr_safe(MSR_IA32_HWP_REQUEST_PKG, &data2);

	sbuf_putc(sb, '\n');

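/*
 * When package-level control is enabled, a field of IA32_HWP_REQUEST is in
 * effect only if its per-thread "valid" bit is set; otherwise the hardware
 * uses the corresponding field of IA32_HWP_REQUEST_PKG (data2).  Print
 * whichever value is actually in effect.
 */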
#define pkg_print(x, name, offset) do {					\
	if (!sc->hwp_pkg_ctrl || (data & x) != 0)			\
		sbuf_printf(sb, "\t%s: %03u\n", name,			\
		    (unsigned)(data >> offset) & 0xff);			\
	else								\
		sbuf_printf(sb, "\t%s: %03u\n", name,			\
		    (unsigned)(data2 >> offset) & 0xff);		\
} while (0)

	pkg_print(IA32_HWP_REQUEST_EPP_VALID,
	    "Requested Efficiency Performance Preference", 24);
	pkg_print(IA32_HWP_REQUEST_DESIRED_VALID,
	    "Requested Desired Performance", 16);
	pkg_print(IA32_HWP_REQUEST_MAXIMUM_VALID,
	    "Requested Maximum Performance", 8);
	pkg_print(IA32_HWP_REQUEST_MINIMUM_VALID,
	    "Requested Minimum Performance", 0);
#undef pkg_print

	sbuf_putc(sb, '\n');

out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	ret = sbuf_finish(sb);
	if (ret == 0)
		ret = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));
	sbuf_delete(sb);

	return (ret);
}

static inline int
percent_to_raw(int x)
{

	MPASS(x <= 100 && x >= 0);
	return (0xff * x / 100);
}

/*
 * Given x * 10 in [0, 1000], round to the integer nearest x.
 *
 * This allows round-tripping nice human readable numbers through this
 * interface.  Otherwise, user-provided percentages such as 25, 50, 75 get
 * rounded down to 24, 49, and 74, which is a bit ugly.
 */
static inline int
round10(int xtimes10)
{
	return ((xtimes10 + 5) / 10);
}

static inline int
raw_to_percent(int x)
{
	MPASS(x <= 0xff && x >= 0);
	return (round10(x * 1000 / 0xff));
}
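
/*
 * Worked example of the round trip: percent_to_raw(50) = 255 * 50 / 100 = 127,
 * and raw_to_percent(127) = round10(127 * 1000 / 255) = round10(498) = 50.
 * Truncating instead of rounding would give 127 * 100 / 255 = 49.
 */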

/* Range of MSR_IA32_ENERGY_PERF_BIAS is more limited: 0-0xf. */
static inline int
percent_to_raw_perf_bias(int x)
{
	/*
	 * Round up so that raw values present as nice round human numbers and
	 * also round-trip to the same raw value.
	 */
	MPASS(x <= 100 && x >= 0);
	return (((0xf * x) + 50) / 100);
}

static inline int
raw_to_percent_perf_bias(int x)
{
	/* Rounding to nice human numbers despite a step interval of 6.67%. */
	MPASS(x <= 0xf && x >= 0);
	return (((x * 20) / 0xf) * 5);
}
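
/*
 * Worked example on the 0-0xf scale: percent_to_raw_perf_bias(50) =
 * (15 * 50 + 50) / 100 = 8, and raw_to_percent_perf_bias(8) =
 * ((8 * 20) / 15) * 5 = 10 * 5 = 50, so 50% survives the round trip.
 */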

static int
sysctl_epp_select(SYSCTL_HANDLER_ARGS)
{
	struct hwp_softc *sc;
	device_t dev;
	struct pcpu *pc;
	uint64_t epb;
	uint32_t val;
	int ret;

	dev = oidp->oid_arg1;
	sc = device_get_softc(dev);
	if (!sc->hwp_pref_ctrl && !sc->hwp_perf_bias)
		return (ENODEV);

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	thread_lock(curthread);
	sched_bind(curthread, pc->pc_cpuid);
	thread_unlock(curthread);

	if (sc->hwp_pref_ctrl) {
		val = (sc->req &
		    IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE) >> 24;
		val = raw_to_percent(val);
	} else {
		/*
		 * If cpuid indicates EPP is not supported, the HWP controller
		 * uses MSR_IA32_ENERGY_PERF_BIAS instead (Intel SDM §14.4.4).
		 * This register is per-core, i.e. shared by the hardware
		 * threads of a core rather than per-thread.
		 */
		if (!sc->hwp_perf_bias_cached) {
			ret = rdmsr_safe(MSR_IA32_ENERGY_PERF_BIAS, &epb);
			if (ret)
				goto out;
			sc->hwp_energy_perf_bias = epb;
			sc->hwp_perf_bias_cached = true;
		}
		val = sc->hwp_energy_perf_bias &
		    IA32_ENERGY_PERF_BIAS_POLICY_HINT_MASK;
		val = raw_to_percent_perf_bias(val);
	}

	MPASS(val >= 0 && val <= 100);

	ret = sysctl_handle_int(oidp, &val, 0, req);
	if (ret || req->newptr == NULL)
		goto out;

	if (val > 100) {
		ret = EINVAL;
		goto out;
	}

	if (sc->hwp_pref_ctrl) {
		val = percent_to_raw(val);

		sc->req =
		    ((sc->req & ~IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE)
		    | (val << 24u));

		if (sc->hwp_pkg_ctrl_en)
			ret = wrmsr_safe(MSR_IA32_HWP_REQUEST_PKG, sc->req);
		else
			ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
	} else {
		val = percent_to_raw_perf_bias(val);
		MPASS((val & ~IA32_ENERGY_PERF_BIAS_POLICY_HINT_MASK) == 0);

		sc->hwp_energy_perf_bias =
		    ((sc->hwp_energy_perf_bias &
		    ~IA32_ENERGY_PERF_BIAS_POLICY_HINT_MASK) | val);
		ret = wrmsr_safe(MSR_IA32_ENERGY_PERF_BIAS,
		    sc->hwp_energy_perf_bias);
	}

out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (ret);
}

void
intel_hwpstate_identify(driver_t *driver, device_t parent)
{
	if (device_find_child(parent, "hwpstate_intel", -1) != NULL)
		return;

	if (cpu_vendor_id != CPU_VENDOR_INTEL)
		return;

	if (resource_disabled("hwpstate_intel", 0))
		return;

	/*
	 * Intel SDM 14.4.1 (HWP Programming Interfaces):
	 *   Availability of HWP baseline resource and capability,
	 *   CPUID.06H:EAX[bit 7]: If this bit is set, HWP provides several new
	 *   architectural MSRs: IA32_PM_ENABLE, IA32_HWP_CAPABILITIES,
	 *   IA32_HWP_REQUEST, IA32_HWP_STATUS.
	 */
	if ((cpu_power_eax & CPUTPM1_HWP) == 0)
		return;

	if (BUS_ADD_CHILD(parent, 10, "hwpstate_intel", device_get_unit(parent))
	    == NULL)
		device_printf(parent, "hwpstate_intel: add child failed\n");
}

static int
intel_hwpstate_probe(device_t dev)
{

	device_set_desc(dev, "Intel Speed Shift");
	return (BUS_PROBE_NOWILDCARD);
}

static int
set_autonomous_hwp(struct hwp_softc *sc)
{
	struct pcpu *pc;
	device_t dev;
	uint64_t caps;
	int ret;

	dev = sc->dev;

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	thread_lock(curthread);
	sched_bind(curthread, pc->pc_cpuid);
	thread_unlock(curthread);

	/* XXX: Many MSRs aren't readable until the feature is enabled */
	ret = wrmsr_safe(MSR_IA32_PM_ENABLE, 1);
	if (ret) {
		/*
		 * This is actually a package-level MSR, and writes after the
		 * first are ignored.  So it is harmless to enable it across
		 * all devices, and this allows us not to care especially in
		 * which order cores (and packages) are probed.  This error
		 * condition should not happen given we gate on the HWP CPUID
		 * feature flag, if the Intel SDM is correct.
		 */
		device_printf(dev, "Failed to enable HWP for cpu%d (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	ret = rdmsr_safe(MSR_IA32_HWP_REQUEST, &sc->req);
	if (ret) {
		device_printf(dev,
		    "Failed to read HWP request MSR for cpu%d (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	ret = rdmsr_safe(MSR_IA32_HWP_CAPABILITIES, &caps);
	if (ret) {
		device_printf(dev,
		    "Failed to read HWP capabilities MSR for cpu%d (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	/*
	 * High and low are static; "guaranteed" and "efficient" are dynamic.
	 */
	sc->high = IA32_HWP_CAPABILITIES_HIGHEST_PERFORMANCE(caps);
	sc->guaranteed = IA32_HWP_CAPABILITIES_GUARANTEED_PERFORMANCE(caps);
	sc->efficient = IA32_HWP_CAPABILITIES_EFFICIENT_PERFORMANCE(caps);
	sc->low = IA32_HWP_CAPABILITIES_LOWEST_PERFORMANCE(caps);

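	/*
	 * IA32_HWP_REQUEST field layout (Intel SDM, HWP chapter): bits 7:0
	 * Minimum_Performance, 15:8 Maximum_Performance, 23:16
	 * Desired_Performance, 31:24 Energy_Performance_Preference, 41:32
	 * Activity_Window, bit 42 Package_Control, and per-field valid bits
	 * in 63:59.
	 */
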
	/* hardware autonomous selection determines the performance target */
	sc->req &= ~IA32_HWP_DESIRED_PERFORMANCE;

	/* enable HW dynamic selection of window size */
	sc->req &= ~IA32_HWP_ACTIVITY_WINDOW;

	/* IA32_HWP_REQUEST.Minimum_Performance = IA32_HWP_CAPABILITIES.Lowest_Performance */
	sc->req &= ~IA32_HWP_MINIMUM_PERFORMANCE;
	sc->req |= sc->low;

	/* IA32_HWP_REQUEST.Maximum_Performance = IA32_HWP_CAPABILITIES.Highest_Performance. */
	sc->req &= ~IA32_HWP_REQUEST_MAXIMUM_PERFORMANCE;
	sc->req |= sc->high << 8;

	/* If supported, request package-level control for this CPU. */
	if (sc->hwp_pkg_ctrl_en)
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req |
		    IA32_HWP_REQUEST_PACKAGE_CONTROL);
	else
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
	if (ret) {
		device_printf(dev,
		    "Failed to set up%s autonomous HWP for cpu%d\n",
		    sc->hwp_pkg_ctrl_en ? " PKG" : "", pc->pc_cpuid);
		goto out;
	}

	/* If supported, write the PKG-wide control MSR. */
	if (sc->hwp_pkg_ctrl_en) {
		/*
		 * "The structure of the IA32_HWP_REQUEST_PKG MSR
		 * (package-level) is identical to the IA32_HWP_REQUEST MSR
		 * with the exception of the Package Control field, which does
		 * not exist." (Intel SDM §14.4.4)
		 */
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST_PKG, sc->req);
		if (ret) {
			device_printf(dev,
			    "Failed to set autonomous HWP for package\n");
		}
	}

out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (ret);
}

static int
intel_hwpstate_attach(device_t dev)
{
	struct hwp_softc *sc;
	int ret;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* eax */
	if (cpu_power_eax & CPUTPM1_HWP_NOTIFICATION)
		sc->hwp_notifications = true;
	if (cpu_power_eax & CPUTPM1_HWP_ACTIVITY_WINDOW)
		sc->hwp_activity_window = true;
	if (cpu_power_eax & CPUTPM1_HWP_PERF_PREF)
		sc->hwp_pref_ctrl = true;
	if (cpu_power_eax & CPUTPM1_HWP_PKG)
		sc->hwp_pkg_ctrl = true;

	/* Allow administrators to disable pkg-level control. */
	sc->hwp_pkg_ctrl_en = (sc->hwp_pkg_ctrl && hwpstate_pkg_ctrl_enable);

	/* ecx */
	if (cpu_power_ecx & CPUID_PERF_BIAS)
		sc->hwp_perf_bias = true;

	ret = set_autonomous_hwp(sc);
	if (ret)
		return (ret);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_STATIC_CHILDREN(_debug), OID_AUTO, device_get_nameunit(dev),
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
	    sc, 0, intel_hwp_dump_sysctl_handler, "A", "");
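
	/*
	 * The dump handler above is reachable as debug.<nameunit>, e.g.
	 * "sysctl debug.hwpstate_intel0" (unit 0 assumed); CTLFLAG_SKIP
	 * hides it from a plain "sysctl debug" listing, so query it by name.
	 */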

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "epp", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, dev, 0,
	    sysctl_epp_select, "I",
	    "Efficiency/Performance Preference "
	    "(range from 0, most performant, through 100, most efficient)");
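
	/*
	 * This knob attaches to the per-device sysctl tree, e.g. (unit 0
	 * assumed):
	 *
	 *	sysctl dev.hwpstate_intel.0.epp=0	# most performant
	 *	sysctl dev.hwpstate_intel.0.epp=100	# most efficient
	 */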

	return (cpufreq_register(dev));
}

static int
intel_hwpstate_detach(device_t dev)
{

	return (cpufreq_unregister(dev));
}

static int
intel_hwpstate_get(device_t dev, struct cf_setting *set)
{
	struct pcpu *pc;
	uint64_t rate;
	int ret;

	if (set == NULL)
		return (EINVAL);

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set));
	set->dev = dev;

	ret = cpu_est_clockrate(pc->pc_cpuid, &rate);
	if (ret == 0)
		set->freq = rate / 1000000;

	set->volts = CPUFREQ_VAL_UNKNOWN;
	set->power = CPUFREQ_VAL_UNKNOWN;
	set->lat = CPUFREQ_VAL_UNKNOWN;

	return (0);
}

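/*
 * With HWP the hardware chooses the operating point autonomously, so this
 * driver advertises itself as info-only (no settable levels) and uncached
 * (the estimated frequency is re-read on every query).
 */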
static int
intel_hwpstate_type(device_t dev, int *type)
{
	if (type == NULL)
		return (EINVAL);
	*type = CPUFREQ_TYPE_ABSOLUTE | CPUFREQ_FLAG_INFO_ONLY |
	    CPUFREQ_FLAG_UNCACHED;

	return (0);
}

static int
intel_hwpstate_suspend(device_t dev)
{
	return (0);
}

/*
 * Redo a subset of set_autonomous_hwp on resume; untested.  Without this,
 * testers observed that on resume MSR_IA32_HWP_REQUEST was bogus.
 */
static int
intel_hwpstate_resume(device_t dev)
{
	struct hwp_softc *sc;
	struct pcpu *pc;
	int ret;

	sc = device_get_softc(dev);

	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);

	thread_lock(curthread);
	sched_bind(curthread, pc->pc_cpuid);
	thread_unlock(curthread);

	ret = wrmsr_safe(MSR_IA32_PM_ENABLE, 1);
	if (ret) {
		device_printf(dev,
		    "Failed to enable HWP for cpu%d after suspend (%d)\n",
		    pc->pc_cpuid, ret);
		goto out;
	}

	if (sc->hwp_pkg_ctrl_en)
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req |
		    IA32_HWP_REQUEST_PACKAGE_CONTROL);
	else
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
	if (ret) {
		device_printf(dev,
		    "Failed to set%s autonomous HWP for cpu%d after suspend\n",
		    sc->hwp_pkg_ctrl_en ? " PKG" : "", pc->pc_cpuid);
		goto out;
	}
	if (sc->hwp_pkg_ctrl_en) {
		ret = wrmsr_safe(MSR_IA32_HWP_REQUEST_PKG, sc->req);
		if (ret) {
			device_printf(dev,
			    "Failed to set autonomous HWP for package after "
			    "suspend\n");
			goto out;
		}
	}
	if (!sc->hwp_pref_ctrl && sc->hwp_perf_bias_cached) {
		ret = wrmsr_safe(MSR_IA32_ENERGY_PERF_BIAS,
		    sc->hwp_energy_perf_bias);
		if (ret) {
			device_printf(dev,
			    "Failed to set energy perf bias for cpu%d after "
			    "suspend\n", pc->pc_cpuid);
		}
	}

out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (ret);
}