xref: /freebsd/sys/x86/cpufreq/hwpstate_amd.c (revision 0e6e1f14f0a7cfabec38106e41cb2f7a39363e55)
1  /*-
2   * SPDX-License-Identifier: BSD-2-Clause
3   *
4   * Copyright (c) 2005 Nate Lawson
5   * Copyright (c) 2004 Colin Percival
6   * Copyright (c) 2004-2005 Bruno Durcot
7   * Copyright (c) 2004 FUKUDA Nobuhiko
8   * Copyright (c) 2009 Michael Reifenberger
9   * Copyright (c) 2009 Norikatsu Shigemura
10   * Copyright (c) 2008-2009 Gen Otsuji
11   *
12   * This code is depending on kern_cpu.c, est.c, powernow.c, p4tcc.c, smist.c
13   * in various parts. The authors of these files are Nate Lawson,
14   * Colin Percival, Bruno Durcot, and FUKUDA Nobuhiko.
15   * This code contains patches by Michael Reifenberger and Norikatsu Shigemura.
16   * Thank you.
17   *
18   * Redistribution and use in source and binary forms, with or without
19   * modification, are permitted providing that the following conditions
20   * are met:
21   * 1. Redistributions of source code must retain the above copyright
22   *    notice, this list of conditions and the following disclaimer.
23   * 2. Redistributions in binary form must reproduce the above copyright
24   *    notice, this list of conditions and the following disclaimer in the
25   *    documentation and/or other materials provided with the distribution.
26   *
27   * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28   * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29   * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30   * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
31   * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32   * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33   * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34   * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35   * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36   * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37   * POSSIBILITY OF SUCH DAMAGE.
38   */
39  
40  /*
41   * For more info:
42   * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 10h Processors
43   * 31116 Rev 3.20  February 04, 2009
44   * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 11h Processors
45   * 41256 Rev 3.00 - July 07, 2008
46   * Processor Programming Reference (PPR) for AMD Family 1Ah Model 02h,
47   * Revision C1 Processors Volume 1 of 7 - Sep 29, 2024
48   */
49  
50  #include <sys/param.h>
51  #include <sys/bus.h>
52  #include <sys/cpu.h>
53  #include <sys/kernel.h>
54  #include <sys/module.h>
55  #include <sys/malloc.h>
56  #include <sys/proc.h>
57  #include <sys/pcpu.h>
58  #include <sys/smp.h>
59  #include <sys/sched.h>
60  
61  #include <machine/md_var.h>
62  #include <machine/cputypes.h>
63  #include <machine/specialreg.h>
64  
65  #include <contrib/dev/acpica/include/acpi.h>
66  
67  #include <dev/acpica/acpivar.h>
68  
69  #include "acpi_if.h"
70  #include "cpufreq_if.h"
71  
/*
 * P-state MSRs introduced with family 10h/11h and still used (with
 * family-specific field layouts) by later families.
 */
#define	MSR_AMD_10H_11H_LIMIT	0xc0010061	/* P-state current limit */
#define	MSR_AMD_10H_11H_CONTROL	0xc0010062	/* P-state control (write index) */
#define	MSR_AMD_10H_11H_STATUS	0xc0010063	/* P-state status (read index) */
#define	MSR_AMD_10H_11H_CONFIG	0xc0010064	/* First of the per-P-state MSRs */

/* Size of the cached settings table in the softc. */
#define	AMD_10H_11H_MAX_STATES	16

/* for MSR_AMD_10H_11H_LIMIT C001_0061 */
#define	AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)	(((msr) >> 4) & 0x7)
#define	AMD_10H_11H_GET_PSTATE_LIMIT(msr)	(((msr)) & 0x7)
/* for MSR_AMD_10H_11H_CONFIG 10h:C001_0064:68 / 11h:C001_0064:6B */
#define	AMD_10H_11H_CUR_VID(msr)		(((msr) >> 9) & 0x7F)
#define	AMD_10H_11H_CUR_DID(msr)		(((msr) >> 6) & 0x07)
#define	AMD_10H_11H_CUR_FID(msr)		((msr) & 0x3F)

/* Family 17h+ field layout of the per-P-state MSRs. */
#define	AMD_17H_CUR_IDIV(msr)			(((msr) >> 30) & 0x03)
#define	AMD_17H_CUR_IDD(msr)			(((msr) >> 22) & 0xFF)
#define	AMD_17H_CUR_VID(msr)			(((msr) >> 14) & 0xFF)
#define	AMD_17H_CUR_DID(msr)			(((msr) >> 8) & 0x3F)
#define	AMD_17H_CUR_FID(msr)			((msr) & 0xFF)

/* Family 1Ah widened the FID field and dropped the divisor. */
#define	AMD_1AH_CUR_FID(msr)			((msr) & 0xFFF)

/* Debug printf, gated on the debug.hwpstate_verbose sysctl. */
#define	HWPSTATE_DEBUG(dev, msg...)			\
	do {						\
		if (hwpstate_verbose)			\
			device_printf(dev, msg);	\
	} while (0)
100  
/* One entry of the cached P-state table, in cpufreq(4) units. */
struct hwpstate_setting {
	int	freq;		/* CPU clock in Mhz or 100ths of a percent. */
	int	volts;		/* Voltage in mV. */
	int	power;		/* Power consumed in mW. */
	int	lat;		/* Transition latency in us. */
	int	pstate_id;	/* P-State id */
};

/* Per-device software state: the P-state table and its entry count. */
struct hwpstate_softc {
	device_t		dev;	/* back-pointer to our device */
	struct hwpstate_setting	hwpstate_settings[AMD_10H_11H_MAX_STATES];
	int			cfnum;	/* number of valid entries above */
};
114  
/* Device and cpufreq method implementations; see the method table below. */
static void	hwpstate_identify(driver_t *driver, device_t parent);
static int	hwpstate_probe(device_t dev);
static int	hwpstate_attach(device_t dev);
static int	hwpstate_detach(device_t dev);
static int	hwpstate_set(device_t dev, const struct cf_setting *cf);
static int	hwpstate_get(device_t dev, struct cf_setting *cf);
static int	hwpstate_settings(device_t dev, struct cf_setting *sets, int *count);
static int	hwpstate_type(device_t dev, int *type);
static int	hwpstate_shutdown(device_t dev);
static int	hwpstate_features(driver_t *driver, u_int *features);
static int	hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev);
static int	hwpstate_get_info_from_msr(device_t dev);
static int	hwpstate_goto_pstate(device_t dev, int pstate_id);

/* debug.hwpstate_verbose: enable HWPSTATE_DEBUG() output. */
static int	hwpstate_verbose;
SYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RWTUN,
    &hwpstate_verbose, 0, "Debug hwpstate");

/* debug.hwpstate_verify: read back the status MSR after each transition. */
static int	hwpstate_verify;
SYSCTL_INT(_debug, OID_AUTO, hwpstate_verify, CTLFLAG_RWTUN,
    &hwpstate_verify, 0, "Verify P-state after setting");

/* debug.hwpstate_pstate_limit: clamp requested P-states to CurPstateLimit. */
static bool	hwpstate_pstate_limit;
SYSCTL_BOOL(_debug, OID_AUTO, hwpstate_pstate_limit, CTLFLAG_RWTUN,
    &hwpstate_pstate_limit, 0,
    "If enabled (1), limit administrative control of P-states to the value in "
    "CurPstateLimit");
142  
/* newbus method table wiring this driver into device and cpufreq frameworks. */
static device_method_t hwpstate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	hwpstate_identify),
	DEVMETHOD(device_probe,		hwpstate_probe),
	DEVMETHOD(device_attach,	hwpstate_attach),
	DEVMETHOD(device_detach,	hwpstate_detach),
	DEVMETHOD(device_shutdown,	hwpstate_shutdown),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	hwpstate_set),
	DEVMETHOD(cpufreq_drv_get,	hwpstate_get),
	DEVMETHOD(cpufreq_drv_settings,	hwpstate_settings),
	DEVMETHOD(cpufreq_drv_type,	hwpstate_type),

	/* ACPI interface */
	DEVMETHOD(acpi_get_features,	hwpstate_features),
	{0, 0}
};

static driver_t hwpstate_driver = {
	"hwpstate",
	hwpstate_methods,
	sizeof(struct hwpstate_softc),
};

/* Attach as a child of cpu(4) devices. */
DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, 0, 0);
169  
/*
 * Scale "val" down by a power-of-ten divisor given as an exponent
 * (0..3 => divide by 1/10/100/1000).  Exponents outside that range
 * leave the value untouched, matching the original fallthrough switch.
 */
static int
hwpstate_amd_iscale(int val, int div)
{
	static const int divisors[] = { 1, 10, 100, 1000 };

	if (div >= 0 && div <= 3)
		val /= divisors[div];

	return (val);
}
186  
/*
 * Go to Px-state on all cpus, considering the limit register (if so
 * configured).  Returns 0 on success, or ENXIO if verification is
 * enabled and a CPU failed to reach the requested state in time.
 */
static int
hwpstate_goto_pstate(device_t dev, int id)
{
	sbintime_t sbt;
	uint64_t msr;
	int cpu, i, j, limit;

	/*
	 * Optionally clamp the request to the hardware limit; a larger
	 * P-state number denotes a lower-performance state.
	 */
	if (hwpstate_pstate_limit) {
		/* get the current pstate limit */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr);
		if (limit > id) {
			HWPSTATE_DEBUG(dev, "Restricting requested P%d to P%d "
			    "due to HW limit\n", id, limit);
			id = limit;
		}
	}

	/* Switch the CPU we are currently running on first. */
	cpu = curcpu;
	HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, cpu);
	/* Go To Px-state */
	wrmsr(MSR_AMD_10H_11H_CONTROL, id);

	/*
	 * We are going to the same Px-state on all cpus.
	 * Probably should take _PSD into account.
	 *
	 * NOTE(review): the thread is left bound to the last CPU visited;
	 * there is no sched_unbind() here — confirm callers rely on the
	 * cpufreq framework to rebind/unbind around this.
	 */
	CPU_FOREACH(i) {
		if (i == cpu)
			continue;

		/* Bind to each cpu. */
		thread_lock(curthread);
		sched_bind(curthread, i);
		thread_unlock(curthread);
		HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, i);
		/* Go To Px-state */
		wrmsr(MSR_AMD_10H_11H_CONTROL, id);
	}

	/*
	 * Verify whether each core is in the requested P-state.
	 */
	if (hwpstate_verify) {
		CPU_FOREACH(i) {
			thread_lock(curthread);
			sched_bind(curthread, i);
			thread_unlock(curthread);
			/* Poll up to 100 times, sleeping 100us per try. */
			for (j = 0; j < 100; j++) {
				/* get the result. not assure msr=id */
				msr = rdmsr(MSR_AMD_10H_11H_STATUS);
				if (msr == id)
					break;
				sbt = SBT_1MS / 10;
				tsleep_sbt(dev, PZERO, "pstate_goto", sbt,
				    sbt >> tc_precexp, 0);
			}
			HWPSTATE_DEBUG(dev, "result: P%d-state on cpu%d\n",
			    (int)msr, i);
			if (msr != id) {
				HWPSTATE_DEBUG(dev,
				    "error: loop is not enough.\n");
				return (ENXIO);
			}
		}
	}

	return (0);
}
261  
262  static int
263  hwpstate_set(device_t dev, const struct cf_setting *cf)
264  {
265  	struct hwpstate_softc *sc;
266  	struct hwpstate_setting *set;
267  	int i;
268  
269  	if (cf == NULL)
270  		return (EINVAL);
271  	sc = device_get_softc(dev);
272  	set = sc->hwpstate_settings;
273  	for (i = 0; i < sc->cfnum; i++)
274  		if (CPUFREQ_CMP(cf->freq, set[i].freq))
275  			break;
276  	if (i == sc->cfnum)
277  		return (EINVAL);
278  
279  	return (hwpstate_goto_pstate(dev, set[i].pstate_id));
280  }
281  
282  static int
283  hwpstate_get(device_t dev, struct cf_setting *cf)
284  {
285  	struct hwpstate_softc *sc;
286  	struct hwpstate_setting set;
287  	uint64_t msr;
288  
289  	sc = device_get_softc(dev);
290  	if (cf == NULL)
291  		return (EINVAL);
292  	msr = rdmsr(MSR_AMD_10H_11H_STATUS);
293  	if (msr >= sc->cfnum)
294  		return (EINVAL);
295  	set = sc->hwpstate_settings[msr];
296  
297  	cf->freq = set.freq;
298  	cf->volts = set.volts;
299  	cf->power = set.power;
300  	cf->lat = set.lat;
301  	cf->dev = dev;
302  	return (0);
303  }
304  
305  static int
306  hwpstate_settings(device_t dev, struct cf_setting *sets, int *count)
307  {
308  	struct hwpstate_softc *sc;
309  	struct hwpstate_setting set;
310  	int i;
311  
312  	if (sets == NULL || count == NULL)
313  		return (EINVAL);
314  	sc = device_get_softc(dev);
315  	if (*count < sc->cfnum)
316  		return (E2BIG);
317  	for (i = 0; i < sc->cfnum; i++, sets++) {
318  		set = sc->hwpstate_settings[i];
319  		sets->freq = set.freq;
320  		sets->volts = set.volts;
321  		sets->power = set.power;
322  		sets->lat = set.lat;
323  		sets->dev = dev;
324  	}
325  	*count = sc->cfnum;
326  
327  	return (0);
328  }
329  
330  static int
331  hwpstate_type(device_t dev, int *type)
332  {
333  
334  	if (type == NULL)
335  		return (EINVAL);
336  
337  	*type = CPUFREQ_TYPE_ABSOLUTE;
338  	return (0);
339  }
340  
/*
 * Bus identify: add one "hwpstate" child under the CPU device when the
 * processor advertises hardware P-state support.
 */
static void
hwpstate_identify(driver_t *driver, device_t parent)
{

	/* Only one instance per CPU device. */
	if (device_find_child(parent, "hwpstate", -1) != NULL)
		return;

	/* Require AMD family 10h or newer, or any Hygon CPU. */
	if ((cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10) &&
	    cpu_vendor_id != CPU_VENDOR_HYGON)
		return;

	/*
	 * Check if hardware pstate enable bit is set.
	 */
	if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) {
		HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n");
		return;
	}

	/* Allow administrative disabling via device hints. */
	if (resource_disabled("hwpstate", 0))
		return;

	if (BUS_ADD_CHILD(parent, 10, "hwpstate", device_get_unit(parent))
	    == NULL)
		device_printf(parent, "hwpstate: add child failed\n");
}
367  
/*
 * Device probe: decide whether this driver should own P-state
 * transitions and build the P-state table, preferring ACPI _PSS data
 * from an INFO_ONLY acpi_perf(4) instance and falling back to MSRs.
 */
static int
hwpstate_probe(device_t dev)
{
	struct hwpstate_softc *sc;
	device_t perf_dev;
	uint64_t msr;
	int error, type;

	/*
	 * Only hwpstate0.
	 * It goes well with acpi_throttle.
	 */
	if (device_get_unit(dev) != 0)
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->dev = dev;

	/*
	 * Check if acpi_perf has INFO only flag.
	 * "error" doubles as a flag: any nonzero value selects the MSR
	 * fallback path below.
	 */
	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
	error = TRUE;
	if (perf_dev && device_is_attached(perf_dev)) {
		error = CPUFREQ_DRV_TYPE(perf_dev, &type);
		if (error == 0) {
			if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) {
				/*
				 * If acpi_perf doesn't have INFO_ONLY flag,
				 * it will take care of pstate transitions.
				 */
				HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n");
				return (ENXIO);
			} else {
				/*
				 * If acpi_perf has INFO_ONLY flag, (_PCT has FFixedHW)
				 * we can get _PSS info from acpi_perf
				 * without going into ACPI.
				 */
				HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n");
				error = hwpstate_get_info_from_acpi_perf(dev, perf_dev);
			}
		}
	}

	if (error == 0) {
		/*
		 * Got _PSS info from acpi_perf without error.
		 * Cross-check the entry count against the LIMIT MSR.
		 */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) {
			HWPSTATE_DEBUG(dev, "MSR (%jd) and ACPI _PSS (%d)"
			    " count mismatch\n", (intmax_t)msr, sc->cfnum);
			error = TRUE;
		}
	}

	/*
	 * If we cannot get info from acpi_perf,
	 * Let's get info from MSRs.
	 */
	if (error)
		error = hwpstate_get_info_from_msr(dev);
	if (error)
		return (error);

	device_set_desc(dev, "Cool`n'Quiet 2.0");
	return (0);
}
438  
439  static int
440  hwpstate_attach(device_t dev)
441  {
442  
443  	return (cpufreq_register(dev));
444  }
445  
/*
 * Build the P-state table directly from the per-P-state MSRs
 * (MSR_AMD_10H_11H_CONFIG + i).  Used when ACPI _PSS data is missing
 * or inconsistent.  Returns 0 on success, or ENXIO on an invalid MSR
 * entry, an unexpected field value, or an unsupported CPU family.
 */
static int
hwpstate_get_info_from_msr(device_t dev)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting *hwpstate_set;
	uint64_t msr;
	int family, i, fid, did;

	family = CPUID_TO_FAMILY(cpu_id);
	sc = device_get_softc(dev);
	/* Get pstate count */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr);
	hwpstate_set = sc->hwpstate_settings;
	for (i = 0; i < sc->cfnum; i++) {
		msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i);
		/* Bit 63 marks the P-state entry as valid/enabled. */
		if ((msr & ((uint64_t)1 << 63)) == 0) {
			HWPSTATE_DEBUG(dev, "msr is not valid.\n");
			return (ENXIO);
		}
		did = AMD_10H_11H_CUR_DID(msr);
		fid = AMD_10H_11H_CUR_FID(msr);

		/* Voltage/power are only decoded for family 17h+ below. */
		hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN;
		/* Convert fid/did to frequency. */
		switch (family) {
		case 0x11:
			/* 11h: freq = 100 MHz * (fid + 8) / 2^did. */
			hwpstate_set[i].freq = (100 * (fid + 0x08)) >> did;
			break;
		case 0x10:
		case 0x12:
		case 0x15:
		case 0x16:
			/* 10h/12h/15h/16h: freq = 100 MHz * (fid + 16) / 2^did. */
			hwpstate_set[i].freq = (100 * (fid + 0x10)) >> did;
			break;
		case 0x17:
		case 0x18:
		case 0x19:
		case 0x1A:
			/* calculate freq */
			if (family == 0x1A) {
				fid = AMD_1AH_CUR_FID(msr);
				/* 1Ah CPU don't use a divisor; FID is in 5 MHz units. */
				hwpstate_set[i].freq = fid;
				if (fid > 0x0f)
					hwpstate_set[i].freq *= 5;
				else {
					HWPSTATE_DEBUG(dev,
					    "unexpected fid: %d\n", fid);
					return (ENXIO);
				}
			} else {
				did = AMD_17H_CUR_DID(msr);
				/* Guard against a divide-by-zero from hardware. */
				if (did == 0) {
					HWPSTATE_DEBUG(dev,
					    "unexpected did: 0\n");
					did = 1;
				}
				fid = AMD_17H_CUR_FID(msr);
				hwpstate_set[i].freq = (200 * fid) / did;
			}

			/* Vid step is 6.25mV, so scale by 100. */
			hwpstate_set[i].volts =
			    (155000 - (625 * AMD_17H_CUR_VID(msr))) / 100;
			/*
			 * Calculate current first.
			 * This equation is mentioned in
			 * "BKDG for AMD Family 15h Models 70h-7fh Processors",
			 * section 2.5.2.1.6.
			 */
			hwpstate_set[i].power = AMD_17H_CUR_IDD(msr) * 1000;
			hwpstate_set[i].power = hwpstate_amd_iscale(
			    hwpstate_set[i].power, AMD_17H_CUR_IDIV(msr));
			hwpstate_set[i].power *= hwpstate_set[i].volts;
			/* Milli amps * milli volts to milli watts. */
			hwpstate_set[i].power /= 1000;
			break;
		default:
			HWPSTATE_DEBUG(dev, "get_info_from_msr: %s family"
			    " 0x%02x CPUs are not supported yet\n",
			    cpu_vendor_id == CPU_VENDOR_HYGON ? "Hygon" : "AMD",
			    family);
			return (ENXIO);
		}
		hwpstate_set[i].pstate_id = i;
	}
	return (0);
}
537  
538  static int
539  hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev)
540  {
541  	struct hwpstate_softc *sc;
542  	struct cf_setting *perf_set;
543  	struct hwpstate_setting *hwpstate_set;
544  	int count, error, i;
545  
546  	perf_set = malloc(MAX_SETTINGS * sizeof(*perf_set), M_TEMP, M_NOWAIT);
547  	if (perf_set == NULL) {
548  		HWPSTATE_DEBUG(dev, "nomem\n");
549  		return (ENOMEM);
550  	}
551  	/*
552  	 * Fetch settings from acpi_perf.
553  	 * Now it is attached, and has info only flag.
554  	 */
555  	count = MAX_SETTINGS;
556  	error = CPUFREQ_DRV_SETTINGS(perf_dev, perf_set, &count);
557  	if (error) {
558  		HWPSTATE_DEBUG(dev, "error: CPUFREQ_DRV_SETTINGS.\n");
559  		goto out;
560  	}
561  	sc = device_get_softc(dev);
562  	sc->cfnum = count;
563  	hwpstate_set = sc->hwpstate_settings;
564  	for (i = 0; i < count; i++) {
565  		if (i == perf_set[i].spec[0]) {
566  			hwpstate_set[i].pstate_id = i;
567  			hwpstate_set[i].freq = perf_set[i].freq;
568  			hwpstate_set[i].volts = perf_set[i].volts;
569  			hwpstate_set[i].power = perf_set[i].power;
570  			hwpstate_set[i].lat = perf_set[i].lat;
571  		} else {
572  			HWPSTATE_DEBUG(dev, "ACPI _PSS object mismatch.\n");
573  			error = ENXIO;
574  			goto out;
575  		}
576  	}
577  out:
578  	if (perf_set)
579  		free(perf_set, M_TEMP);
580  	return (error);
581  }
582  
583  static int
584  hwpstate_detach(device_t dev)
585  {
586  
587  	hwpstate_goto_pstate(dev, 0);
588  	return (cpufreq_unregister(dev));
589  }
590  
/*
 * Device shutdown: intentionally a no-op.  A forced transition to P0
 * was disabled here (see the commented-out call) — presumably it
 * caused problems at shutdown on some systems; TODO confirm history.
 */
static int
hwpstate_shutdown(device_t dev)
{

	/* hwpstate_goto_pstate(dev, 0); */
	return (0);
}
598  
599  static int
600  hwpstate_features(driver_t *driver, u_int *features)
601  {
602  
603  	/* Notify the ACPI CPU that we support direct access to MSRs */
604  	*features = ACPI_CAP_PERF_MSRS;
605  	return (0);
606  }
607