/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pcpu.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#if defined(__amd64__) || defined(__i386__)
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#endif
#include <sys/rman.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT	ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct acpi_cx {
    struct resource	*p_lvlx;	/* Register to read to enter state. */
    uint32_t		 type;		/* C1-3 (C4 and up treated as C3). */
    uint32_t		 trans_lat;	/* Transition latency (usec). */
    uint32_t		 power;		/* Power consumed (mW). */
    int			 res_type;	/* Resource type for p_lvlx. */
    int			 res_rid;	/* Resource ID for p_lvlx. */
    bool		 do_mwait;
    uint32_t		 mwait_hint;
    bool		 mwait_hw_coord;
    bool		 mwait_bm_avoidance;
};
#define MAX_CX_STATES	 8

struct acpi_cpu_softc {
    device_t		 cpu_dev;
    ACPI_HANDLE		 cpu_handle;
    struct pcpu		*cpu_pcpu;
    uint32_t		 cpu_acpi_id;	/* ACPI processor id */
    uint32_t		 cpu_p_blk;	/* ACPI P_BLK location */
    uint32_t		 cpu_p_blk_len;	/* P_BLK length (must be 6). */
    struct acpi_cx	 cpu_cx_states[MAX_CX_STATES];
    int			 cpu_cx_count;	/* Number of valid Cx states. */
    int			 cpu_prev_sleep;/* Last idle sleep duration. */
    int			 cpu_features;	/* Child driver supported features. */
    /* Runtime state. */
    int			 cpu_non_c2;	/* Index of lowest non-C2 state. */
    int			 cpu_non_c3;	/* Index of lowest non-C3 state. */
    u_int		 cpu_cx_stats[MAX_CX_STATES];/* Cx usage history. */
    /* Values for sysctl. */
    struct sysctl_ctx_list cpu_sysctl_ctx;
    struct sysctl_oid	*cpu_sysctl_tree;
    int			 cpu_cx_lowest;
    int			 cpu_cx_lowest_lim;
    int			 cpu_disable_idle; /* Disable entry to idle function */
    char		 cpu_cx_supported[64];
};

struct acpi_cpu_device {
    struct resource_list	ad_rl;
};

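/*
 * Reading the P_LVLx register for a state is what actually requests entry
 * into the corresponding C-state; these macros wrap the bus_space access
 * to that register.
 */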
#define CPU_GET_REG(reg, width) 					\
    (bus_space_read_ ## width(rman_get_bustag((reg)), 			\
		      rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)					\
    (bus_space_write_ ## width(rman_get_bustag((reg)), 		\
		       rman_get_bushandle((reg)), 0, (val)))

#define ACPI_NOTIFY_CX_STATES	0x81	/* _CST changed. */

#define CPU_QUIRK_NO_C3		(1<<0)	/* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL	(1<<2)	/* No bus mastering control. */

#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1
#define PCI_REVISION_4E		2
#define PCI_REVISION_4M		3
#define PIIX4_DEVACTB_REG	0x58
#define PIIX4_BRLD_EN_IRQ0	(1<<0)
#define PIIX4_BRLD_EN_IRQ	(1<<1)
#define PIIX4_BRLD_EN_IRQ8	(1<<5)
#define PIIX4_STOP_BREAK_MASK	(PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN	(1<<10)

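/*
 * Decoding of Intel FFH (functional fixed hardware) _CST entries, as used
 * by acpi_PkgFFH_IntelCpu() below: the low 32 bits of the GAS address carry
 * the class-specific argument (an I/O port for C1IO, the MWAIT hint for
 * MWAIT), and the access-size field carries the flags below.
 */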
#define	CST_FFH_VENDOR_INTEL	1
#define	CST_FFH_INTEL_CL_C1IO	1
#define	CST_FFH_INTEL_CL_MWAIT	2
#define	CST_FFH_MWAIT_HW_COORD	0x0001
#define	CST_FFH_MWAIT_BM_AVOID	0x0002

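/* _HID used by Processor device objects, the ACPI 3.0+ replacement for
 * the legacy Processor() operator. */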
#define	CPUDEV_DEVICE_ID	"ACPI0007"

/* Knob to disable acpi_cpu devices */
bool acpi_cpu_disabled = false;

/* Platform hardware resource information. */
static uint32_t		 cpu_smi_cmd;	/* Value to write to SMI_CMD. */
static uint8_t		 cpu_cst_cnt;	/* Indicate we are _CST aware. */
static int		 cpu_quirks;	/* Indicate any hardware bugs. */

/* Values for sysctl. */
static struct sysctl_ctx_list cpu_sysctl_ctx;
static struct sysctl_oid *cpu_sysctl_tree;
static int		 cpu_cx_generic;
static int		 cpu_cx_lowest_lim;
#if defined(__i386__) || defined(__amd64__)
static bool		 cppc_notify;
#endif

static struct acpi_cpu_softc **cpu_softc;
ACPI_SERIAL_DECL(cpu, "ACPI CPU");

static int	acpi_cpu_probe(device_t dev);
static int	acpi_cpu_attach(device_t dev);
static int	acpi_cpu_suspend(device_t dev);
static int	acpi_cpu_resume(device_t dev);
static int	acpi_pcpu_get_id(device_t dev, uint32_t acpi_id,
		    u_int *cpu_id);
static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child);
static device_t	acpi_cpu_add_child(device_t dev, u_int order, const char *name,
		    int unit);
static int	acpi_cpu_read_ivar(device_t dev, device_t child, int index,
		    uintptr_t *result);
static int	acpi_cpu_shutdown(device_t dev);
static void	acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static void	acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void	acpi_cpu_startup(void *arg);
static void	acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
static void	acpi_cpu_cx_list(struct acpi_cpu_softc *sc);
#if defined(__i386__) || defined(__amd64__)
static void	acpi_cpu_idle(sbintime_t sbt);
#endif
static void	acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static void	acpi_cpu_quirks(void);
static void	acpi_cpu_quirks_piix4(void);
static int	acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
#if defined(__i386__) || defined(__amd64__)
static int	acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS);
#endif

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_cpu_probe),
    DEVMETHOD(device_attach,	acpi_cpu_attach),
    DEVMETHOD(device_detach,	bus_generic_detach),
    DEVMETHOD(device_shutdown,	acpi_cpu_shutdown),
    DEVMETHOD(device_suspend,	acpi_cpu_suspend),
    DEVMETHOD(device_resume,	acpi_cpu_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,	acpi_cpu_add_child),
    DEVMETHOD(bus_read_ivar,	acpi_cpu_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist),
    DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

    DEVMETHOD_END
};

static driver_t acpi_cpu_driver = {
    "cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, 0, 0);
MODULE_DEPEND(cpu, acpi, 1, 1, 1);

static int
acpi_cpu_probe(device_t dev)
{
    static char		   *cpudev_ids[] = { CPUDEV_DEVICE_ID, NULL };
    int			   acpi_id, cpu_id;
    ACPI_BUFFER		   buf;
    ACPI_HANDLE		   handle;
    ACPI_OBJECT		   *obj;
    ACPI_STATUS		   status;
    ACPI_OBJECT_TYPE	   type;

    if (acpi_disabled("cpu") || acpi_cpu_disabled)
	return (ENXIO);
    type = acpi_get_type(dev);
    if (type != ACPI_TYPE_PROCESSOR && type != ACPI_TYPE_DEVICE)
	return (ENXIO);
    if (type == ACPI_TYPE_DEVICE &&
	ACPI_ID_PROBE(device_get_parent(dev), dev, cpudev_ids, NULL) >= 0)
	return (ENXIO);

    handle = acpi_get_handle(dev);
    if (cpu_softc == NULL)
	cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
	    (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);

    if (type == ACPI_TYPE_PROCESSOR) {
	/* Get our Processor object. */
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(handle, NULL, NULL, &buf);
	if (ACPI_FAILURE(status)) {
	    device_printf(dev, "probe failed to get Processor obj - %s\n",
		AcpiFormatException(status));
	    return (ENXIO);
	}
	obj = (ACPI_OBJECT *)buf.Pointer;
	if (obj->Type != ACPI_TYPE_PROCESSOR) {
	    device_printf(dev, "Processor object has bad type %d\n",
		obj->Type);
	    AcpiOsFree(obj);
	    return (ENXIO);
	}

	/*
	 * Find the processor associated with our unit.  We could use the
	 * ProcId as a key, however, some boxes do not have the same values
	 * in their Processor object as the ProcId values in the MADT.
	 */
	acpi_id = obj->Processor.ProcId;
	AcpiOsFree(obj);
    } else {
	status = acpi_GetInteger(handle, "_UID", &acpi_id);
	if (ACPI_FAILURE(status)) {
	    device_printf(dev, "Device object has bad value - %s\n",
		AcpiFormatException(status));
	    return (ENXIO);
	}
    }
    if (acpi_pcpu_get_id(dev, acpi_id, &cpu_id) != 0) {
	if (bootverbose && (type != ACPI_TYPE_PROCESSOR || acpi_id != 255))
	    printf("ACPI: Processor %s (ACPI ID %u) ignored\n",
		acpi_name(acpi_get_handle(dev)), acpi_id);
	return (ENXIO);
    }

    if (device_set_unit(dev, cpu_id) != 0)
	return (ENXIO);

    device_set_desc(dev, "ACPI CPU");

    if (!bootverbose && device_get_unit(dev) != 0) {
	    device_quiet(dev);
	    device_quiet_children(dev);
    }

    return (BUS_PROBE_DEFAULT);
}

static int
acpi_cpu_attach(device_t dev)
{
    ACPI_BUFFER		   buf;
    ACPI_OBJECT		   arg, *obj;
    ACPI_OBJECT_LIST	   arglist;
    struct pcpu		   *pcpu_data;
    struct acpi_cpu_softc *sc;
    struct acpi_softc	  *acpi_sc;
    ACPI_STATUS		   status;
    u_int		   features;
    int			   cpu_id, drv_count, i;
    driver_t 		  **drivers;
    uint32_t		   cap_set[3];

    /* UUID needed by _OSC evaluation */
    static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29,
				       0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70,
				       0x58, 0x71, 0x39, 0x53 };

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);
    cpu_id = device_get_unit(dev);
    cpu_softc[cpu_id] = sc;
    pcpu_data = pcpu_find(cpu_id);
    pcpu_data->pc_device = dev;
    sc->cpu_pcpu = pcpu_data;
    cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
    cpu_cst_cnt = AcpiGbl_FADT.CstControl;

    if (acpi_get_type(dev) == ACPI_TYPE_PROCESSOR) {
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
	if (ACPI_FAILURE(status)) {
	    device_printf(dev, "attach failed to get Processor obj - %s\n",
		AcpiFormatException(status));
	    return (ENXIO);
	}
	obj = (ACPI_OBJECT *)buf.Pointer;
	sc->cpu_p_blk = obj->Processor.PblkAddress;
	sc->cpu_p_blk_len = obj->Processor.PblkLength;
	sc->cpu_acpi_id = obj->Processor.ProcId;
	AcpiOsFree(obj);
    } else {
	KASSERT(acpi_get_type(dev) == ACPI_TYPE_DEVICE,
	    ("Unexpected ACPI object"));
	status = acpi_GetInteger(sc->cpu_handle, "_UID", &sc->cpu_acpi_id);
	if (ACPI_FAILURE(status)) {
	    device_printf(dev, "Device object has bad value - %s\n",
		AcpiFormatException(status));
	    return (ENXIO);
	}
	sc->cpu_p_blk = 0;
	sc->cpu_p_blk_len = 0;
    }
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
		     device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
	/* Assume we won't be using generic Cx mode by default */
	cpu_cx_generic = FALSE;

	/* Install hw.acpi.cpu sysctl tree */
	acpi_sc = acpi_device_get_parent_softc(dev);
	sysctl_ctx_init(&cpu_sysctl_ctx);
	cpu_sysctl_tree = SYSCTL_ADD_NODE(&cpu_sysctl_ctx,
	    SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "node for CPU children");

#if defined(__i386__) || defined(__amd64__)
	/* Add sysctl handler to control registering for CPPC notifications */
	cppc_notify = 1;
	SYSCTL_ADD_BOOL(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
	    OID_AUTO, "cppc_notify", CTLFLAG_RDTUN | CTLFLAG_MPSAFE,
	    &cppc_notify, 0, "Register for CPPC Notifications");
#endif
    }

    /*
     * Before calling any CPU methods, collect child driver feature hints
     * and notify ACPI of them.  We support unified SMP power control
     * so advertise this ourselves.  Note this is not the same as independent
     * SMP control where each CPU can have different settings.
     */
    sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3 |
      ACPI_CAP_C1_IO_HALT;

#if defined(__i386__) || defined(__amd64__)
    /*
     * Ask for MWAIT modes if not disabled and interrupts work
     * reasonably with MWAIT.
     */
    if (!acpi_disabled("mwait") && cpu_mwait_usable())
	sc->cpu_features |= ACPI_CAP_SMP_C1_NATIVE | ACPI_CAP_SMP_C3_NATIVE;

    /*
     * Work around a lingering SMM bug which leads to freezes when handling
     * CPPC notifications. Tell the SMM we will handle any CPPC notifications.
     */
    if ((cpu_power_eax & CPUTPM1_HWP_NOTIFICATION) && cppc_notify)
	    sc->cpu_features |= ACPI_CAP_INTR_CPPC;
#endif

    if (devclass_get_drivers(device_get_devclass(dev), &drivers,
	&drv_count) == 0) {
	for (i = 0; i < drv_count; i++) {
	    if (ACPI_GET_FEATURES(drivers[i], &features) == 0)
		sc->cpu_features |= features;
	}
	free(drivers, M_TEMP);
    }

    /*
     * CPU capabilities are specified in
     * Intel Processor Vendor-Specific ACPI Interface Specification.
     */
    if (sc->cpu_features) {
	cap_set[1] = sc->cpu_features;
	status = acpi_EvaluateOSC(sc->cpu_handle, cpu_oscuuid, 1, 2, cap_set,
	    cap_set, false);
	if (ACPI_SUCCESS(status)) {
	    if (cap_set[0] != 0)
		device_printf(dev, "_OSC returned status %#x\n", cap_set[0]);
	}
	else {
	    arglist.Pointer = &arg;
	    arglist.Count = 1;
	    arg.Type = ACPI_TYPE_BUFFER;
	    arg.Buffer.Length = sizeof(cap_set);
	    arg.Buffer.Pointer = (uint8_t *)cap_set;
	    cap_set[0] = 1; /* revision */
	    cap_set[1] = 1; /* number of capabilities integers */
	    cap_set[2] = sc->cpu_features;
	    AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL);
	}
    }

    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    return (0);
}

static void
acpi_cpu_postattach(void *unused __unused)
{
    struct acpi_cpu_softc *sc;
    int attached = 0, i;

    if (cpu_softc == NULL)
	return;

    bus_topo_lock();
    CPU_FOREACH(i) {
	if ((sc = cpu_softc[i]) != NULL)
		bus_generic_probe(sc->cpu_dev);
    }
    CPU_FOREACH(i) {
	if ((sc = cpu_softc[i]) != NULL) {
		bus_generic_attach(sc->cpu_dev);
		attached = 1;
	}
    }
    bus_topo_unlock();

    if (attached) {
#ifdef EARLY_AP_STARTUP
	acpi_cpu_startup(NULL);
#else
	/* Queue post cpu-probing task handler */
	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
#endif
    }
}

SYSINIT(acpi_cpu, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE,
    acpi_cpu_postattach, NULL);

static void
disable_idle(struct acpi_cpu_softc *sc)
{
    cpuset_t cpuset;

    CPU_SETOF(sc->cpu_pcpu->pc_cpuid, &cpuset);
    sc->cpu_disable_idle = TRUE;

    /*
     * Ensure that the CPU is not in an idle state or in acpi_cpu_idle().
     * Note that this code depends on the fact that the rendezvous IPI
     * cannot penetrate a context where interrupts are disabled;
     * acpi_cpu_idle is called and executed in exactly such a context,
     * with interrupts being re-enabled right before it returns.
     */
    smp_rendezvous_cpus(cpuset, smp_no_rendezvous_barrier, NULL,
	smp_no_rendezvous_barrier, NULL);
}

static void
enable_idle(struct acpi_cpu_softc *sc)
{

    sc->cpu_disable_idle = FALSE;
}

#if defined(__i386__) || defined(__amd64__)
static int
is_idle_disabled(struct acpi_cpu_softc *sc)
{

    return (sc->cpu_disable_idle);
}
#endif

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cpu_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
	return (error);
    disable_idle(device_get_softc(dev));
    return (0);
}

static int
acpi_cpu_resume(device_t dev)
{

    enable_idle(device_get_softc(dev));
    return (bus_generic_resume(dev));
}

/*
 * Find the processor associated with a given ACPI ID.
 */
static int
acpi_pcpu_get_id(device_t dev, uint32_t acpi_id, u_int *cpu_id)
{
    struct pcpu	*pc;
    u_int	 i;

    CPU_FOREACH(i) {
	pc = pcpu_find(i);
	if (pc->pc_acpi_id == acpi_id) {
	    *cpu_id = pc->pc_cpuid;
	    return (0);
	}
    }

    /*
     * If pc_acpi_id for CPU 0 is not initialized (e.g. a non-APIC
     * UP box) use the ACPI ID from the first processor we find.
     */
    if (mp_ncpus == 1) {
	pc = pcpu_find(0);
	if (pc->pc_acpi_id == 0xffffffff)
	    pc->pc_acpi_id = acpi_id;
	*cpu_id = 0;
	return (0);
    }

    return (ESRCH);
}

static struct resource_list *
acpi_cpu_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
	return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_add_child(device_t dev, u_int order, const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
	return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(dev, order, name, unit);
    if (child != NULL)
	device_set_ivars(child, ad);
    else
	free(ad, M_TEMP);
    return (child);
}

static int
acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
	*result = (uintptr_t)sc->cpu_handle;
	break;
    case CPU_IVAR_PCPU:
	*result = (uintptr_t)sc->cpu_pcpu;
	break;
#if defined(__amd64__) || defined(__i386__)
    case CPU_IVAR_NOMINAL_MHZ:
	if (tsc_is_invariant) {
	    *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) / 1000000);
	    break;
	}
	/* FALLTHROUGH */
#endif
    default:
	return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.
     */
    disable_idle(device_get_softc(dev));

    /*
     * CPU devices are not truly detached and remain referenced,
     * so their resources are not freed.
     */

    return_VALUE (0);
}

static void
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;
    sc->cpu_cx_lowest = 0;
    sc->cpu_cx_lowest_lim = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to the generic FADT/P_BLK Cx control method, which
     * is handled by acpi_cpu_startup.  Probing for generic Cx states must
     * be deferred until all CPUs in the system have been probed, since
     * some of them may already have valid _CST packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
	/*
	 * We were unable to find a _CST package for this cpu or there
	 * was an error parsing it. Switch back to generic mode.
	 */
	cpu_cx_generic = TRUE;
	if (bootverbose)
	    device_printf(sc->cpu_dev, "switching to generic Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}

static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS	 gas;
    struct acpi_cx		*cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_non_c2 = sc->cpu_cx_count;
    sc->cpu_non_c3 = sc->cpu_cx_count;
    sc->cpu_cx_count++;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use a shorter length to indicate a partial set of features, so we
     * take a length of 5 to mean that C2 is present.  Some may also use
     * a value of 7 to indicate another C3, but most use _CST for this
     * (as required) and having "only" C1-C3 is not a hardship.
     */
    if (sc->cpu_p_blk_len < 5)
	return;

    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
    if (AcpiGbl_FADT.C2Latency <= 100) {
	gas.Address = sc->cpu_p_blk + 4;
	cx_ptr->res_rid = 0;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
	    &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    cx_ptr->type = ACPI_STATE_C2;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
	    cx_ptr++;
	    sc->cpu_non_c3 = sc->cpu_cx_count;
	    sc->cpu_cx_count++;
	}
    }
    if (sc->cpu_p_blk_len < 6)
	return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
	gas.Address = sc->cpu_p_blk + 5;
	cx_ptr->res_rid = 1;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
	    &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    cx_ptr->type = ACPI_STATE_C3;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	}
    }
}

#if defined(__i386__) || defined(__amd64__)
static void
acpi_cpu_cx_cst_mwait(struct acpi_cx *cx_ptr, uint64_t address, int accsize)
{

	cx_ptr->do_mwait = true;
	cx_ptr->mwait_hint = address & 0xffffffff;
	cx_ptr->mwait_hw_coord = (accsize & CST_FFH_MWAIT_HW_COORD) != 0;
	cx_ptr->mwait_bm_avoidance = (accsize & CST_FFH_MWAIT_BM_AVOID) != 0;
}
#endif

static void
acpi_cpu_cx_cst_free_plvlx(device_t cpu_dev, struct acpi_cx *cx_ptr)
{

	if (cx_ptr->p_lvlx == NULL)
		return;
	bus_release_resource(cpu_dev, cx_ptr->res_type, cx_ptr->res_rid,
	    cx_ptr->p_lvlx);
	cx_ptr->p_lvlx = NULL;
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct	 acpi_cx *cx_ptr;
    ACPI_STATUS	 status;
    ACPI_BUFFER	 buf;
    ACPI_OBJECT	*top;
    ACPI_OBJECT	*pkg;
    uint32_t	 count;
    int		 i;
#if defined(__i386__) || defined(__amd64__)
    uint64_t	 address;
    int		 vendor, class, accsize;
#endif

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
	return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
	device_printf(sc->cpu_dev, "invalid _CST package\n");
	AcpiOsFree(buf.Pointer);
	return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
	device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n",
	       count, top->Package.Count - 1);
	count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
	device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
	count = MAX_CX_STATES;
    }

    sc->cpu_non_c2 = 0;
    sc->cpu_non_c3 = 0;
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /*
     * C1 has been required since just after ACPI 1.0.
     * Reserve the first slot for it.
     */
    cx_ptr->type = ACPI_STATE_C0;
    cx_ptr++;
    sc->cpu_cx_count++;

    /* Set up all valid states. */
    for (i = 0; i < count; i++) {
	pkg = &top->Package.Elements[i + 1];
	if (!ACPI_PKG_VALID(pkg, 4) ||
	    acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
	    acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
	    acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
	    device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
	    continue;
	}

	/* Validate the state to see if we should use it. */
	switch (cx_ptr->type) {
	case ACPI_STATE_C1:
	    acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr);
#if defined(__i386__) || defined(__amd64__)
	    if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address,
	      &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL) {
		if (class == CST_FFH_INTEL_CL_C1IO) {
		    /* C1 I/O then Halt */
		    cx_ptr->res_rid = sc->cpu_cx_count;
		    bus_set_resource(sc->cpu_dev, SYS_RES_IOPORT,
		      cx_ptr->res_rid, address, 1);
		    cx_ptr->p_lvlx = bus_alloc_resource_any(sc->cpu_dev,
		      SYS_RES_IOPORT, &cx_ptr->res_rid, RF_ACTIVE |
		      RF_SHAREABLE);
		    if (cx_ptr->p_lvlx == NULL) {
			bus_delete_resource(sc->cpu_dev, SYS_RES_IOPORT,
			  cx_ptr->res_rid);
			device_printf(sc->cpu_dev,
			  "C1 I/O failed to allocate port %d, "
			  "degrading to C1 Halt\n", (int)address);
		    }
		} else if (class == CST_FFH_INTEL_CL_MWAIT) {
		    acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
		}
	    }
#endif
	    if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) {
		/* This is the first C1 state.  Use the reserved slot. */
		sc->cpu_cx_states[0] = *cx_ptr;
	    } else {
		sc->cpu_non_c2 = sc->cpu_cx_count;
		sc->cpu_non_c3 = sc->cpu_cx_count;
		cx_ptr++;
		sc->cpu_cx_count++;
	    }
	    continue;
	case ACPI_STATE_C2:
	    sc->cpu_non_c3 = sc->cpu_cx_count;
	    break;
	case ACPI_STATE_C3:
	default:
	    if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				 "acpi_cpu%d: C3[%d] not available.\n",
				 device_get_unit(sc->cpu_dev), i));
		continue;
	    }
	    break;
	}

	/* Free up any previous register. */
	acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr);

	/* Allocate the control register for C2 or C3. */
#if defined(__i386__) || defined(__amd64__)
	if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address,
	  &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL &&
	  class == CST_FFH_INTEL_CL_MWAIT) {
	    /* Native C State Instruction use (mwait) */
	    acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
	      "acpi_cpu%d: Got C%d/mwait - %d latency\n",
	      device_get_unit(sc->cpu_dev), cx_ptr->type, cx_ptr->trans_lat));
	    cx_ptr++;
	    sc->cpu_cx_count++;
	} else
#endif
	{
	    cx_ptr->res_rid = sc->cpu_cx_count;
	    acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type,
		&cx_ptr->res_rid, &cx_ptr->p_lvlx, RF_SHAREABLE);
	    if (cx_ptr->p_lvlx) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		     "acpi_cpu%d: Got C%d - %d latency\n",
		     device_get_unit(sc->cpu_dev), cx_ptr->type,
		     cx_ptr->trans_lat));
		cx_ptr++;
		sc->cpu_cx_count++;
	    }
	}
    }
    AcpiOsFree(buf.Pointer);

    /* If C1 state was not found, we need one now. */
    cx_ptr = sc->cpu_cx_states;
    if (cx_ptr->type == ACPI_STATE_C0) {
	cx_ptr->type = ACPI_STATE_C1;
	cx_ptr->trans_lat = 0;
    }

    return (0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int i;

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs.
     */
    acpi_cpu_quirks();

    if (cpu_cx_generic) {
	/*
	 * We are using generic Cx mode, probe for available Cx states
	 * for all processors.
	 */
	CPU_FOREACH(i) {
	    if ((sc = cpu_softc[i]) != NULL)
		acpi_cpu_generic_cx_probe(sc);
	}
    } else {
	/*
	 * We are using _CST mode; remove the C3 state if necessary.
	 * Now that we know for sure that we will be using _CST mode,
	 * install our notify handler.
	 */
	CPU_FOREACH(i) {
	    if ((sc = cpu_softc[i]) == NULL)
		continue;
	    if (cpu_quirks & CPU_QUIRK_NO_C3) {
		sc->cpu_cx_count = min(sc->cpu_cx_count, sc->cpu_non_c3 + 1);
	    }
	    AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
		acpi_cpu_notify, sc);
	}
    }

    /* Perform Cx final initialization. */
    CPU_FOREACH(i) {
	if ((sc = cpu_softc[i]) != NULL)
	    acpi_cpu_startup_cx(sc);
    }

    /* Add a sysctl handler to handle global Cx lowest setting */
    SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
	OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
	NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A",
	"Global lowest Cx sleep state to use");

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest_lim = 0;
    CPU_FOREACH(i) {
	if ((sc = cpu_softc[i]) != NULL)
	    enable_idle(sc);
    }
#if defined(__i386__) || defined(__amd64__)
    cpu_idle_hook = acpi_cpu_idle;
#endif
}

static void
acpi_cpu_cx_list(struct acpi_cpu_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states
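     * as a space-separated string of C<n>/<type>/<latency-us> entries,
     * e.g. "C1/1/0 C2/2/97 C3/3/500" (values are illustrative only)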
     */
    sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
	SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++)
	sbuf_printf(&sb, "C%d/%d/%d ", i + 1, sc->cpu_cx_states[i].type,
	    sc->cpu_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{
    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&sc->cpu_sysctl_ctx,
		      SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
		      OID_AUTO, "cx_supported", CTLFLAG_RD,
		      sc->cpu_cx_supported, 0,
		      "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO,
	"cx_lowest", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
	(void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
	"lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO,
	"cx_usage", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	(void *)sc, 0, acpi_cpu_usage_sysctl, "A",
	"percent usage for each Cx state");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO,
	"cx_usage_counters", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	(void *)sc, 0, acpi_cpu_usage_counters_sysctl, "A",
	"Cx sleep state counters");
#if defined(__i386__) || defined(__amd64__)
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO,
	"cx_method", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	(void *)sc, 0, acpi_cpu_method_sysctl, "A", "Cx entrance methods");
#endif

    /* Signal platform that we can handle _CST notification. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
	ACPI_LOCK(acpi);
	AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
	ACPI_UNLOCK(acpi);
    }
}
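
/*
 * The knobs installed above can be inspected and tuned from userland;
 * for example (output values are illustrative only):
 *
 *   sysctl dev.cpu.0.cx_supported      # e.g. "C1/1/0 C2/2/97 C3/3/500"
 *   sysctl dev.cpu.0.cx_lowest=C2      # cap cpu0's idle depth at C2
 *   sysctl hw.acpi.cpu.cx_lowest=Cmax  # allow the deepest state on all CPUs
 */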

#if defined(__i386__) || defined(__amd64__)
/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(sbintime_t sbt)
{
    struct	acpi_cpu_softc *sc;
    struct	acpi_cx *cx_next;
    uint64_t	start_ticks, end_ticks;
    uint32_t	start_time, end_time;
    ACPI_STATUS	status;
    int		bm_active, cx_next_idx, i, us;

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
	acpi_cpu_c1();
	return;
    }

    /* If disabled, take the safe path. */
    if (is_idle_disabled(sc)) {
	acpi_cpu_c1();
	return;
    }

    /* Find the lowest state that has small enough latency. */
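    /*
     * sbintime_t is 32.32 fixed-point seconds, so one microsecond is about
     * 2^32 / 10^6 ~= 2^12 units; (sbt >> 12) therefore approximates the
     * remaining sleep time in microseconds.
     */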
    us = sc->cpu_prev_sleep;
    if (sbt >= 0 && us > (sbt >> 12))
	us = (sbt >> 12);
    cx_next_idx = 0;
    if (cpu_disable_c2_sleep)
	i = min(sc->cpu_cx_lowest, sc->cpu_non_c2);
    else if (cpu_disable_c3_sleep)
	i = min(sc->cpu_cx_lowest, sc->cpu_non_c3);
    else
	i = sc->cpu_cx_lowest;
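    /*
     * Walk down from the deepest permitted state to the first one whose
     * worst-case transition latency is at most a third of the expected
     * sleep time.
     */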
    for (; i >= 0; i--) {
	if (sc->cpu_cx_states[i].trans_lat * 3 <= us) {
	    cx_next_idx = i;
	    break;
	}
    }

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0 &&
	cx_next_idx > sc->cpu_non_c3 &&
	(!cx_next->do_mwait || cx_next->mwait_bm_avoidance)) {
	status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
	if (ACPI_SUCCESS(status) && bm_active != 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	    cx_next_idx = sc->cpu_non_c3;
	    cx_next = &sc->cpu_cx_states[cx_next_idx];
	}
    }

    /* Select the next state and update statistics. */
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * precisely calculate the time spent in C1 since the place we wake up
     * is an ISR.  Assume we slept no more than half of a quantum, unless
     * we are called inside a critical section, delaying the context switch.
     */
    if (cx_next->type == ACPI_STATE_C1) {
	start_ticks = cpu_ticks();
	if (cx_next->p_lvlx != NULL) {
	    /* C1 I/O then Halt */
	    CPU_GET_REG(cx_next->p_lvlx, 1);
	}
	if (cx_next->do_mwait)
	    acpi_cpu_idle_mwait(cx_next->mwait_hint);
	else
	    acpi_cpu_c1();
	end_ticks = cpu_ticks();
	/* acpi_cpu_c1() returns with interrupts enabled. */
	if (cx_next->do_mwait)
	    ACPI_ENABLE_IRQS();
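	/*
	 * (delta << 20) / cpu_tickrate() converts CPU ticks to roughly
	 * microseconds, since 2^20 ~= 10^6.
	 */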
	end_time = ((end_ticks - start_ticks) << 20) / cpu_tickrate();
	if (!cx_next->do_mwait && curthread->td_critnest == 0)
		end_time = min(end_time, 500000 / hz);
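	/*
	 * 500000 / hz above is half of one timer tick in microseconds; the
	 * update below keeps an exponential moving average weighting prior
	 * history 3:1 over the latest measurement.
	 */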
	sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
	return;
    }

    /*
     * For C3, disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type == ACPI_STATE_C3) {
	if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
	} else
	    ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    if (cx_next->type == ACPI_STATE_C3) {
	AcpiGetTimer(&start_time);
	start_ticks = 0;
    } else {
	start_time = 0;
	start_ticks = cpu_ticks();
    }
    if (cx_next->do_mwait) {
	acpi_cpu_idle_mwait(cx_next->mwait_hint);
    } else {
	CPU_GET_REG(cx_next->p_lvlx, 1);
	/*
	 * Read the end time twice.  Since it may take an arbitrary time
	 * to enter the idle state, the first read may be executed before
	 * the processor has stopped.  Doing it again provides enough
	 * margin that we are certain to have a correct value.
	 */
	AcpiGetTimer(&end_time);
    }

    if (cx_next->type == ACPI_STATE_C3)
	AcpiGetTimer(&end_time);
    else
	end_ticks = cpu_ticks();

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type == ACPI_STATE_C3 &&
      (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
	AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
    }
    ACPI_ENABLE_IRQS();

    if (cx_next->type == ACPI_STATE_C3)
	AcpiGetTimerDuration(start_time, end_time, &end_time);
    else
	end_time = ((end_ticks - start_ticks) << 20) / cpu_tickrate();
    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
}
#endif

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;

    if (notify != ACPI_NOTIFY_CX_STATES)
	return;

    /*
     * C-state data for target CPU is going to be in flux while we execute
     * acpi_cpu_cx_cst, so disable entering acpi_cpu_idle.
     * Also, it may happen that multiple ACPI taskqueues may concurrently
     * execute notifications for the same CPU.  ACPI_SERIAL is used to
     * protect against that.
     */
    ACPI_SERIAL_BEGIN(cpu);
    disable_idle(sc);

    /* Update the list of Cx states. */
    acpi_cpu_cx_cst(sc);
    acpi_cpu_cx_list(sc);
    acpi_cpu_set_cx_lowest(sc);

    enable_idle(sc);
    ACPI_SERIAL_END(cpu);

    acpi_UserNotify("PROCESSOR", sc->cpu_handle, notify);
}

static void
acpi_cpu_quirks(void)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
	AcpiGbl_FADT.Pm2ControlLength == 0) {
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
	    (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
	    cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, using flush cache method\n"));
	} else {
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, C3 not available\n"));
	}
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && mp_ncpus > 1) {
	cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
	    "acpi_cpu: SMP, using flush cache mode for C3\n"));
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_cpu_quirks_piix4();
}

static void
acpi_cpu_quirks_piix4(void)
{
#ifdef __i386__
    device_t acpi_dev;
    uint32_t val;
    ACPI_STATUS status;

    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
	switch (pci_get_revid(acpi_dev)) {
	/*
	 * Disable C3 support for all PIIX4 chipsets.  Some of these parts
	 * do not report the BMIDE status to the BM status register and
	 * others have a livelock bug if Type-F DMA is enabled.  Linux
	 * works around the BMIDE bug by reading the BM status directly
	 * but we take the simpler approach of disabling C3 for these
	 * parts.
	 *
	 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
	 * Livelock") from the January 2002 PIIX4 specification update.
	 * Applies to all PIIX4 models.
	 *
	 * Also, make sure that all interrupts cause a "Stop Break"
	 * event to exit from C2 state.
	 * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
	 * should be set to zero, otherwise it causes C2 to short-sleep.
	 * PIIX4 doesn't properly support C3 and bus master activity
	 * need not break out of C2.
	 */
	case PCI_REVISION_A_STEP:
	case PCI_REVISION_B_STEP:
	case PCI_REVISION_4E:
	case PCI_REVISION_4M:
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: working around PIIX4 bug, disabling C3\n"));

	    val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
	    if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
		val |= PIIX4_STOP_BREAK_MASK;
		pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
	    }
	    status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
	    if (ACPI_SUCCESS(status) && val != 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
		AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
	    }
	    break;
	default:
	    break;
	}
    }
#endif
}

static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)arg1;
	struct sbuf	 sb;
	char		 buf[128];
	int		 error, i;
	uintmax_t	 fract, sum, whole;

	sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req);
	sum = 0;
	for (i = 0; i < sc->cpu_cx_count; i++)
		sum += sc->cpu_cx_stats[i];
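	/*
	 * Render each state's share with two decimal places: whole / sum is
	 * the integer percentage and (whole % sum) * 100 / sum recovers the
	 * fractional part.
	 */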
	for (i = 0; i < sc->cpu_cx_count; i++) {
		if (sum > 0) {
			whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
			fract = (whole % sum) * 100;
			sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
			    (u_int)(fract / sum));
		} else
			sbuf_printf(&sb, "0.00%% ");
	}
	sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep);
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error);
}

/*
 * XXX TODO: actually add support to count each entry/exit
 * from the Cx states.
 */
static int
acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)arg1;
	struct sbuf	 sb;
	char		 buf[128];
	int		 error, i;

	sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req);
	for (i = 0; i < sc->cpu_cx_count; i++) {
		if (i > 0)
			sbuf_putc(&sb, ' ');
		sbuf_printf(&sb, "%u", sc->cpu_cx_stats[i]);
	}
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error);
}

#if defined(__i386__) || defined(__amd64__)
static int
acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)arg1;
	struct acpi_cx *cx;
	struct sbuf sb;
	char buf[128];
	int error, i;

	sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req);
	for (i = 0; i < sc->cpu_cx_count; i++) {
		cx = &sc->cpu_cx_states[i];
		if (i > 0)
			sbuf_putc(&sb, ' ');
		sbuf_printf(&sb, "C%d/", i + 1);
		if (cx->do_mwait) {
			sbuf_cat(&sb, "mwait");
			if (cx->mwait_hw_coord)
				sbuf_cat(&sb, "/hwc");
			if (cx->mwait_bm_avoidance)
				sbuf_cat(&sb, "/bma");
		} else if (cx->type == ACPI_STATE_C1) {
			sbuf_cat(&sb, "hlt");
		} else {
			sbuf_cat(&sb, "io");
		}
		if (cx->type == ACPI_STATE_C1 && cx->p_lvlx != NULL)
			sbuf_cat(&sb, "/iohlt");
	}
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error);
}
#endif

static int
acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc)
{
    int i;

    ACPI_SERIAL_ASSERT(cpu);
    sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1);

    /* If not disabling, cache the new lowest non-C3 state. */
    sc->cpu_non_c3 = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
	if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
	    sc->cpu_non_c3 = i;
	    break;
	}
    }

    /* Reset the statistics counters. */
    bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
    return (0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct	 acpi_cpu_softc *sc;
    char	 state[8];
    int		 val, error;

    sc = (struct acpi_cpu_softc *) arg1;
    snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
	val = MAX_CX_STATES;
    else {
	val = (int) strtol(state + 1, NULL, 10);
	if (val < 1 || val > MAX_CX_STATES)
	    return (EINVAL);
    }

    ACPI_SERIAL_BEGIN(cpu);
    sc->cpu_cx_lowest_lim = val - 1;
    acpi_cpu_set_cx_lowest(sc);
    ACPI_SERIAL_END(cpu);

    return (0);
}

static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct	acpi_cpu_softc *sc;
    char	state[8];
    int		val, error, i;

    snprintf(state, sizeof(state), "C%d", cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
	val = MAX_CX_STATES;
    else {
	val = (int) strtol(state + 1, NULL, 10);
	if (val < 1 || val > MAX_CX_STATES)
	    return (EINVAL);
    }

    /* Update the lowest usable Cx state for all CPUs. */
    ACPI_SERIAL_BEGIN(cpu);
    cpu_cx_lowest_lim = val - 1;
    CPU_FOREACH(i) {
	if ((sc = cpu_softc[i]) == NULL)
	    continue;
	sc->cpu_cx_lowest_lim = cpu_cx_lowest_lim;
	acpi_cpu_set_cx_lowest(sc);
    }
    ACPI_SERIAL_END(cpu);

    return (0);
}