/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * CPU Device driver. The driver is not DDI-compliant.
 *
 * The driver supports the following features:
 *	- Power management.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/debug.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sdt.h>

#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <sys/cpudrv_mach.h>
#include <sys/msacct.h>

/*
 * CPU power management
 *
 * The supported power saving model is to slow down the CPU (on SPARC by
 * dividing the CPU clock and on x86 by dropping down a P-state).
 * Periodically we determine the amount of time the CPU spent running the
 * idle thread and threads in user mode during the last quantum.  If the
 * idle thread was running less than its low water mark for the current
 * speed for a number of consecutive sampling periods, or the number of
 * threads running in user mode is above its high water mark, we arrange
 * to go to the higher speed.  If the idle thread was running more than
 * its high water mark without dropping below the mark a number of
 * consecutive times, and the number of threads running in user mode is
 * below its low water mark, we arrange to go to the next lower speed.
 * While going down, we go through all the speeds.  While going up we go
 * to the maximum speed to minimize impact on the user, but have
 * provisions in the driver to go to other speeds.
 *
 * The driver does not have knowledge of a particular implementation of
 * this scheme and will work with all CPUs supporting this model. On
 * SPARC, the driver determines supported speeds by looking at the
 * 'clock-divisors' property created by OBP. On x86, the driver retrieves
 * the supported speeds from ACPI.
 */
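/*
 * Illustrative example (hypothetical divisor values, not taken from any
 * particular platform): with 'clock-divisors' of 1, 2 and 32, a mostly
 * idle CPU steps down 1 -> 2 -> 32 one speed at a time as the idle
 * thread stays above the high water mark, but steps back up 32 -> 1 in
 * a single transition as soon as user-mode activity crosses the high
 * water mark.
 */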

/*
 * Configuration function prototypes and data structures
 */
static int cpudrv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int cpudrv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int cpudrv_power(dev_info_t *dip, int comp, int level);

struct dev_ops cpudrv_ops = {
	DEVO_REV,		/* rev */
	0,			/* refcnt */
	nodev,			/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	cpudrv_attach,		/* attach */
	cpudrv_detach,		/* detach */
	nodev,			/* reset */
	(struct cb_ops *)NULL,	/* cb_ops */
	(struct bus_ops *)NULL,	/* bus_ops */
	cpudrv_power,		/* power */
	ddi_quiesce_not_needed,		/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,			/* modops */
	"CPU Driver",			/* linkinfo */
	&cpudrv_ops,			/* dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,		/* rev */
	&modldrv,		/* linkage */
	NULL
};

/*
 * Function prototypes
 */
static int cpudrv_pm_init_power(cpudrv_devstate_t *cpudsp);
static void cpudrv_pm_free(cpudrv_devstate_t *cpudsp);
static int cpudrv_pm_comp_create(cpudrv_devstate_t *cpudsp);
static void cpudrv_pm_monitor_disp(void *arg);
static void cpudrv_pm_monitor(void *arg);

/*
 * Driver global variables
 */
uint_t cpudrv_debug = 0;
void *cpudrv_state;
static uint_t cpudrv_pm_idle_hwm = CPUDRV_PM_IDLE_HWM;
static uint_t cpudrv_pm_idle_lwm = CPUDRV_PM_IDLE_LWM;
static uint_t cpudrv_pm_idle_buf_zone = CPUDRV_PM_IDLE_BUF_ZONE;
static uint_t cpudrv_pm_idle_bhwm_cnt_max = CPUDRV_PM_IDLE_BHWM_CNT_MAX;
static uint_t cpudrv_pm_idle_blwm_cnt_max = CPUDRV_PM_IDLE_BLWM_CNT_MAX;
static uint_t cpudrv_pm_user_hwm = CPUDRV_PM_USER_HWM;

/*
 * cpudrv_direct_pm allows user applications to directly control the
 * power state transitions (direct pm) without following the normal
 * direct pm protocol. This is needed because the normal protocol
 * requires that a device only be lowered when it is idle, and be
 * brought up when it requests to do so by calling pm_raise_power().
 * Ignoring this protocol is harmless for the CPU (other than speed).
 * Moreover, it might be the case that the CPU is never idle or wants
 * to be at a higher speed because of the additional CPU cycles required
 * to run the user application.
 *
 * The driver will still report idle/busy status to the framework. Although
 * the framework will ignore this information for direct pm devices and not
 * try to bring them down when idle, user applications can still use this
 * information if they want.
 *
 * In the future, provide an ioctl to control setting of this mode. In
 * that case, this variable should move to the state structure and
 * be protected by the lock in the state structure.
 */
int cpudrv_direct_pm = 0;
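/*
 * Note: since no ioctl exists yet, the usual way to flip a global like
 * cpudrv_direct_pm is an /etc/system tunable, e.g.
 * "set cpudrv:cpudrv_direct_pm = 1" (assumption based on standard
 * kernel tunable practice, not on anything specific to this driver).
 */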

/*
 * Arranges for the handler function to be called at the interval suitable
 * for the current speed.
 */
#define	CPUDRV_PM_MONITOR_INIT(cpudsp) { \
	if (CPUDRV_PM_POWER_ENABLED(cpudsp)) { \
		ASSERT(mutex_owned(&(cpudsp)->lock)); \
		(cpudsp)->cpudrv_pm.timeout_id = \
		    timeout(cpudrv_pm_monitor_disp, \
		    (cpudsp), (((cpudsp)->cpudrv_pm.cur_spd == NULL) ? \
		    CPUDRV_PM_QUANT_CNT_OTHR : \
		    (cpudsp)->cpudrv_pm.cur_spd->quant_cnt)); \
	} \
}

/*
 * Arranges for the handler function not to be called back. The state
 * lock is dropped across the untimeout()/drain so that a concurrently
 * executing cpudrv_pm_monitor(), which takes the same lock, can finish;
 * timeout_count is used to wait for any already-dispatched taskq jobs.
 */
#define	CPUDRV_PM_MONITOR_FINI(cpudsp) { \
	timeout_id_t tmp_tid; \
	ASSERT(mutex_owned(&(cpudsp)->lock)); \
	tmp_tid = (cpudsp)->cpudrv_pm.timeout_id; \
	(cpudsp)->cpudrv_pm.timeout_id = 0; \
	mutex_exit(&(cpudsp)->lock); \
	if (tmp_tid != 0) { \
		(void) untimeout(tmp_tid); \
		mutex_enter(&(cpudsp)->cpudrv_pm.timeout_lock); \
		while ((cpudsp)->cpudrv_pm.timeout_count != 0) \
			cv_wait(&(cpudsp)->cpudrv_pm.timeout_cv, \
			    &(cpudsp)->cpudrv_pm.timeout_lock); \
		mutex_exit(&(cpudsp)->cpudrv_pm.timeout_lock); \
	} \
	mutex_enter(&(cpudsp)->lock); \
}

int
_init(void)
{
	int	error;

	DPRINTF(D_INIT, (" _init: function called\n"));
	if ((error = ddi_soft_state_init(&cpudrv_state,
	    sizeof (cpudrv_devstate_t), 0)) != 0) {
		return (error);
	}

	if ((error = mod_install(&modlinkage)) != 0) {
		ddi_soft_state_fini(&cpudrv_state);
	}

	/*
	 * Callbacks used by the PPM driver.
	 */
	CPUDRV_PM_SET_PPM_CALLBACKS();
	return (error);
}

int
_fini(void)
{
	int	error;

	DPRINTF(D_FINI, (" _fini: function called\n"));
	if ((error = mod_remove(&modlinkage)) == 0) {
		ddi_soft_state_fini(&cpudrv_state);
	}

	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Driver attach(9e) entry point.
 */
static int
cpudrv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			instance;
	cpudrv_devstate_t	*cpudsp;
	extern pri_t		maxclsyspri;

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		DPRINTF(D_ATTACH, ("cpudrv_attach: instance %d: "
		    "DDI_ATTACH called\n", instance));
		if (CPUDRV_PM_DISABLED())
			return (DDI_FAILURE);
		if (ddi_soft_state_zalloc(cpudrv_state, instance) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "cpudrv_attach: instance %d: "
			    "can't allocate state", instance);
			CPUDRV_PM_DISABLE();
			return (DDI_FAILURE);
		}
		if ((cpudsp = ddi_get_soft_state(cpudrv_state, instance)) ==
		    NULL) {
			cmn_err(CE_WARN, "cpudrv_attach: instance %d: "
			    "can't get state", instance);
			ddi_soft_state_free(cpudrv_state, instance);
			CPUDRV_PM_DISABLE();
			return (DDI_FAILURE);
		}
		cpudsp->dip = dip;

		/*
		 * Find CPU number for this dev_info node.
		 */
		if (!cpudrv_pm_get_cpu_id(dip, &(cpudsp->cpu_id))) {
			cmn_err(CE_WARN, "cpudrv_attach: instance %d: "
			    "can't convert dip to cpu_id", instance);
			ddi_soft_state_free(cpudrv_state, instance);
			CPUDRV_PM_DISABLE();
			return (DDI_FAILURE);
		}
		if (!cpudrv_mach_pm_init(cpudsp)) {
			ddi_soft_state_free(cpudrv_state, instance);
			CPUDRV_PM_DISABLE();
			return (DDI_FAILURE);
		}
		mutex_init(&cpudsp->lock, NULL, MUTEX_DRIVER, NULL);
		if (CPUDRV_PM_POWER_ENABLED(cpudsp)) {
			if (cpudrv_pm_init_power(cpudsp) != DDI_SUCCESS) {
				CPUDRV_PM_DISABLE();
				cpudrv_pm_free(cpudsp);
				ddi_soft_state_free(cpudrv_state, instance);
				return (DDI_FAILURE);
			}
			if (cpudrv_pm_comp_create(cpudsp) != DDI_SUCCESS) {
				CPUDRV_PM_DISABLE();
				cpudrv_pm_free(cpudsp);
				ddi_soft_state_free(cpudrv_state, instance);
				return (DDI_FAILURE);
			}
			if (ddi_prop_update_string(DDI_DEV_T_NONE,
			    dip, "pm-class", "CPU") != DDI_PROP_SUCCESS) {
				CPUDRV_PM_DISABLE();
				cpudrv_pm_free(cpudsp);
				ddi_soft_state_free(cpudrv_state, instance);
				return (DDI_FAILURE);
			}

			/*
			 * A taskq is used to dispatch the routine that
			 * monitors CPU activity.
			 */
			cpudsp->cpudrv_pm.tq = taskq_create_instance(
			    "cpudrv_pm_monitor",
			    ddi_get_instance(dip), CPUDRV_PM_TASKQ_THREADS,
			    (maxclsyspri - 1), CPUDRV_PM_TASKQ_MIN,
			    CPUDRV_PM_TASKQ_MAX,
			    TASKQ_PREPOPULATE|TASKQ_CPR_SAFE);

			mutex_init(&cpudsp->cpudrv_pm.timeout_lock, NULL,
			    MUTEX_DRIVER, NULL);
			cv_init(&cpudsp->cpudrv_pm.timeout_cv, NULL,
			    CV_DEFAULT, NULL);

			/*
			 * The driver needs to assume that the CPU is running
			 * at an unknown speed at DDI_ATTACH and switch it to
			 * the needed speed. We assume that the initial needed
			 * speed is full speed.
			 */
			/*
			 * We need to take the lock because cpudrv_pm_monitor()
			 * will start running in parallel with attach().
			 */
			mutex_enter(&cpudsp->lock);
			cpudsp->cpudrv_pm.cur_spd = NULL;
			cpudsp->cpudrv_pm.targ_spd =
			    cpudsp->cpudrv_pm.head_spd;
			cpudsp->cpudrv_pm.pm_started = B_FALSE;
			/*
			 * We don't call pm_raise_power() directly from attach
			 * because driver attach for a slave CPU node can
			 * happen before the CPU is even initialized. We just
			 * start the monitoring system, which understands
			 * unknown speed and moves the CPU to targ_spd once it
			 * has been initialized.
			 */
			CPUDRV_PM_MONITOR_INIT(cpudsp);
			mutex_exit(&cpudsp->lock);

		}

		CPUDRV_PM_INSTALL_MAX_CHANGE_HANDLER(cpudsp, dip);

		ddi_report_dev(dip);
		return (DDI_SUCCESS);

	case DDI_RESUME:
		DPRINTF(D_ATTACH, ("cpudrv_attach: instance %d: "
		    "DDI_RESUME called\n", instance));

		cpudsp = ddi_get_soft_state(cpudrv_state, instance);
		ASSERT(cpudsp != NULL);

		/*
		 * Nothing to do for resume, if not doing active PM.
		 */
		if (!CPUDRV_PM_POWER_ENABLED(cpudsp))
			return (DDI_SUCCESS);

		mutex_enter(&cpudsp->lock);
		/*
		 * The driver needs to assume that the CPU is running at an
		 * unknown speed at DDI_RESUME and switch it to the needed
		 * speed. We assume that the needed speed is full speed.
		 */
		cpudsp->cpudrv_pm.cur_spd = NULL;
		cpudsp->cpudrv_pm.targ_spd = cpudsp->cpudrv_pm.head_spd;
		CPUDRV_PM_MONITOR_INIT(cpudsp);
		mutex_exit(&cpudsp->lock);
		CPUDRV_PM_REDEFINE_TOPSPEED(dip);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Driver detach(9e) entry point.
 */
static int
cpudrv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int			instance;
	cpudrv_devstate_t	*cpudsp;
	cpudrv_pm_t		*cpupm;

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_DETACH:
		DPRINTF(D_DETACH, ("cpudrv_detach: instance %d: "
		    "DDI_DETACH called\n", instance));
		/*
		 * If the only thing supported by the driver is power
		 * management, we can in the future enhance the driver and
		 * the framework that loads it to unload the driver when
		 * the user has disabled CPU power management.
		 */
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		DPRINTF(D_DETACH, ("cpudrv_detach: instance %d: "
		    "DDI_SUSPEND called\n", instance));

		cpudsp = ddi_get_soft_state(cpudrv_state, instance);
		ASSERT(cpudsp != NULL);

		/*
		 * Nothing to do for suspend, if not doing active PM.
		 */
		if (!CPUDRV_PM_POWER_ENABLED(cpudsp))
			return (DDI_SUCCESS);

		/*
		 * During a checkpoint-resume sequence, the framework will
		 * stop interrupts to quiesce kernel activity. This will
		 * leave our monitoring system ineffective. Handle this
		 * by stopping our monitoring system and bringing the CPU
		 * to full speed. In case we are in special direct pm
		 * mode, we leave the CPU at whatever speed it is. This
		 * is harmless other than speed.
		 */
		mutex_enter(&cpudsp->lock);
		cpupm = &(cpudsp->cpudrv_pm);

		DPRINTF(D_DETACH, ("cpudrv_detach: instance %d: DDI_SUSPEND - "
		    "cur_spd %d, head_spd %d\n", instance,
		    cpupm->cur_spd->pm_level, cpupm->head_spd->pm_level));

		CPUDRV_PM_MONITOR_FINI(cpudsp);

		if (!cpudrv_direct_pm && (cpupm->cur_spd != cpupm->head_spd)) {
			if (cpupm->pm_busycnt < 1) {
				if ((pm_busy_component(dip, CPUDRV_PM_COMP_NUM)
				    == DDI_SUCCESS)) {
					cpupm->pm_busycnt++;
				} else {
					CPUDRV_PM_MONITOR_INIT(cpudsp);
					mutex_exit(&cpudsp->lock);
					cmn_err(CE_WARN, "cpudrv_detach: "
					    "instance %d: can't busy CPU "
					    "component", instance);
					return (DDI_FAILURE);
				}
			}
			mutex_exit(&cpudsp->lock);
			if (pm_raise_power(dip, CPUDRV_PM_COMP_NUM,
			    cpupm->head_spd->pm_level) != DDI_SUCCESS) {
				mutex_enter(&cpudsp->lock);
				CPUDRV_PM_MONITOR_INIT(cpudsp);
				mutex_exit(&cpudsp->lock);
				cmn_err(CE_WARN, "cpudrv_detach: instance %d: "
				    "can't raise CPU power level", instance);
				return (DDI_FAILURE);
			} else {
				return (DDI_SUCCESS);
			}
		} else {
			mutex_exit(&cpudsp->lock);
			return (DDI_SUCCESS);
		}

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Driver power(9e) entry point.
 *
 * The driver's notion of the current power level is set *only* in the
 * power(9e) entry point, after the actual power change operation has
 * completed successfully.
 */
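/*
 * Note: the "level" argument takes the pm_level values assigned in
 * cpudrv_pm_comp_create() (1 for the slowest speed up to num_spd for
 * full speed), matching the entries of the pm-components property.
 */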
/* ARGSUSED */
static int
cpudrv_power(dev_info_t *dip, int comp, int level)
{
	int			instance;
	cpudrv_devstate_t	*cpudsp;
	cpudrv_pm_t		*cpupm;
	cpudrv_pm_spd_t		*new_spd;
	boolean_t		is_ready;
	int			ret;

	instance = ddi_get_instance(dip);

	DPRINTF(D_POWER, ("cpudrv_power: instance %d: level %d\n",
	    instance, level));
	if ((cpudsp = ddi_get_soft_state(cpudrv_state, instance)) == NULL) {
		cmn_err(CE_WARN, "cpudrv_power: instance %d: can't get state",
		    instance);
		return (DDI_FAILURE);
	}

	mutex_enter(&cpudsp->lock);
	cpupm = &(cpudsp->cpudrv_pm);

	/*
	 * In normal operation, we fail if we are busy and the request is
	 * to lower the power level. We let this go through if the driver
	 * is in special direct pm mode. On x86, we also let this through
	 * if the change is due to a request to govern the max speed.
	 */
	if (!cpudrv_direct_pm && (cpupm->pm_busycnt >= 1) &&
	    !cpudrv_pm_is_governor_thread(cpupm)) {
		if ((cpupm->cur_spd != NULL) &&
		    (level < cpupm->cur_spd->pm_level)) {
			mutex_exit(&cpudsp->lock);
			return (DDI_FAILURE);
		}
	}

	for (new_spd = cpupm->head_spd; new_spd; new_spd = new_spd->down_spd) {
		if (new_spd->pm_level == level)
			break;
	}
	if (!new_spd) {
		CPUDRV_PM_RESET_GOVERNOR_THREAD(cpupm);
		mutex_exit(&cpudsp->lock);
		cmn_err(CE_WARN, "cpudrv_power: instance %d: "
		    "can't locate new CPU speed", instance);
		return (DDI_FAILURE);
	}

	/*
	 * We currently refuse to power manage if the CPU is not ready to
	 * take cross calls (cross calls fail silently if the CPU is not
	 * ready for them).
	 *
	 * Additionally, for x86 platforms we cannot power manage
	 * any one instance until all instances have been initialized.
	 * That's because we don't know what the CPU domains look like
	 * until all instances have been initialized.
	 */
	is_ready = CPUDRV_PM_XCALL_IS_READY(cpudsp->cpu_id);
	if (!is_ready) {
		DPRINTF(D_POWER, ("cpudrv_power: instance %d: "
		    "CPU not ready for x-calls\n", instance));
	} else if (!(is_ready = cpudrv_pm_power_ready())) {
		DPRINTF(D_POWER, ("cpudrv_power: instance %d: "
		    "waiting for all CPUs to be power manageable\n", instance));
	}
	if (!is_ready) {
		CPUDRV_PM_RESET_GOVERNOR_THREAD(cpupm);
		mutex_exit(&cpudsp->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Execute the CPU-specific routine on the requested CPU to change
	 * its speed to normal-speed/divisor.
	 */
	if ((ret = cpudrv_pm_change_speed(cpudsp, new_spd)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "cpudrv_power: cpudrv_pm_change_speed() "
		    "return = %d", ret);
		mutex_exit(&cpudsp->lock);
		return (DDI_FAILURE);
	}

	/*
	 * DTrace probe point for CPU speed change transition
	 */
	DTRACE_PROBE3(cpu__change__speed, cpudrv_devstate_t *, cpudsp,
	    cpudrv_pm_t *, cpupm, cpudrv_pm_spd_t *, new_spd);
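	/*
	 * The probe above is visible as sdt:::cpu-change-speed (SDT turns
	 * the double underscores into dashes); an illustrative way to watch
	 * speed transitions, not part of the driver, would be:
	 *	dtrace -n 'cpu-change-speed { trace(timestamp); }'
	 */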

	/*
	 * Reset the idle threshold time for the new power level.
	 */
	if ((cpupm->cur_spd != NULL) && (level < cpupm->cur_spd->pm_level)) {
		if (pm_idle_component(dip, CPUDRV_PM_COMP_NUM) ==
		    DDI_SUCCESS) {
			if (cpupm->pm_busycnt >= 1)
				cpupm->pm_busycnt--;
		} else
			cmn_err(CE_WARN, "cpudrv_power: instance %d: can't "
			    "idle CPU component", ddi_get_instance(dip));
	}
	/*
	 * Reset various parameters because we are now running at the new
	 * speed.
	 */
	cpupm->lastquan_mstate[CMS_IDLE] = 0;
	cpupm->lastquan_mstate[CMS_SYSTEM] = 0;
	cpupm->lastquan_mstate[CMS_USER] = 0;
	cpupm->lastquan_lbolt = 0;
	cpupm->cur_spd = new_spd;
	CPUDRV_PM_RESET_GOVERNOR_THREAD(cpupm);
	mutex_exit(&cpudsp->lock);

	return (DDI_SUCCESS);
}

/*
 * Initialize the field that will be used for reporting
 * the supported_frequencies_Hz cpu_info kstat.
 */
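/*
 * The resulting kstat string is a ':'-separated list of frequencies in Hz,
 * ordered slowest to fastest; e.g., a hypothetical 1 GHz CPU with divisors
 * 1 and 2 would report "500000000:1000000000".
 */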
static void
set_supp_freqs(cpu_t *cp, cpudrv_pm_t *cpupm)
{
	char		*supp_freqs;
	char		*sfptr;
	uint64_t	*speeds;
	cpudrv_pm_spd_t	*spd;
	int		i;
#define	UINT64_MAX_STRING (sizeof ("18446744073709551615"))

	speeds = kmem_zalloc(cpupm->num_spd * sizeof (uint64_t), KM_SLEEP);
	for (i = cpupm->num_spd - 1, spd = cpupm->head_spd; spd;
	    i--, spd = spd->down_spd) {
		speeds[i] =
		    CPUDRV_PM_SPEED_HZ(cp->cpu_type_info.pi_clock, spd->speed);
	}

	supp_freqs = kmem_zalloc((UINT64_MAX_STRING * cpupm->num_spd),
	    KM_SLEEP);
	sfptr = supp_freqs;
	for (i = 0; i < cpupm->num_spd; i++) {
		if (i == cpupm->num_spd - 1) {
			(void) sprintf(sfptr, "%"PRIu64, speeds[i]);
		} else {
			(void) sprintf(sfptr, "%"PRIu64":", speeds[i]);
			sfptr = supp_freqs + strlen(supp_freqs);
		}
	}
	cpu_set_supp_freqs(cp, supp_freqs);
	kmem_free(supp_freqs, (UINT64_MAX_STRING * cpupm->num_spd));
	kmem_free(speeds, cpupm->num_spd * sizeof (uint64_t));
}

/*
 * Initialize power management data.
 */
static int
cpudrv_pm_init_power(cpudrv_devstate_t *cpudsp)
{
	cpudrv_pm_t	*cpupm = &(cpudsp->cpudrv_pm);
	cpudrv_pm_spd_t	*cur_spd;
	cpudrv_pm_spd_t	*prev_spd = NULL;
	int		*speeds;
	uint_t		nspeeds;
	int		idle_cnt_percent;
	int		user_cnt_percent;
	int		i;

	CPUDRV_PM_GET_SPEEDS(cpudsp, speeds, nspeeds);
	if (nspeeds < 2) {
		/* Need at least two speeds to power manage */
		CPUDRV_PM_FREE_SPEEDS(speeds, nspeeds);
		return (DDI_FAILURE);
	}
	cpupm->num_spd = nspeeds;

	/*
	 * Calculate the watermarks and other parameters based on the
	 * supplied speeds.
	 *
	 * One of the basic assumptions is that for X amount of CPU work,
	 * if the CPU is slowed down by a factor of N, the time it takes to
	 * do the same work will be N * X.
	 *
	 * The driver declares that a CPU is idle and ready to be slowed
	 * down if the amount of idle time is more than the current speed's
	 * idle_hwm, without dropping below idle_hwm for a number of
	 * consecutive sampling intervals, and the number of threads running
	 * in user mode is below user_lwm.  We want to set the current
	 * user_lwm such that if we just switched to the next slower speed
	 * with no change in real work load, the amount of user threads at
	 * the slower speed will be such that it falls below the slower
	 * speed's user_hwm.  If we didn't do that then we will just come
	 * back to the higher speed as soon as we go down even with no
	 * change in work load.
	 * The user_hwm is a fixed percentage and not calculated dynamically.
	 *
	 * We bring the CPU up if the idle time at the current speed is less
	 * than the current speed's idle_lwm for a number of consecutive
	 * sampling intervals, or user threads are above the user_hwm for
	 * the current speed.
	 */
	for (i = 0; i < nspeeds; i++) {
		cur_spd = kmem_zalloc(sizeof (cpudrv_pm_spd_t), KM_SLEEP);
		cur_spd->speed = speeds[i];
		if (i == 0) {	/* normal speed */
			cpupm->head_spd = cur_spd;
			cur_spd->quant_cnt = CPUDRV_PM_QUANT_CNT_NORMAL;
			cur_spd->idle_hwm =
			    (cpudrv_pm_idle_hwm * cur_spd->quant_cnt) / 100;
			/* can't speed up any more */
			cur_spd->idle_lwm = 0;
			cur_spd->user_hwm = UINT_MAX;
		} else {
			cur_spd->quant_cnt = CPUDRV_PM_QUANT_CNT_OTHR;
			ASSERT(prev_spd != NULL);
			prev_spd->down_spd = cur_spd;
			cur_spd->up_spd = cpupm->head_spd;

			/*
			 * Let's assume the CPU is considered idle at full
			 * speed when it is spending I% of time running the
			 * idle thread.  At full speed, the CPU will be busy
			 * (100 - I)% of the time.  This busy percentage
			 * increases by a factor of N as the CPU slows down.
			 * A CPU that is idle I% of the time at full speed is
			 * idle (100 - ((100 - I) * N))% of the time at speed
			 * N.  The idle_lwm is a fixed percentage.  A large
			 * value of N may result in the idle_hwm going below
			 * the idle_lwm.  We need to make sure that there is
			 * at least a buffer zone separation between the
			 * idle_lwm and idle_hwm values.
			 */
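			/*
			 * Worked example with made-up numbers: with an
			 * idle_hwm of 85% at full speed (I = 85) and a
			 * divide-by-2 speed (N = 2), the scaled threshold
			 * is 100 - ((100 - 85) * 2) = 70%.  Only for much
			 * larger N (e.g., N = 32, giving a negative value)
			 * does the max() below clamp it to
			 * idle_lwm + buf_zone.
			 */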
			idle_cnt_percent = CPUDRV_PM_IDLE_CNT_PERCENT(
			    cpudrv_pm_idle_hwm, speeds, i);
			idle_cnt_percent = max(idle_cnt_percent,
			    (cpudrv_pm_idle_lwm + cpudrv_pm_idle_buf_zone));
			cur_spd->idle_hwm =
			    (idle_cnt_percent * cur_spd->quant_cnt) / 100;
			cur_spd->idle_lwm =
			    (cpudrv_pm_idle_lwm * cur_spd->quant_cnt) / 100;

			/*
			 * The lwm for user threads is determined such that
			 * if the CPU slows down, the load of work in the
			 * new speed would still keep the CPU at or below the
			 * user_hwm in the new speed.  This is to prevent
			 * a quick jump back up to the higher speed.
			 */
			cur_spd->user_hwm = (cpudrv_pm_user_hwm *
			    cur_spd->quant_cnt) / 100;
			user_cnt_percent = CPUDRV_PM_USER_CNT_PERCENT(
			    cpudrv_pm_user_hwm, speeds, i);
			prev_spd->user_lwm =
			    (user_cnt_percent * prev_spd->quant_cnt) / 100;
		}
		prev_spd = cur_spd;
	}
	/* Slowest speed. Can't slow down anymore */
	cur_spd->idle_hwm = UINT_MAX;
	cur_spd->user_lwm = -1;
#ifdef	DEBUG
	DPRINTF(D_PM_INIT, ("cpudrv_pm_init: instance %d: head_spd spd %d, "
	    "num_spd %d\n", ddi_get_instance(cpudsp->dip),
	    cpupm->head_spd->speed, cpupm->num_spd));
	for (cur_spd = cpupm->head_spd; cur_spd; cur_spd = cur_spd->down_spd) {
		DPRINTF(D_PM_INIT, ("cpudrv_pm_init: instance %d: speed %d, "
		    "down_spd spd %d, idle_hwm %d, user_lwm %d, "
		    "up_spd spd %d, idle_lwm %d, user_hwm %d, "
		    "quant_cnt %d\n", ddi_get_instance(cpudsp->dip),
		    cur_spd->speed,
		    (cur_spd->down_spd ? cur_spd->down_spd->speed : 0),
		    cur_spd->idle_hwm, cur_spd->user_lwm,
		    (cur_spd->up_spd ? cur_spd->up_spd->speed : 0),
		    cur_spd->idle_lwm, cur_spd->user_hwm,
		    cur_spd->quant_cnt));
	}
#endif	/* DEBUG */
	CPUDRV_PM_FREE_SPEEDS(speeds, nspeeds);
	return (DDI_SUCCESS);
}

/*
 * Free CPU power management data.
 */
static void
cpudrv_pm_free(cpudrv_devstate_t *cpudsp)
{
	cpudrv_pm_t	*cpupm = &(cpudsp->cpudrv_pm);
	cpudrv_pm_spd_t	*cur_spd, *next_spd;

	cur_spd = cpupm->head_spd;
	while (cur_spd) {
		next_spd = cur_spd->down_spd;
		kmem_free(cur_spd, sizeof (cpudrv_pm_spd_t));
		cur_spd = next_spd;
	}
	bzero(cpupm, sizeof (cpudrv_pm_t));
	cpudrv_mach_pm_free(cpudsp);
}

/*
 * Create pm-components property.
 */
static int
cpudrv_pm_comp_create(cpudrv_devstate_t *cpudsp)
{
	cpudrv_pm_t	*cpupm = &(cpudsp->cpudrv_pm);
	cpudrv_pm_spd_t	*cur_spd;
	char		**pmc;
	int		size;
	char		name[] = "NAME=CPU Speed";
	int		i, j;
	uint_t		comp_spd;
	int		result = DDI_FAILURE;

	pmc = kmem_zalloc((cpupm->num_spd + 1) * sizeof (char *), KM_SLEEP);
	size = CPUDRV_PM_COMP_SIZE();
	if (cpupm->num_spd > CPUDRV_PM_COMP_MAX_VAL) {
		cmn_err(CE_WARN, "cpudrv_pm_comp_create: instance %d: "
		    "number of speeds exceeded limits",
		    ddi_get_instance(cpudsp->dip));
		kmem_free(pmc, (cpupm->num_spd + 1) * sizeof (char *));
		return (result);
	}

	for (i = cpupm->num_spd, cur_spd = cpupm->head_spd; i > 0;
	    i--, cur_spd = cur_spd->down_spd) {
		cur_spd->pm_level = i;
		pmc[i] = kmem_zalloc((size * sizeof (char)), KM_SLEEP);
		comp_spd = CPUDRV_PM_COMP_SPEED(cpupm, cur_spd);
		if (comp_spd > CPUDRV_PM_COMP_MAX_VAL) {
			cmn_err(CE_WARN, "cpudrv_pm_comp_create: "
			    "instance %d: speed exceeded limits",
			    ddi_get_instance(cpudsp->dip));
			for (j = cpupm->num_spd; j >= i; j--) {
				kmem_free(pmc[j], size * sizeof (char));
			}
			kmem_free(pmc, (cpupm->num_spd + 1) *
			    sizeof (char *));
			return (result);
		}
		CPUDRV_PM_COMP_SPRINT(pmc[i], cpupm, cur_spd, comp_spd)
		DPRINTF(D_PM_COMP_CREATE, ("cpudrv_pm_comp_create: "
		    "instance %d: pm-components power level %d string '%s'\n",
		    ddi_get_instance(cpudsp->dip), i, pmc[i]));
	}
	pmc[0] = kmem_zalloc(sizeof (name), KM_SLEEP);
	(void) strcat(pmc[0], name);
	DPRINTF(D_PM_COMP_CREATE, ("cpudrv_pm_comp_create: instance %d: "
	    "pm-components component name '%s'\n",
	    ddi_get_instance(cpudsp->dip), pmc[0]));

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, cpudsp->dip,
	    "pm-components", pmc, cpupm->num_spd + 1) == DDI_PROP_SUCCESS) {
		result = DDI_SUCCESS;
	} else {
		cmn_err(CE_WARN, "cpudrv_pm_comp_create: instance %d: "
		    "can't create pm-components property",
		    ddi_get_instance(cpudsp->dip));
	}

	for (i = cpupm->num_spd; i > 0; i--) {
		kmem_free(pmc[i], size * sizeof (char));
	}
	kmem_free(pmc[0], sizeof (name));
	kmem_free(pmc, (cpupm->num_spd + 1) * sizeof (char *));
	return (result);
}

/*
 * Marks a component idle in both the PM framework and the driver state
 * structure.
 */
#define	CPUDRV_PM_MONITOR_PM_IDLE_COMP(dip, cpupm) { \
	if ((cpupm)->pm_busycnt >= 1) { \
		if (pm_idle_component((dip), CPUDRV_PM_COMP_NUM) == \
		    DDI_SUCCESS) { \
			DPRINTF(D_PM_MONITOR, ("cpudrv_pm_monitor: " \
			    "instance %d: pm_idle_component called\n", \
			    ddi_get_instance((dip)))); \
			(cpupm)->pm_busycnt--; \
		} else { \
			cmn_err(CE_WARN, "cpudrv_pm_monitor: instance %d: " \
			    "can't idle CPU component", \
			    ddi_get_instance((dip))); \
		} \
	} \
}

/*
 * Marks a component busy in both the PM framework and the driver state
 * structure.
 */
#define	CPUDRV_PM_MONITOR_PM_BUSY_COMP(dip, cpupm) { \
	if ((cpupm)->pm_busycnt < 1) { \
		if (pm_busy_component((dip), CPUDRV_PM_COMP_NUM) == \
		    DDI_SUCCESS) { \
			DPRINTF(D_PM_MONITOR, ("cpudrv_pm_monitor: " \
			    "instance %d: pm_busy_component called\n", \
			    ddi_get_instance((dip)))); \
			(cpupm)->pm_busycnt++; \
		} else { \
			cmn_err(CE_WARN, "cpudrv_pm_monitor: instance %d: " \
			    "can't busy CPU component", \
			    ddi_get_instance((dip))); \
		} \
	} \
}

/*
 * Marks a component busy and calls pm_raise_power().
 */
#define	CPUDRV_PM_MONITOR_PM_BUSY_AND_RAISE(dip, cpudsp, cpupm, new_level) { \
	/* \
	 * Mark driver and PM framework busy first so framework doesn't try \
	 * to bring CPU to lower speed when we need to be at higher speed. \
	 */ \
	CPUDRV_PM_MONITOR_PM_BUSY_COMP((dip), (cpupm)); \
	mutex_exit(&(cpudsp)->lock); \
	DPRINTF(D_PM_MONITOR, ("cpudrv_pm_monitor: instance %d: " \
	    "pm_raise_power called to %d\n", ddi_get_instance((dip)), \
	    (new_level))); \
	if (pm_raise_power((dip), CPUDRV_PM_COMP_NUM, (new_level)) != \
	    DDI_SUCCESS) { \
		cmn_err(CE_WARN, "cpudrv_pm_monitor: instance %d: can't " \
		    "raise CPU power level", ddi_get_instance((dip))); \
	} \
	mutex_enter(&(cpudsp)->lock); \
}

/*
 * In order to monitor a CPU, we need to hold cpu_lock to access CPU
 * statistics. Holding cpu_lock is not allowed from a callout routine.
 * We dispatch the monitoring job to a taskq instead.
 */
static void
cpudrv_pm_monitor_disp(void *arg)
{
	cpudrv_devstate_t	*cpudsp = (cpudrv_devstate_t *)arg;

	/*
	 * We are here because the last task has scheduled a timeout.
	 * The queue should be empty at this time.
	 */
	mutex_enter(&cpudsp->cpudrv_pm.timeout_lock);
	if (!taskq_dispatch(cpudsp->cpudrv_pm.tq, cpudrv_pm_monitor, arg,
	    TQ_NOSLEEP)) {
		mutex_exit(&cpudsp->cpudrv_pm.timeout_lock);
		DPRINTF(D_PM_MONITOR, ("cpudrv_pm_monitor_disp: failed to "
		    "dispatch the cpudrv_pm_monitor taskq\n"));
		mutex_enter(&cpudsp->lock);
		CPUDRV_PM_MONITOR_INIT(cpudsp);
		mutex_exit(&cpudsp->lock);
		return;
	}
	cpudsp->cpudrv_pm.timeout_count++;
	mutex_exit(&cpudsp->cpudrv_pm.timeout_lock);
}

/*
 * Monitors each CPU for the amount of time the idle thread was running in
 * the last quantum and arranges for the CPU to go to the lower or higher
 * speed.  Called at the time interval appropriate for the current speed.
 * The time interval for normal speed is CPUDRV_PM_QUANT_CNT_NORMAL.  The
 * time interval for other speeds (including unknown speed) is
 * CPUDRV_PM_QUANT_CNT_OTHR.
 */
static void
cpudrv_pm_monitor(void *arg)
{
	cpudrv_devstate_t	*cpudsp = (cpudrv_devstate_t *)arg;
	cpudrv_pm_t		*cpupm;
	cpudrv_pm_spd_t		*cur_spd, *new_spd;
	cpu_t			*cp;
	dev_info_t		*dip;
	uint_t			idle_cnt, user_cnt, system_cnt;
	clock_t			lbolt_cnt;
	hrtime_t		msnsecs[NCMSTATES];
	boolean_t		is_ready;

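/*
 * Computes the number of ticks the CPU spent in the given microstate since
 * the last sample and updates the saved value.  The clamp guards against
 * the converted microstate time appearing to go backwards (e.g., due to
 * NSEC_TO_TICK rounding), which would otherwise make the unsigned delta
 * wrap around.
 */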
#define	GET_CPU_MSTATE_CNT(state, cnt) \
	msnsecs[state] = NSEC_TO_TICK(msnsecs[state]); \
	if (cpupm->lastquan_mstate[state] > msnsecs[state]) \
		msnsecs[state] = cpupm->lastquan_mstate[state]; \
	cnt = msnsecs[state] - cpupm->lastquan_mstate[state]; \
	cpupm->lastquan_mstate[state] = msnsecs[state]

	mutex_enter(&cpudsp->lock);
	cpupm = &(cpudsp->cpudrv_pm);
	if (cpupm->timeout_id == 0) {
		mutex_exit(&cpudsp->lock);
		goto do_return;
	}
	cur_spd = cpupm->cur_spd;
	dip = cpudsp->dip;

	/*
	 * We assume that a CPU is initialized and has a valid cpu_t
	 * structure if it is ready for cross calls. If this changes,
	 * additional checks might be needed.
	 *
	 * Additionally, for x86 platforms we cannot power manage
	 * any one instance until all instances have been initialized.
	 * That's because we don't know what the CPU domains look like
	 * until all instances have been initialized.
	 */
	is_ready = CPUDRV_PM_XCALL_IS_READY(cpudsp->cpu_id);
	if (!is_ready) {
		DPRINTF(D_PM_MONITOR, ("cpudrv_pm_monitor: instance %d: "
		    "CPU not ready for x-calls\n", ddi_get_instance(dip)));
	} else if (!(is_ready = cpudrv_pm_power_ready())) {
		DPRINTF(D_PM_MONITOR, ("cpudrv_pm_monitor: instance %d: "
		    "waiting for all CPUs to be power manageable\n",
		    ddi_get_instance(dip)));
	}
	if (!is_ready) {
		/*
		 * Make sure that we are busy so that the framework doesn't
		 * try to bring us down in this situation.
		 */
		CPUDRV_PM_MONITOR_PM_BUSY_COMP(dip, cpupm);
		CPUDRV_PM_MONITOR_INIT(cpudsp);
		mutex_exit(&cpudsp->lock);
		goto do_return;
	}


	/*
	 * Make sure that we are still not at an unknown power level.
	 */
	if (cur_spd == NULL) {
		DPRINTF(D_PM_MONITOR, ("cpudrv_pm_monitor: instance %d: "
		    "cur_spd is unknown\n", ddi_get_instance(dip)));
		CPUDRV_PM_MONITOR_PM_BUSY_AND_RAISE(dip, cpudsp, cpupm,
		    cpupm->targ_spd->pm_level);
		/*
		 * We just changed the speed. Wait until at least the next
		 * call to this routine before proceeding.
		 */
		CPUDRV_PM_MONITOR_INIT(cpudsp);
		mutex_exit(&cpudsp->lock);
		goto do_return;
	}

	mutex_enter(&cpu_lock);
	if ((cp = cpu_get(cpudsp->cpu_id)) == NULL) {
		mutex_exit(&cpu_lock);
		CPUDRV_PM_MONITOR_INIT(cpudsp);
		mutex_exit(&cpudsp->lock);
		cmn_err(CE_WARN, "cpudrv_pm_monitor: instance %d: can't get "
		    "cpu_t", ddi_get_instance(dip));
		goto do_return;
	}

	if (!cpupm->pm_started) {
		cpupm->pm_started = B_TRUE;
		set_supp_freqs(cp, cpupm);
	}

	get_cpu_mstate(cp, msnsecs);
	GET_CPU_MSTATE_CNT(CMS_IDLE, idle_cnt);
	GET_CPU_MSTATE_CNT(CMS_USER, user_cnt);
	GET_CPU_MSTATE_CNT(CMS_SYSTEM, system_cnt);

	/*
	 * We can't do anything when we have just switched to a state
	 * because there is no valid timestamp.
	 */
	if (cpupm->lastquan_lbolt == 0) {
		cpupm->lastquan_lbolt = lbolt;
		mutex_exit(&cpu_lock);
		CPUDRV_PM_MONITOR_INIT(cpudsp);
		mutex_exit(&cpudsp->lock);
		goto do_return;
	}

	/*
	 * Various watermarks are based on this routine being called back
	 * exactly at the requested period. This is not guaranteed
	 * because this routine is called from a taskq that is dispatched
	 * from a timeout routine.  Handle this by finding out how many
	 * ticks have elapsed since the last call (lbolt_cnt) and adjusting
	 * the idle_cnt based on the delay added to the requested period
	 * by timeout and taskq.
	 */
	lbolt_cnt = lbolt - cpupm->lastquan_lbolt;
	cpupm->lastquan_lbolt = lbolt;
	mutex_exit(&cpu_lock);
	/*
	 * Time taken between recording the current counts and
	 * arranging the next call of this routine is an error in our
	 * calculation. We minimize the error by calling
	 * CPUDRV_PM_MONITOR_INIT() here instead of at the end of this
	 * routine.
	 */
	CPUDRV_PM_MONITOR_INIT(cpudsp);
	DPRINTF(D_PM_MONITOR_VERBOSE, ("cpudrv_pm_monitor: instance %d: "
	    "idle count %d, user count %d, system count %d, pm_level %d, "
	    "pm_busycnt %d\n", ddi_get_instance(dip), idle_cnt, user_cnt,
	    system_cnt, cur_spd->pm_level, cpupm->pm_busycnt));

#ifdef	DEBUG
	/*
	 * Notify that timeout and taskq have caused delays and we need to
	 * scale our parameters accordingly.
	 *
	 * To get an accurate result, don't turn on other DPRINTFs with
	 * the following DPRINTF. PROM calls generated by other
	 * DPRINTFs change the timing.
	 */
	if (lbolt_cnt > cur_spd->quant_cnt) {
		DPRINTF(D_PM_MONITOR_DELAY, ("cpudrv_pm_monitor: instance %d: "
		    "lbolt count %ld > quantum_count %u\n",
		    ddi_get_instance(dip), lbolt_cnt, cur_spd->quant_cnt));
	}
#endif	/* DEBUG */

	/*
	 * Adjust counts based on the delay added by timeout and taskq.
	 */
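	/*
	 * Worked example with made-up numbers: if quant_cnt is 100 ticks
	 * but 125 ticks actually elapsed, an observed idle_cnt of 50 ticks
	 * is scaled down to 50 * 100 / 125 = 40 ticks per requested
	 * quantum.
	 */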
	idle_cnt = (idle_cnt * cur_spd->quant_cnt) / lbolt_cnt;
	user_cnt = (user_cnt * cur_spd->quant_cnt) / lbolt_cnt;
	if ((user_cnt > cur_spd->user_hwm) || (idle_cnt < cur_spd->idle_lwm &&
	    cur_spd->idle_blwm_cnt >= cpudrv_pm_idle_blwm_cnt_max)) {
		cur_spd->idle_blwm_cnt = 0;
		cur_spd->idle_bhwm_cnt = 0;
		/*
		 * In a normal situation, arrange to go to the next higher
		 * speed.  If we are running in special direct pm mode, we
		 * just stay at the current speed.
		 */
		if (cur_spd == cur_spd->up_spd || cpudrv_direct_pm) {
			CPUDRV_PM_MONITOR_PM_BUSY_COMP(dip, cpupm);
		} else {
			new_spd = cur_spd->up_spd;
			CPUDRV_PM_MONITOR_PM_BUSY_AND_RAISE(dip, cpudsp, cpupm,
			    new_spd->pm_level);
		}
	} else if (((user_cnt <= cur_spd->user_lwm) &&
	    (idle_cnt >= cur_spd->idle_hwm)) || !CPU_ACTIVE(cp)) {
		cur_spd->idle_blwm_cnt = 0;
		cur_spd->idle_bhwm_cnt = 0;
		/*
		 * Arrange to go to the next lower speed by informing our
		 * idle status to the power management framework.
		 */
		CPUDRV_PM_MONITOR_PM_IDLE_COMP(dip, cpupm);
	} else {
		/*
		 * If we are between the idle water marks and have not
		 * been here enough consecutive times to be considered
		 * busy, just increment the count and return.
		 */
		if ((idle_cnt < cur_spd->idle_hwm) &&
		    (idle_cnt >= cur_spd->idle_lwm) &&
		    (cur_spd->idle_bhwm_cnt < cpudrv_pm_idle_bhwm_cnt_max)) {
			cur_spd->idle_blwm_cnt = 0;
			cur_spd->idle_bhwm_cnt++;
			mutex_exit(&cpudsp->lock);
			goto do_return;
		}
		if (idle_cnt < cur_spd->idle_lwm) {
			cur_spd->idle_blwm_cnt++;
			cur_spd->idle_bhwm_cnt = 0;
		}
		/*
		 * Arrange to stay at the current speed.
		 */
		CPUDRV_PM_MONITOR_PM_BUSY_COMP(dip, cpupm);
	}
	mutex_exit(&cpudsp->lock);
do_return:
	mutex_enter(&cpupm->timeout_lock);
	ASSERT(cpupm->timeout_count > 0);
	cpupm->timeout_count--;
	cv_signal(&cpupm->timeout_cv);
	mutex_exit(&cpupm->timeout_lock);
}
1153