
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2009-2010, Intel Corporation.
 * All rights reserved.
 * Copyright 2020 Joyent, Inc.
 * Copyright 2023 Oxide Computer Company
 */

#define	PSMI_1_7
#include <sys/smp_impldefs.h>
#include <sys/psm.h>
#include <sys/psm_modctl.h>
#include <sys/pit.h>
#include <sys/cmn_err.h>
#include <sys/strlog.h>
#include <sys/clock.h>
#include <sys/debug.h>
#include <sys/rtc.h>
#include <sys/x86_archext.h>
#include <sys/cpupart.h>
#include <sys/cpuvar.h>
#include <sys/cpu_event.h>
#include <sys/cmt.h>
#include <sys/cpu.h>
#include <sys/disp.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/memlist.h>
#include <sys/param.h>
#include <sys/promif.h>
#include <sys/cpu_pm.h>
#if defined(__xpv)
#include <sys/hypervisor.h>
#endif
#include <sys/mach_intr.h>
#include <vm/hat_i86.h>
#include <sys/kdi_machimpl.h>
#include <sys/sdt.h>
#include <sys/hpet.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/cpc_pcbe.h>
#include <sys/prom_debug.h>
#include <sys/tsc.h>

#define	OFFSETOF(s, m)		(size_t)(&(((s *)0)->m))
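
/*
 * OFFSETOF() is a local equivalent of offsetof(); mach_get_platform()
 * below divides it by sizeof (void (*)(void)) to count how many
 * function pointers precede a given member of struct psm_ops.
 */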

/*
 *	Local function prototypes
 */
static int mp_disable_intr(processorid_t cpun);
static void mp_enable_intr(processorid_t cpun);
static void mach_init();
static void mach_picinit();
static int machhztomhz(uint64_t cpu_freq_hz);
static uint64_t mach_getcpufreq(void);
static void mach_fixcpufreq(void);
static int mach_clkinit(int, int *);
static void mach_smpinit(void);
static int mach_softlvl_to_vect(int ipl);
static void mach_get_platform(int owner);
static void mach_construct_info();
static int mach_translate_irq(dev_info_t *dip, int irqno);
static int mach_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);
static void mach_notify_error(int level, char *errmsg);
static hrtime_t dummy_hrtime(void);
static void dummy_scalehrtime(hrtime_t *);
static uint64_t dummy_unscalehrtime(hrtime_t);
void cpu_idle(void);
static void cpu_wakeup(cpu_t *, int);
#ifndef __xpv
void cpu_idle_mwait(void);
static void cpu_wakeup_mwait(cpu_t *, int);
#endif
static int mach_cpu_create_devinfo(cpu_t *cp, dev_info_t **dipp);

/*
 *	External reference functions
 */
extern void return_instr();
extern void pc_gethrestime(timestruc_t *);
extern int cpuid_get_coreid(cpu_t *);
extern int cpuid_get_chipid(cpu_t *);

/*
 *	PSM functions initialization
 */
void (*psm_shutdownf)(int, int)	= (void (*)(int, int))return_instr;
void (*psm_preshutdownf)(int, int) = (void (*)(int, int))return_instr;
void (*psm_notifyf)(int)	= (void (*)(int))return_instr;
void (*psm_set_idle_cpuf)(int)	= (void (*)(int))return_instr;
void (*psm_unset_idle_cpuf)(int) = (void (*)(int))return_instr;
void (*psminitf)()		= mach_init;
void (*picinitf)()		= return_instr;
int (*clkinitf)(int, int *)	= (int (*)(int, int *))return_instr;
int (*ap_mlsetup)()		= (int (*)(void))return_instr;
void (*send_dirintf)()		= return_instr;
void (*setspl)(int)		= (void (*)(int))return_instr;
int (*addspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
int (*delspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
int (*get_pending_spl)(void)	= (int (*)(void))return_instr;
int (*addintr)(void *, int, avfunc, char *, int, caddr_t, caddr_t,
    uint64_t *, dev_info_t *) = NULL;
void (*remintr)(void *, int, avfunc, int) = NULL;
void (*kdisetsoftint)(int, struct av_softinfo *) =
	(void (*)(int, struct av_softinfo *))return_instr;
void (*setsoftint)(int, struct av_softinfo *) =
	(void (*)(int, struct av_softinfo *))return_instr;
int (*slvltovect)(int)		= (int (*)(int))return_instr;
int (*setlvl)(int, int *)	= (int (*)(int, int *))return_instr;
void (*setlvlx)(int, int)	= (void (*)(int, int))return_instr;
int (*psm_disable_intr)(int)	= mp_disable_intr;
void (*psm_enable_intr)(int)	= mp_enable_intr;
hrtime_t (*gethrtimef)(void)	= dummy_hrtime;
hrtime_t (*gethrtimeunscaledf)(void)	= dummy_hrtime;
void (*scalehrtimef)(hrtime_t *)	= dummy_scalehrtime;
uint64_t (*unscalehrtimef)(hrtime_t)	= dummy_unscalehrtime;
int (*psm_translate_irq)(dev_info_t *, int) = mach_translate_irq;
void (*gethrestimef)(timestruc_t *) = pc_gethrestime;
void (*psm_notify_error)(int, char *) = (void (*)(int, char *))NULL;
int (*psm_get_clockirq)(int) = NULL;
int (*psm_get_ipivect)(int, int) = NULL;
uchar_t (*psm_get_ioapicid)(uchar_t) = NULL;
uint32_t (*psm_get_localapicid)(uint32_t) = NULL;
uchar_t (*psm_xlate_vector_by_irq)(uchar_t) = NULL;
int (*psm_get_pir_ipivect)(void) = NULL;
void (*psm_send_pir_ipi)(processorid_t) = NULL;
void (*psm_cmci_setup)(processorid_t, boolean_t) = NULL;

int (*psm_clkinit)(int) = NULL;
void (*psm_timer_reprogram)(hrtime_t) = NULL;
void (*psm_timer_enable)(void) = NULL;
void (*psm_timer_disable)(void) = NULL;
void (*psm_post_cyclic_setup)(void *arg) = NULL;
int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, psm_intr_op_t,
    int *) = mach_intr_ops;
int (*psm_state)(psm_state_request_t *) = (int (*)(psm_state_request_t *))
    return_instr;

void (*notify_error)(int, char *) = (void (*)(int, char *))return_instr;
void (*hrtime_tick)(void)	= return_instr;

int (*psm_cpu_create_devinfo)(cpu_t *, dev_info_t **) = mach_cpu_create_devinfo;
int (*psm_cpu_get_devinfo)(cpu_t *, dev_info_t **) = NULL;

/* global IRM pool for APIX (PSM) module */
ddi_irm_pool_t *apix_irm_pool_p = NULL;

/*
 * True if the generic TSC code is our source of hrtime, rather than whatever
 * the PSM can provide.
 */
#ifdef __xpv
int tsc_gethrtime_enable = 0;
#else
int tsc_gethrtime_enable = 1;
#endif
int tsc_gethrtime_initted = 0;

/*
 * True if the hrtime implementation is "hires"; namely, better than microdata.
 */
int gethrtime_hires = 0;

/*
 * Local Static Data
 */
static struct psm_ops mach_ops;
static struct psm_ops *mach_set[4] = {&mach_ops, NULL, NULL, NULL};
static ushort_t mach_ver[4] = {0, 0, 0, 0};
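
/*
 * mach_set[0] is the merged ops vector that the rest of this file calls
 * through; the remaining slots hold the ops registered per PSM owner class
 * and are folded into slot 0 by mach_get_platform() in priority order
 * (PSM_OWN_SYS_DEFAULT, then PSM_OWN_EXCLUSIVE, then PSM_OWN_OVERRIDE).
 */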

/*
 * virtualization support for psm
 */
void *psm_vt_ops = NULL;
/*
 * If non-zero, idle cpus will become "halted" when there's
 * no work to do.
 */
int	idle_cpu_use_hlt = 1;

#ifndef __xpv
/*
 * If non-zero, idle cpus will use mwait if available to halt instead of hlt.
 */
int	idle_cpu_prefer_mwait = 1;
/*
 * Set to 0 to avoid MONITOR+CLFLUSH assertion.
 */
int	idle_cpu_assert_cflush_monitor = 1;

/*
 * If non-zero, idle cpus will not use the power saving Deep C-States idle
 * loop.
 */
int	idle_cpu_no_deep_c = 0;
/*
 * Non-power saving idle loop and wakeup pointers.
 * Allows user to toggle Deep Idle power saving feature on/off.
 */
void	(*non_deep_idle_cpu)() = cpu_idle;
void	(*non_deep_idle_disp_enq_thread)(cpu_t *, int);

/*
 * Object for the kernel to access the HPET.
 */
hpet_t hpet;

#endif	/* ifndef __xpv */

uint_t cp_haltset_fanout = 0;

/*ARGSUSED*/
int
pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		if (is_x86_feature(x86_featureset, X86FSET_HTT)) {
			/*
			 * Hyper-threading is SMT
			 */
			return (1);
		} else {
			return (0);
		}
	case PGHW_FPU:
		if (cpuid_get_cores_per_compunit(cp) > 1)
			return (1);
		else
			return (0);
	case PGHW_PROCNODE:
		if (cpuid_get_procnodes_per_pkg(cp) > 1)
			return (1);
		else
			return (0);
	case PGHW_CHIP:
		if (is_x86_feature(x86_featureset, X86FSET_CMP) ||
		    is_x86_feature(x86_featureset, X86FSET_HTT))
			return (1);
		else
			return (0);
	case PGHW_CACHE:
		if (cpuid_get_ncpu_sharing_last_cache(cp) > 1)
			return (1);
		else
			return (0);
	case PGHW_POW_ACTIVE:
		if (cpupm_domain_id(cp, CPUPM_DTYPE_ACTIVE) != (id_t)-1)
			return (1);
		else
			return (0);
	case PGHW_POW_IDLE:
		if (cpupm_domain_id(cp, CPUPM_DTYPE_IDLE) != (id_t)-1)
			return (1);
		else
			return (0);
	default:
		return (0);
	}
}

/*
 * Compare two CPUs and see if they have a pghw_type_t sharing relationship.
 * If pghw_type_t is an unsupported hardware type, then return -1.
 */
int
pg_plat_cpus_share(cpu_t *cpu_a, cpu_t *cpu_b, pghw_type_t hw)
{
	id_t pgp_a, pgp_b;

	pgp_a = pg_plat_hw_instance_id(cpu_a, hw);
	pgp_b = pg_plat_hw_instance_id(cpu_b, hw);

	if (pgp_a == -1 || pgp_b == -1)
		return (-1);

	return (pgp_a == pgp_b);
}

/*
 * Return a physical instance identifier for known hardware sharing
 * relationships
 */
id_t
pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		return (cpuid_get_coreid(cpu));
	case PGHW_CACHE:
		return (cpuid_get_last_lvl_cacheid(cpu));
	case PGHW_FPU:
		return (cpuid_get_compunitid(cpu));
	case PGHW_PROCNODE:
		return (cpuid_get_procnodeid(cpu));
	case PGHW_CHIP:
		return (cpuid_get_chipid(cpu));
	case PGHW_POW_ACTIVE:
		return (cpupm_domain_id(cpu, CPUPM_DTYPE_ACTIVE));
	case PGHW_POW_IDLE:
		return (cpupm_domain_id(cpu, CPUPM_DTYPE_IDLE));
	default:
		return (-1);
	}
}

/*
 * Express a preference for optimizing for sharing relationship
 * hw1 vs hw2.
 */
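/*
 * The relationship listed later in hw_hier below ranks higher; e.g.
 * pg_plat_hw_rank(PGHW_IPIPE, PGHW_CACHE) returns PGHW_CACHE.
 */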
pghw_type_t
pg_plat_hw_rank(pghw_type_t hw1, pghw_type_t hw2)
{
	int i, rank1, rank2;

	static pghw_type_t hw_hier[] = {
		PGHW_IPIPE,
		PGHW_CACHE,
		PGHW_FPU,
		PGHW_PROCNODE,
		PGHW_CHIP,
		PGHW_POW_IDLE,
		PGHW_POW_ACTIVE,
		PGHW_NUM_COMPONENTS
	};

	rank1 = 0;
	rank2 = 0;

	for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) {
		if (hw_hier[i] == hw1)
			rank1 = i;
		if (hw_hier[i] == hw2)
			rank2 = i;
	}

	if (rank1 > rank2)
		return (hw1);
	else
		return (hw2);
}

/*
 * Override the default CMT dispatcher policy for the specified
 * hardware sharing relationship.
 */
pg_cmt_policy_t
pg_plat_cmt_policy(pghw_type_t hw)
{
	/*
	 * For shared caches, also load balance across them to
	 * maximize aggregate cache capacity.
	 *
	 * On AMD family 0x15 CPUs, cores come in pairs called
	 * compute units, sharing the FPU and the I$ and L2
	 * caches. Use balancing and cache affinity.
	 */
	switch (hw) {
	case PGHW_FPU:
	case PGHW_CACHE:
		return (CMT_BALANCE|CMT_AFFINITY);
	default:
		return (CMT_NO_POLICY);
	}
}

id_t
pg_plat_get_core_id(cpu_t *cpu)
{
	return ((id_t)cpuid_get_coreid(cpu));
}

void
cmp_set_nosteal_interval(void)
{
	/* Set the nosteal interval (used by disp_getbest()) to 100us */
	nosteal_nsec = 100000UL;
}

/*
 * Routine to ensure that initial callers of gethrtime() get 0 as a
 * return value.
 */
static hrtime_t
dummy_hrtime(void)
{
	return (0);
}

/* ARGSUSED */
static void
dummy_scalehrtime(hrtime_t *ticks)
{}

static uint64_t
dummy_unscalehrtime(hrtime_t nsecs)
{
	return ((uint64_t)nsecs);
}

/*
 * Supports Deep C-State power saving idle loop.
 */
void
cpu_idle_adaptive(void)
{
	(*CPU->cpu_m.mcpu_idle_cpu)();
}

/*
 * Function called by the CPU idle notification framework to check whether
 * the CPU has been awakened.  It is called with interrupts disabled.
 * If the CPU has been awakened, call cpu_idle_exit() to notify the CPU
 * idle notification framework.
 */
/*ARGSUSED*/
static void
cpu_idle_check_wakeup(void *arg)
{
	/*
	 * Toggle the interrupt flag to detect pending interrupts.
	 * If an interrupt happened, do_interrupt() will notify the CPU idle
	 * notification framework, so there is no need to call
	 * cpu_idle_exit() here.
	 */
	sti();
	SMT_PAUSE();
	cli();
}

/*
 * Idle the present CPU until awakened via an interrupt.
 */
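/*
 * The handshake with cpu_wakeup() works as follows:
 *
 *	cpu_idle()			cpu_wakeup()
 *	----------------------		----------------------
 *	set CPU_DISP_HALTED		make thread runnable
 *	add self to cp_haltset		...
 *	disp_anywork()?			clear our bit in cp_haltset
 *	cli(); still in haltset?	poke_cpu()
 *	hlt (poke wakes us)
 *
 * Whatever the interleaving, either this CPU sees the new work in
 * disp_anywork() (or the later checks), or the waking CPU sees our
 * halted bit and pokes us out of hlt.
 */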
void
cpu_idle(void)
{
	cpu_t		*cpup = CPU;
	processorid_t	cpu_sid = cpup->cpu_seqid;
	cpupart_t	*cp = cpup->cpu_part;
	int		hset_update = 1;

	/*
	 * If this CPU is online, and there are multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmap
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted CPU bitmap is checked to determine who
	 * (if anyone) should be awakened. We therefore need to first
	 * add ourselves to the bitmap, and then check if there
	 * is any work available. The order is important to prevent a race
	 * that can lead to work languishing on a run queue somewhere while
	 * this CPU remains halted.
	 *
	 * Either the producing CPU will see we're halted and will awaken us,
	 * or this CPU will see the work available in disp_anywork().
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitset)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmap, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check then the poke will pop us out of the halted state.
	 *
	 * This means that the ordering of the poke and the clearing
	 * of the bit by cpu_wakeup is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_idle() must disable interrupts, then check for the bit.
	 */
	cli();

	if (hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid) == 0) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		sti();
		return;
	}

	/*
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	if (cpup->cpu_disp->disp_nrunnable != 0) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		sti();
		return;
	}

	if (cpu_idle_enter(IDLE_STATE_C1, 0,
	    cpu_idle_check_wakeup, NULL) == 0) {
		mach_cpu_idle();
		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
	}

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}

/*
 * If "cpu" is halted, then wake it up clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	processorid_t	cpu_sid;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	cpu_sid = cpu->cpu_seqid;
	if (bitset_in_set(&cp->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if this is for a bound thread.
	 */
	if (bound)
		return;

	/*
	 * The CPU specified for wakeup isn't currently halted, so check
	 * to see if there are any other halted CPUs in the partition,
	 * and if there are then awaken one.  It's possible that somebody
	 * else awakens the CPU we found before we get the chance; in that
	 * case bitset_atomic_test_and_del() fails and we look again.
	 */
	do {
		cpu_found = bitset_find(&cp->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;
	} while (bitset_atomic_test_and_del(&cp->cp_haltset, cpu_found) < 0);

	if (cpu_found != CPU->cpu_seqid) {
		poke_cpu(cpu_seq[cpu_found]->cpu_id);
	}
}

#ifndef __xpv
/*
 * Function called by the CPU idle notification framework to check whether
 * the CPU has been awakened.  It is called with interrupts disabled.
 * If the CPU has been awakened, call cpu_idle_exit() to notify the CPU
 * idle notification framework.
 */
static void
cpu_idle_mwait_check_wakeup(void *arg)
{
	volatile uint32_t *mcpu_mwait = (volatile uint32_t *)arg;

	ASSERT(arg != NULL);
	if (*mcpu_mwait != MWAIT_HALTED) {
		/*
		 * The CPU has been awakened; notify the CPU idle
		 * notification system.
		 */
		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
	} else {
		/*
		 * Toggle the interrupt flag to detect pending interrupts.
		 * If an interrupt happened, do_interrupt() will notify the
		 * CPU idle notification framework, so there is no need to
		 * call cpu_idle_exit() here.
		 */
		sti();
		SMT_PAUSE();
		cli();
	}
}

/*
 * Idle the present CPU until awakened via touching its monitored line.
 */
void
cpu_idle_mwait(void)
{
	volatile uint32_t	*mcpu_mwait = CPU->cpu_m.mcpu_mwait;
	cpu_t			*cpup = CPU;
	processorid_t		cpu_sid = cpup->cpu_seqid;
	cpupart_t		*cp = cpup->cpu_part;
	int			hset_update = 1;

	/*
	 * Set our mcpu_mwait here, so we can tell if anyone tries to
	 * wake us between now and when we call mwait.  No other cpu will
	 * attempt to set our mcpu_mwait until we add ourselves to the halted
	 * CPU bitmap.
	 */
	*mcpu_mwait = MWAIT_HALTED;

	/*
	 * If this CPU is online, and there are multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmap
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted CPU bitmap is checked to determine who
	 * (if anyone) should be awakened. We therefore need to first
	 * add ourselves to the bitmap, and then check if there
	 * is any work available.
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitmap)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmap, and a write to our mcpu_mwait.
	 *
	 * disp_anywork() checks disp_nrunnable, so we do not have to later.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 * To avoid a lost wakeup, arm the monitor before checking if another
	 * cpu wrote to mcpu_mwait to wake us up.
	 */
	i86_monitor(mcpu_mwait, 0, 0);
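	/*
	 * Once the monitor is armed, a MWAIT_WAKEUP() store to mcpu_mwait
	 * terminates the mwait even if it lands between the check below
	 * and the i86_mwait() itself, so the wakeup cannot be lost.
	 */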
	if (*mcpu_mwait == MWAIT_HALTED) {
		if (cpu_idle_enter(IDLE_STATE_C1, 0,
		    cpu_idle_mwait_check_wakeup, (void *)mcpu_mwait) == 0) {
			if (*mcpu_mwait == MWAIT_HALTED) {
				i86_mwait(0, 0);
			}
			cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
		}
	}

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}

/*
 * If "cpu" is halted in mwait, then wake it up clearing its halted bit in
 * advance.  Otherwise, see if other CPUs in the cpu partition are halted and
 * need to be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup_mwait(cpu_t *cp, int bound)
{
	cpupart_t	*cpu_part;
	uint_t		cpu_found;
	processorid_t	cpu_sid;

	cpu_part = cp->cpu_part;
	cpu_sid = cp->cpu_seqid;

	if (bitset_in_set(&cpu_part->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * woken up in a moment.
		 */
		bitset_atomic_del(&cpu_part->cp_haltset, cpu_sid);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Waking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 *
		 * monitor/mwait wakeup via writing to our cache line is
		 * harmless and less expensive than always checking if we
		 * are waking ourself, which is an uncommon case.
		 */
		MWAIT_WAKEUP(cp);	/* write to monitored line */
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cp->cpu_thread == cp->cpu_idle_thread ||
		    cp->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound || ncpus == 1)
		return;

	/*
	 * See if there are any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		cpu_found = bitset_find(&cpu_part->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;
	} while (bitset_atomic_test_and_del(&cpu_part->cp_haltset,
	    cpu_found) < 0);

	/*
	 * Do not check if cpu_found is ourself, as monitor/mwait
	 * wakeup is cheap.
	 */
	MWAIT_WAKEUP(cpu_seq[cpu_found]); /* write to monitored line */
}

#endif

void (*cpu_pause_handler)(volatile char *) = NULL;

static int
mp_disable_intr(int cpun)
{
	/*
	 * switch to the offline cpu
	 */
	affinity_set(cpun);
	/*
	 * raise ipl to just below cross call
	 */
	splx(XC_SYS_PIL - 1);
	/*
	 * set base spl to prevent the next swtch to idle from
	 * lowering back to ipl 0
	 */
	CPU->cpu_intr_actv |= (1 << (XC_SYS_PIL - 1));
	set_base_spl();
	affinity_clear();
	return (DDI_SUCCESS);
}

static void
mp_enable_intr(int cpun)
{
	/*
	 * switch to the online cpu
	 */
	affinity_set(cpun);
	/*
	 * clear the interrupt active mask
	 */
	CPU->cpu_intr_actv &= ~(1 << (XC_SYS_PIL - 1));
	set_base_spl();
	(void) spl0();
	affinity_clear();
}

static void
mach_get_platform(int owner)
{
	void		**srv_opsp;
	void		**clt_opsp;
	int		i;
	int		total_ops;

	/* fix up psm ops */
	srv_opsp = (void **)mach_set[0];
	clt_opsp = (void **)mach_set[owner];
	if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01)
		total_ops = sizeof (struct psm_ops_ver01) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_1)
		/* no psm_notify_func */
		total_ops = OFFSETOF(struct psm_ops, psm_notify_func) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_2)
		/* no psm_timer funcs */
		total_ops = OFFSETOF(struct psm_ops, psm_timer_reprogram) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_3)
		/* no psm_preshutdown function */
		total_ops = OFFSETOF(struct psm_ops, psm_preshutdown) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_4)
		/* no psm_intr_ops function */
		total_ops = OFFSETOF(struct psm_ops, psm_intr_ops) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_5)
		/* no psm_state function */
		total_ops = OFFSETOF(struct psm_ops, psm_state) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_6)
		/* no psm_cpu_ops function */
		total_ops = OFFSETOF(struct psm_ops, psm_cpu_ops) /
		    sizeof (void (*)(void));
	else
		total_ops = sizeof (struct psm_ops) / sizeof (void (*)(void));

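	/*
	 * total_ops is the number of function pointers that precede the
	 * first struct psm_ops member the client's PSMI version does not
	 * know about; e.g. a PSM_INFO_VER01_5 module predates psm_state,
	 * so the copy loop below stops short of it and never reads past
	 * what the module actually provides.
	 */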
	/*
	 * Save the version of the PSM module, in case we need to
	 * behave differently based on version.
	 */
	mach_ver[0] = mach_ver[owner];

	for (i = 0; i < total_ops; i++)
		if (clt_opsp[i] != NULL)
			srv_opsp[i] = clt_opsp[i];
}

static void
mach_construct_info()
{
	struct psm_sw *swp;
	int	mach_cnt[PSM_OWN_OVERRIDE+1] = {0};
	int	conflict_owner = 0;

	if (psmsw->psw_forw == psmsw)
		panic("No valid PSM modules found");
	mutex_enter(&psmsw_lock);
	for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
		if (!(swp->psw_flag & PSM_MOD_IDENTIFY))
			continue;
		mach_set[swp->psw_infop->p_owner] = swp->psw_infop->p_ops;
		mach_ver[swp->psw_infop->p_owner] = swp->psw_infop->p_version;
		mach_cnt[swp->psw_infop->p_owner]++;
	}
	mutex_exit(&psmsw_lock);

	mach_get_platform(PSM_OWN_SYS_DEFAULT);

	/* check to see if there are any conflicts */
	if (mach_cnt[PSM_OWN_EXCLUSIVE] > 1)
		conflict_owner = PSM_OWN_EXCLUSIVE;
	if (mach_cnt[PSM_OWN_OVERRIDE] > 1)
		conflict_owner = PSM_OWN_OVERRIDE;
	if (conflict_owner) {
		/* remove all psm modules except uppc */
		cmn_err(CE_WARN,
		    "Conflicts detected on the following PSM modules:");
		mutex_enter(&psmsw_lock);
		for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
			if (swp->psw_infop->p_owner == conflict_owner)
				cmn_err(CE_WARN, "%s ",
				    swp->psw_infop->p_mach_idstring);
		}
		mutex_exit(&psmsw_lock);
		cmn_err(CE_WARN,
		    "Setting the system back to SINGLE processor mode!");
		cmn_err(CE_WARN,
		    "Please edit /etc/mach to remove the invalid PSM module.");
		return;
	}

	if (mach_set[PSM_OWN_EXCLUSIVE])
		mach_get_platform(PSM_OWN_EXCLUSIVE);

	if (mach_set[PSM_OWN_OVERRIDE])
		mach_get_platform(PSM_OWN_OVERRIDE);
}

static void
mach_init()
{
	struct psm_ops  *pops;

	PRM_POINT("mach_construct_info()");
	mach_construct_info();

	pops = mach_set[0];

	/* register the interrupt and clock initialization routines */
	picinitf = mach_picinit;
	clkinitf = mach_clkinit;
	psm_get_clockirq = pops->psm_get_clockirq;

	/* register the interrupt setup code */
	slvltovect = mach_softlvl_to_vect;
	addspl	= pops->psm_addspl;
	delspl	= pops->psm_delspl;

	if (pops->psm_translate_irq)
		psm_translate_irq = pops->psm_translate_irq;
	if (pops->psm_intr_ops)
		psm_intr_ops = pops->psm_intr_ops;

#if defined(PSMI_1_2) || defined(PSMI_1_3) || defined(PSMI_1_4)
	/*
	 * Time-of-day functionality now handled in TOD modules.
	 * (Warn about PSM modules that think that we're going to use
	 * their ops vectors.)
	 */
	if (pops->psm_tod_get)
		cmn_err(CE_WARN, "obsolete psm_tod_get op %p",
		    (void *)pops->psm_tod_get);

	if (pops->psm_tod_set)
		cmn_err(CE_WARN, "obsolete psm_tod_set op %p",
		    (void *)pops->psm_tod_set);
#endif

	if (pops->psm_notify_error) {
		psm_notify_error = mach_notify_error;
		notify_error = pops->psm_notify_error;
	}

	PRM_POINT("psm_softinit()");
	(*pops->psm_softinit)();

	/*
	 * Initialize the dispatcher's function hooks to enable CPU halting
	 * when idle.  Set both the deep-idle and non-deep-idle hooks.
	 *
	 * Assume we can use the power saving deep-idle loop
	 * cpu_idle_adaptive.  The platform deep-idle driver will reset our
	 * idle loop to non_deep_idle_cpu if the power saving deep-idle
	 * feature is not available.
	 *
	 * Do not use monitor/mwait if idle_cpu_use_hlt is not set (spin
	 * idle) or idle_cpu_prefer_mwait is not set.
	 * Allocate the monitor/mwait buffer for cpu0.
	 */
#ifndef __xpv
	non_deep_idle_disp_enq_thread = disp_enq_thread;
#endif
	PRM_DEBUG(idle_cpu_use_hlt);
	if (idle_cpu_use_hlt) {
		idle_cpu = cpu_idle_adaptive;
		CPU->cpu_m.mcpu_idle_cpu = cpu_idle;
#ifndef __xpv
		if (is_x86_feature(x86_featureset, X86FSET_MWAIT) &&
		    idle_cpu_prefer_mwait) {
			CPU->cpu_m.mcpu_mwait = cpuid_mwait_alloc(CPU);
			/*
			 * Protect ourselves from an insane mwait size.
			 */
			if (CPU->cpu_m.mcpu_mwait == NULL) {
#ifdef DEBUG
				cmn_err(CE_NOTE, "Using hlt idle.  Cannot "
				    "handle cpu 0 mwait size.");
#endif
				idle_cpu_prefer_mwait = 0;
				CPU->cpu_m.mcpu_idle_cpu = cpu_idle;
			} else {
				CPU->cpu_m.mcpu_idle_cpu = cpu_idle_mwait;
			}
		} else {
			CPU->cpu_m.mcpu_idle_cpu = cpu_idle;
		}
		non_deep_idle_cpu = CPU->cpu_m.mcpu_idle_cpu;

		/*
		 * Disable power saving deep idle loop?
		 */
		if (idle_cpu_no_deep_c) {
			idle_cpu = non_deep_idle_cpu;
		}
#endif
	}

	PRM_POINT("mach_smpinit()");
	mach_smpinit();
}

static void
mach_smpinit(void)
{
	struct psm_ops  *pops;
	processorid_t cpu_id;
	int cnt;
	cpuset_t cpumask;

	pops = mach_set[0];
	CPUSET_ZERO(cpumask);

	cpu_id = -1;
	cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	/*
	 * Only add boot_ncpus CPUs to mp_cpus. Other CPUs will be handled
	 * by the CPU DR driver at runtime.
	 */
	for (cnt = 0; cpu_id != -1 && cnt < boot_ncpus; cnt++) {
		CPUSET_ADD(cpumask, cpu_id);
		cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	}

	mp_cpus = cpumask;

	/* MP related routines */
	ap_mlsetup = pops->psm_post_cpu_start;
	send_dirintf = pops->psm_send_ipi;

	/* optional MP related routines */
	if (pops->psm_shutdown)
		psm_shutdownf = pops->psm_shutdown;
	if (pops->psm_preshutdown)
		psm_preshutdownf = pops->psm_preshutdown;
	if (pops->psm_notify_func)
		psm_notifyf = pops->psm_notify_func;
	if (pops->psm_set_idlecpu)
		psm_set_idle_cpuf = pops->psm_set_idlecpu;
	if (pops->psm_unset_idlecpu)
		psm_unset_idle_cpuf = pops->psm_unset_idlecpu;

	psm_clkinit = pops->psm_clkinit;

	if (pops->psm_timer_reprogram)
		psm_timer_reprogram = pops->psm_timer_reprogram;

	if (pops->psm_timer_enable)
		psm_timer_enable = pops->psm_timer_enable;

	if (pops->psm_timer_disable)
		psm_timer_disable = pops->psm_timer_disable;

	if (pops->psm_post_cyclic_setup)
		psm_post_cyclic_setup = pops->psm_post_cyclic_setup;

	if (pops->psm_state)
		psm_state = pops->psm_state;

	/*
	 * Set these vectors here so they can be used by Suspend/Resume
	 * on UP machines.
	 */
	if (pops->psm_disable_intr)
		psm_disable_intr = pops->psm_disable_intr;
	if (pops->psm_enable_intr)
		psm_enable_intr  = pops->psm_enable_intr;

	/*
	 * Set this vector so it can be used by vmbus (for Hyper-V).
	 * It is needed even for single-CPU systems.  This works for
	 * "pcplusmp" and "apix" platforms, but not "uppc" (because
	 * "Uni-processor PC" does not provide a _get_ipivect).
	 */
	psm_get_ipivect = pops->psm_get_ipivect;

	/* check for multiple CPUs */
	if (cnt < 2 && plat_dr_support_cpu() == B_FALSE)
		return;

	/* check for MP platforms */
	if (pops->psm_cpu_start == NULL)
		return;

	/*
	 * Set the dispatcher hook to enable cpu "wake up"
	 * when a thread becomes runnable.
	 */
	if (idle_cpu_use_hlt) {
		disp_enq_thread = cpu_wakeup;
#ifndef __xpv
		if (is_x86_feature(x86_featureset, X86FSET_MWAIT) &&
		    idle_cpu_prefer_mwait)
			disp_enq_thread = cpu_wakeup_mwait;
		non_deep_idle_disp_enq_thread = disp_enq_thread;
#endif
	}

	psm_get_pir_ipivect = pops->psm_get_pir_ipivect;
	psm_send_pir_ipi = pops->psm_send_pir_ipi;
	psm_cmci_setup = pops->psm_cmci_setup;

	(void) add_avintr((void *)NULL, XC_HI_PIL, xc_serv, "xc_intr",
	    (*pops->psm_get_ipivect)(XC_HI_PIL, PSM_INTR_IPI_HI),
	    NULL, NULL, NULL, NULL);

	(void) (*pops->psm_get_ipivect)(XC_CPUPOKE_PIL, PSM_INTR_POKE);
}

static void
mach_picinit()
{
	struct psm_ops  *pops;

	pops = mach_set[0];

	/* register the interrupt handlers */
	setlvl = pops->psm_intr_enter;
	setlvlx = pops->psm_intr_exit;

	/* initialize the interrupt hardware */
	(*pops->psm_picinit)();

	/* set interrupt mask for current ipl */
	setspl = pops->psm_setspl;
	cli();
	setspl(CPU->cpu_pri);
}

uint_t	cpu_freq;	/* MHz */
uint64_t cpu_freq_hz;	/* measured (in hertz) */

#define	MEGA_HZ		1000000

#ifdef __xpv

int xpv_cpufreq_workaround = 1;
int xpv_cpufreq_verbose = 0;

#endif	/* __xpv */

static uint64_t
mach_getcpufreq(void)
{
#ifndef __xpv
	return (tsc_get_freq());
#else
	vcpu_time_info_t *vti = &CPU->cpu_m.mcpu_vcpu_info->time;
	uint64_t cpu_hz;

	/*
	 * During dom0 bringup, it was noted that on at least one older
	 * Intel HT machine, the hypervisor initially gives a tsc_to_system_mul
	 * value that is quite wrong (the 3.06GHz clock was reported
	 * as 4.77GHz).
	 *
	 * The curious thing is that if you stop the kernel at entry,
	 * breakpoint here and inspect the value with kmdb, the value
	 * is correct - but if you don't stop and simply enable the
	 * printf statement (below), you can see the bad value printed
	 * here.  Almost as if something kmdb did caused the hypervisor to
	 * figure it out correctly.  And, note that the hypervisor
	 * eventually -does- figure it out correctly ... if you look at
	 * the field later in the life of dom0, it is correct.
	 *
	 * For now, on dom0, we employ a slightly cheesy workaround of
	 * using the DOM0_PHYSINFO hypercall.
	 */
	if (DOMAIN_IS_INITDOMAIN(xen_info) && xpv_cpufreq_workaround) {
		cpu_hz = 1000 * xpv_cpu_khz();
	} else {
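		/*
		 * Xen derives system time (roughly) as
		 * ns = (tsc_delta * tsc_to_system_mul) >> 32, pre-scaled
		 * by tsc_shift, so the TSC frequency in Hz is
		 * (10^9 << 32) / tsc_to_system_mul, corrected for the
		 * shift below.
		 */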
		cpu_hz = (UINT64_C(1000000000) << 32) / vti->tsc_to_system_mul;

		if (vti->tsc_shift < 0)
			cpu_hz <<= -vti->tsc_shift;
		else
			cpu_hz >>= vti->tsc_shift;
	}

	if (xpv_cpufreq_verbose)
		printf("mach_getcpufreq: system_mul 0x%x, shift %d, "
		    "cpu_hz %" PRId64 "Hz\n",
		    vti->tsc_to_system_mul, vti->tsc_shift, cpu_hz);

	return (cpu_hz);
#endif	/* __xpv */
}

/*
 * If the clock speed of a cpu is found to be reported incorrectly, do not
 * add to this array; instead, improve the accuracy of the algorithm that
 * determines the clock speed of the processor, or extend the implementation
 * to support the vendor as appropriate. This is here only to support
 * adjusting the speed on older slower processors that mach_fixcpufreq()
 * would not be able to account for otherwise.
 */
static int x86_cpu_freq[] = { 60, 75, 80, 90, 120, 160, 166, 175, 180, 233 };

/*
 * On fast processors the clock frequency that is measured may be off by
 * a few MHz from the value printed on the part. This is a combination of
 * two factors: being off by this much is within the manufacturing
 * tolerances for such fast parts, and the measurement itself is difficult
 * enough to introduce small errors. This function uses some heuristics in
 * order to tweak the value that was measured to match what is most likely
 * printed on the part.
 *
 * Some examples:
 *	AMD Athlon 1000 MHz measured as 998 MHz
 *	Intel Pentium III Xeon 733 MHz measured as 731 MHz
 *	Intel Pentium IV 1500 MHz measured as 1495 MHz
 *
 * If in the future this function is no longer sufficient to correct
 * for the error in the measurement, then the algorithm used to perform
 * the measurement will have to be improved in order to increase accuracy
 * rather than adding horrible and questionable kludges here.
 *
 * This is called after the cyclics subsystem because of the potential
 * that the heuristics within may give a worse estimate of the clock
 * frequency than the value that was measured.
 */
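/*
 * Worked example: a part measured at 998 MHz gives near66 = 1000 (mul = 15,
 * with the round-up fudge applied for multiples >= 667 MHz) and near50 =
 * 1000, so delta = 2 and cpu_freq is corrected to 1000.  Any candidate more
 * than 6 MHz from the measured value is rejected and the measurement stands.
 */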
static void
mach_fixcpufreq(void)
{
	uint32_t freq, mul, near66, delta66, near50, delta50, fixed, delta, i;

	freq = (uint32_t)cpu_freq;

	/*
	 * Find the nearest integer multiple of 200/3 (about 66) MHz to the
	 * measured speed taking into account that the 667 MHz parts were
	 * the first to round-up.
	 */
	mul = (uint32_t)((3 * (uint64_t)freq + 100) / 200);
	near66 = (uint32_t)((200 * (uint64_t)mul + ((mul >= 10) ? 1 : 0)) / 3);
	delta66 = (near66 > freq) ? (near66 - freq) : (freq - near66);

	/* Find the nearest integer multiple of 50 MHz to the measured speed */
	mul = (freq + 25) / 50;
	near50 = mul * 50;
	delta50 = (near50 > freq) ? (near50 - freq) : (freq - near50);

	/* Find the closer of the two */
	if (delta66 < delta50) {
		fixed = near66;
		delta = delta66;
	} else {
		fixed = near50;
		delta = delta50;
	}

	if (fixed > INT_MAX)
		return;

	/*
	 * Some older parts have a core clock frequency that is not an
	 * integral multiple of 50 or 66 MHz. Check if one of the old
	 * clock frequencies is closer to the measured value than any
	 * of the integral multiples of 50 and 66, and if so set fixed
	 * and delta appropriately to represent the closest value.
	 */
	i = sizeof (x86_cpu_freq) / sizeof (int);
	while (i > 0) {
		i--;

		if (x86_cpu_freq[i] <= freq) {
			mul = freq - x86_cpu_freq[i];

			if (mul < delta) {
				fixed = x86_cpu_freq[i];
				delta = mul;
			}

			break;
		}

		mul = x86_cpu_freq[i] - freq;

		if (mul < delta) {
			fixed = x86_cpu_freq[i];
			delta = mul;
		}
	}

	/*
	 * Set a reasonable maximum for how much to correct the measured
	 * result by. This check is here to prevent the adjustment made
	 * by this function from being more harm than good. It is entirely
	 * possible that in the future parts will be made that are not
	 * integral multiples of 66 or 50 in clock frequency or that
	 * someone may overclock a part to some odd frequency. If the
	 * measured value is farther from the corrected value than
	 * allowed, then assume the corrected value is in error and use
	 * the measured value.
	 */
	if (6 < delta)
		return;

	cpu_freq = (int)fixed;
}

static int
machhztomhz(uint64_t cpu_freq_hz)
{
	uint64_t cpu_mhz;

	/* Round to the nearest MHz */
	cpu_mhz = (cpu_freq_hz + (MEGA_HZ / 2)) / MEGA_HZ;
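	/* e.g. 2,399,500,000 Hz rounds up to 2400 MHz */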

	if (cpu_mhz > INT_MAX)
		return (0);

	return ((int)cpu_mhz);
}


static int
mach_clkinit(int preferred_mode, int *set_mode)
{
	struct psm_ops  *pops;
	int resolution;

	pops = mach_set[0];

	cpu_freq_hz = mach_getcpufreq();

	cpu_freq = machhztomhz(cpu_freq_hz);

	/*
	 * For most systems, we retain the default TSC-based gethrtime()
	 * implementation that was initialized early in the boot process.
	 */
#ifdef __xpv
	if (pops->psm_hrtimeinit)
		(*pops->psm_hrtimeinit)();
	gethrtimef = pops->psm_gethrtime;
	gethrtimeunscaledf = gethrtimef;
	/* scalehrtimef will remain dummy */
#endif /* __xpv */

	mach_fixcpufreq();

	if (mach_ver[0] >= PSM_INFO_VER01_3) {
		if (preferred_mode == TIMER_ONESHOT) {
			resolution = (*pops->psm_clkinit)(0);
			if (resolution != 0) {
				*set_mode = TIMER_ONESHOT;
				return (resolution);
			}
		}

		/*
		 * Either periodic mode was requested, or the timer could
		 * not be set to one-shot mode.
		 */
		resolution = (*pops->psm_clkinit)(hz);
		/*
		 * The psm should be able to do periodic, so we do not check
		 * the return value of psm_clkinit here.
		 */
		*set_mode = TIMER_PERIODIC;
		return (resolution);
	} else {
		/*
		 * PSMI interfaces prior to PSMI_3 do not define a return
		 * value for psm_clkinit, so the return value is ignored.
		 */
		(void) (*pops->psm_clkinit)(hz);
		*set_mode = TIMER_PERIODIC;
		return (nsec_per_tick);
	}
}

/*ARGSUSED*/
static int
mach_softlvl_to_vect(int ipl)
{
	setsoftint = av_set_softint_pending;
	kdisetsoftint = kdi_av_set_softint_pending;

	return (PSM_SV_SOFTWARE);
}

#ifdef DEBUG
/*
 * This is here to allow us to simulate cpus that refuse to start.
 */
cpuset_t cpufailset;
#endif

int
mach_cpu_start(struct cpu *cp, void *ctx)
{
	struct psm_ops *pops = mach_set[0];
	processorid_t id = cp->cpu_id;

#ifdef DEBUG
	if (CPU_IN_SET(cpufailset, id))
		return (0);
#endif
	return ((*pops->psm_cpu_start)(id, ctx));
}

int
mach_cpuid_start(processorid_t id, void *ctx)
{
	struct psm_ops *pops = mach_set[0];

#ifdef DEBUG
	if (CPU_IN_SET(cpufailset, id))
		return (0);
#endif
	return ((*pops->psm_cpu_start)(id, ctx));
}

int
mach_cpu_stop(cpu_t *cp, void *ctx)
{
	struct psm_ops *pops = mach_set[0];
	psm_cpu_request_t request;

	if (pops->psm_cpu_ops == NULL) {
		return (ENOTSUP);
	}

	ASSERT(cp->cpu_id != -1);
	request.pcr_cmd = PSM_CPU_STOP;
	request.req.cpu_stop.cpuid = cp->cpu_id;
	request.req.cpu_stop.ctx = ctx;

	return ((*pops->psm_cpu_ops)(&request));
}

int
mach_cpu_add(mach_cpu_add_arg_t *argp, processorid_t *cpuidp)
{
	int rc;
	struct psm_ops *pops = mach_set[0];
	psm_cpu_request_t request;

	if (pops->psm_cpu_ops == NULL) {
		return (ENOTSUP);
	}

	request.pcr_cmd = PSM_CPU_ADD;
	request.req.cpu_add.argp = argp;
	request.req.cpu_add.cpuid = -1;
	rc = (*pops->psm_cpu_ops)(&request);
	if (rc == 0) {
		ASSERT(request.req.cpu_add.cpuid != -1);
		*cpuidp = request.req.cpu_add.cpuid;
	}

	return (rc);
}

int
mach_cpu_remove(processorid_t cpuid)
{
	struct psm_ops *pops = mach_set[0];
	psm_cpu_request_t request;

	if (pops->psm_cpu_ops == NULL) {
		return (ENOTSUP);
	}

	request.pcr_cmd = PSM_CPU_REMOVE;
	request.req.cpu_remove.cpuid = cpuid;

	return ((*pops->psm_cpu_ops)(&request));
}

/*
 * Default handler to create a device node for a CPU.
 * One reference count is held on the created device node.
 */
static int
mach_cpu_create_devinfo(cpu_t *cp, dev_info_t **dipp)
{
	int rv;
	dev_info_t *dip;
	static kmutex_t cpu_node_lock;
	static dev_info_t *cpu_nex_devi = NULL;

	ASSERT(cp != NULL);
	ASSERT(dipp != NULL);
	*dipp = NULL;

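	/*
	 * Lazily create the "cpus" nexus on first use.  The unlocked check
	 * is safe: cpu_nex_devi only ever transitions from NULL to a valid
	 * dip, and that transition is made under cpu_node_lock.
	 */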
	if (cpu_nex_devi == NULL) {
		mutex_enter(&cpu_node_lock);
		/* First check whether the cpus node already exists. */
		cpu_nex_devi = ddi_find_devinfo("cpus", -1, 0);
		/* Create the cpus node if it doesn't exist. */
		if (cpu_nex_devi == NULL) {
			ndi_devi_enter(ddi_root_node());
			rv = ndi_devi_alloc(ddi_root_node(), "cpus",
			    (pnode_t)DEVI_SID_NODEID, &dip);
			if (rv != NDI_SUCCESS) {
				mutex_exit(&cpu_node_lock);
				cmn_err(CE_CONT,
				    "?failed to create cpu nexus device.\n");
				return (PSM_FAILURE);
			}
			ASSERT(dip != NULL);
			(void) ndi_devi_online(dip, 0);
			ndi_devi_exit(ddi_root_node());
			cpu_nex_devi = dip;
		}
		mutex_exit(&cpu_node_lock);
	}

	/*
	 * Create a child node for the cpu identified as 'cpu_id'.
	 */
	ndi_devi_enter(cpu_nex_devi);
	dip = ddi_add_child(cpu_nex_devi, "cpu", DEVI_SID_NODEID, -1);
	if (dip == NULL) {
		cmn_err(CE_CONT,
		    "?failed to create device node for cpu%d.\n", cp->cpu_id);
		rv = PSM_FAILURE;
	} else {
		*dipp = dip;
		(void) ndi_hold_devi(dip);
		rv = PSM_SUCCESS;
	}
	ndi_devi_exit(cpu_nex_devi);

	return (rv);
}

/*
 * Create a cpu device node in the device tree and online it.
 * Return the created dip with one reference count held if requested.
 */
int
mach_cpu_create_device_node(struct cpu *cp, dev_info_t **dipp)
{
	int rv;
	dev_info_t *dip = NULL;

	ASSERT(psm_cpu_create_devinfo != NULL);
	rv = psm_cpu_create_devinfo(cp, &dip);
	if (rv == PSM_SUCCESS) {
		cpuid_set_cpu_properties(dip, cp->cpu_id, cp->cpu_m.mcpu_cpi);
		/* Recursively attach driver for parent nexus device. */
		if (i_ddi_attach_node_hierarchy(ddi_get_parent(dip)) ==
		    DDI_SUCCESS) {
			/* Configure cpu itself and descendants. */
			(void) ndi_devi_online(dip,
			    NDI_ONLINE_ATTACH | NDI_CONFIG);
		}
		if (dipp != NULL) {
			*dipp = dip;
		} else {
			(void) ndi_rele_devi(dip);
		}
	}

	return (rv);
}

/*
 * On return, dipp contains one of the following values:
 * - NULL if no device node was found
 * - a pointer to the device node if found
 */
int
mach_cpu_get_device_node(struct cpu *cp, dev_info_t **dipp)
{
	*dipp = NULL;
	if (psm_cpu_get_devinfo != NULL) {
		if (psm_cpu_get_devinfo(cp, dipp) == PSM_SUCCESS) {
			return (PSM_SUCCESS);
		}
	}

	return (PSM_FAILURE);
}

/*ARGSUSED*/
static int
mach_translate_irq(dev_info_t *dip, int irqno)
{
	return (irqno);	/* default to NO translation */
}

static void
mach_notify_error(int level, char *errmsg)
{
	/*
	 * SL_FATAL is passed in once panicstr is set; deliver it
	 * as CE_PANIC.  Also, translate SL_ codes back to CE_
	 * codes for the psmi handler.
	 */
	if (level & SL_FATAL)
		(*notify_error)(CE_PANIC, errmsg);
	else if (level & SL_WARN)
		(*notify_error)(CE_WARN, errmsg);
	else if (level & SL_NOTE)
		(*notify_error)(CE_NOTE, errmsg);
	else if (level & SL_CONSOLE)
		(*notify_error)(CE_CONT, errmsg);
}

/*
 * This provides the default basic intr_ops interface for the new DDI
 * interrupt framework if the PSM doesn't have one.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *	     requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *	     passed back if return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
static int
mach_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	struct intrspec *ispec;

	switch (intr_op) {
	case PSM_INTR_OP_CHECK_MSI:
		*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
		    DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_ALLOC_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		break;
	case PSM_INTR_OP_NAVAIL_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
		*result = psm_translate_irq(dip, ispec->intrspec_vec);
		break;
	case PSM_INTR_OP_GET_CAP:
		*result = 0;
		break;
	case PSM_INTR_OP_GET_PENDING:
	case PSM_INTR_OP_CLEAR_MASK:
	case PSM_INTR_OP_SET_MASK:
	case PSM_INTR_OP_GET_SHARED:
	case PSM_INTR_OP_SET_PRI:
	case PSM_INTR_OP_SET_CAP:
	case PSM_INTR_OP_SET_CPU:
	case PSM_INTR_OP_GET_INTR:
	default:
		return (PSM_FAILURE);
	}
	return (PSM_SUCCESS);
}

/*
 * Return 1 if CMT load balancing policies should be
 * implemented across instances of the specified hardware
 * sharing relationship.
 */
int
pg_cmt_load_bal_hw(pghw_type_t hw)
{
	if (hw == PGHW_IPIPE ||
	    hw == PGHW_FPU ||
	    hw == PGHW_PROCNODE ||
	    hw == PGHW_CHIP)
		return (1);
	else
		return (0);
}

/*
 * Return 1 if thread affinity policies should be implemented
 * for instances of the specified hardware sharing relationship.
 */
int
pg_cmt_affinity_hw(pghw_type_t hw)
{
	if (hw == PGHW_CACHE)
		return (1);
	else
		return (0);
}

/*
 * Return the number of counter events requested to measure hardware capacity
 * and utilization, and set up CPC requests for the specified CPU as needed.
 *
 * May return 0 when platform or processor specific code knows that no CPC
 * events should be programmed on this CPU, or -1 when platform or processor
 * specific code doesn't know which counter events are best to use and common
 * code should decide for itself.
 */
int
/* LINTED E_FUNC_ARG_UNUSED */
cu_plat_cpc_init(cpu_t *cp, kcpc_request_list_t *reqs, int nreqs)
{
	const char	*impl_name;

	/*
	 * Return an error if pcbe_ops is not set.
	 */
	if (pcbe_ops == NULL)
		return (-1);

	/*
	 * Return that no CPC events should be programmed on hyperthreaded
	 * Pentium 4, and return an error for all other x86 processors to tell
	 * common code to decide what counter events to program on those CPUs
	 * for measuring hardware capacity and utilization.
	 */
	impl_name = pcbe_ops->pcbe_impl_name();
	if (impl_name != NULL && strcmp(impl_name, PCBE_IMPL_NAME_P4HT) == 0)
		return (0);
	else
		return (-1);
}