xref: /titanic_51/usr/src/uts/common/os/cpu.c (revision 25cf1a301a396c38e8adf52c15f537b80d2483f7)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
589c0ae93Scindi  * Common Development and Distribution License (the "License").
689c0ae93Scindi  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
228949bcd6Sandrei  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
267c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
277c478bd9Sstevel@tonic-gate 
287c478bd9Sstevel@tonic-gate /*
297c478bd9Sstevel@tonic-gate  * Architecture-independent CPU control functions.
307c478bd9Sstevel@tonic-gate  */
317c478bd9Sstevel@tonic-gate 
327c478bd9Sstevel@tonic-gate #include <sys/types.h>
337c478bd9Sstevel@tonic-gate #include <sys/param.h>
347c478bd9Sstevel@tonic-gate #include <sys/var.h>
357c478bd9Sstevel@tonic-gate #include <sys/thread.h>
367c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>
377c478bd9Sstevel@tonic-gate #include <sys/kstat.h>
387c478bd9Sstevel@tonic-gate #include <sys/uadmin.h>
397c478bd9Sstevel@tonic-gate #include <sys/systm.h>
407c478bd9Sstevel@tonic-gate #include <sys/errno.h>
417c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
427c478bd9Sstevel@tonic-gate #include <sys/procset.h>
437c478bd9Sstevel@tonic-gate #include <sys/processor.h>
447c478bd9Sstevel@tonic-gate #include <sys/debug.h>
457c478bd9Sstevel@tonic-gate #include <sys/cpupart.h>
467c478bd9Sstevel@tonic-gate #include <sys/lgrp.h>
477c478bd9Sstevel@tonic-gate #include <sys/pset.h>
487c478bd9Sstevel@tonic-gate #include <sys/chip.h>
497c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
507c478bd9Sstevel@tonic-gate #include <sys/kmem_impl.h>	/* to set per-cpu kmem_cache offset */
517c478bd9Sstevel@tonic-gate #include <sys/atomic.h>
527c478bd9Sstevel@tonic-gate #include <sys/callb.h>
537c478bd9Sstevel@tonic-gate #include <sys/vtrace.h>
547c478bd9Sstevel@tonic-gate #include <sys/cyclic.h>
557c478bd9Sstevel@tonic-gate #include <sys/bitmap.h>
567c478bd9Sstevel@tonic-gate #include <sys/nvpair.h>
577c478bd9Sstevel@tonic-gate #include <sys/pool_pset.h>
587c478bd9Sstevel@tonic-gate #include <sys/msacct.h>
597c478bd9Sstevel@tonic-gate #include <sys/time.h>
607c478bd9Sstevel@tonic-gate #include <sys/archsystm.h>
617aec1d6eScindi #if defined(__i386) || defined(__amd64)
627aec1d6eScindi #include <sys/x86_archext.h>
637aec1d6eScindi #endif
647c478bd9Sstevel@tonic-gate 
657c478bd9Sstevel@tonic-gate extern int	mp_cpu_start(cpu_t *);
667c478bd9Sstevel@tonic-gate extern int	mp_cpu_stop(cpu_t *);
677c478bd9Sstevel@tonic-gate extern int	mp_cpu_poweron(cpu_t *);
687c478bd9Sstevel@tonic-gate extern int	mp_cpu_poweroff(cpu_t *);
697c478bd9Sstevel@tonic-gate extern int	mp_cpu_configure(int);
707c478bd9Sstevel@tonic-gate extern int	mp_cpu_unconfigure(int);
717c478bd9Sstevel@tonic-gate extern void	mp_cpu_faulted_enter(cpu_t *);
727c478bd9Sstevel@tonic-gate extern void	mp_cpu_faulted_exit(cpu_t *);
737c478bd9Sstevel@tonic-gate 
747c478bd9Sstevel@tonic-gate extern int cmp_cpu_to_chip(processorid_t cpuid);
757c478bd9Sstevel@tonic-gate #ifdef __sparcv9
767c478bd9Sstevel@tonic-gate extern char *cpu_fru_fmri(cpu_t *cp);
777c478bd9Sstevel@tonic-gate #endif
787c478bd9Sstevel@tonic-gate 
797c478bd9Sstevel@tonic-gate static void cpu_add_active_internal(cpu_t *cp);
807c478bd9Sstevel@tonic-gate static void cpu_remove_active(cpu_t *cp);
817c478bd9Sstevel@tonic-gate static void cpu_info_kstat_create(cpu_t *cp);
827c478bd9Sstevel@tonic-gate static void cpu_info_kstat_destroy(cpu_t *cp);
837c478bd9Sstevel@tonic-gate static void cpu_stats_kstat_create(cpu_t *cp);
847c478bd9Sstevel@tonic-gate static void cpu_stats_kstat_destroy(cpu_t *cp);
857c478bd9Sstevel@tonic-gate 
867c478bd9Sstevel@tonic-gate static int cpu_sys_stats_ks_update(kstat_t *ksp, int rw);
877c478bd9Sstevel@tonic-gate static int cpu_vm_stats_ks_update(kstat_t *ksp, int rw);
887c478bd9Sstevel@tonic-gate static int cpu_stat_ks_update(kstat_t *ksp, int rw);
897c478bd9Sstevel@tonic-gate static int cpu_state_change_hooks(int, cpu_setup_t, cpu_setup_t);
907c478bd9Sstevel@tonic-gate 
917c478bd9Sstevel@tonic-gate /*
927c478bd9Sstevel@tonic-gate  * cpu_lock protects ncpus, ncpus_online, cpu_flag, cpu_list, cpu_active,
937c478bd9Sstevel@tonic-gate  * and dispatch queue reallocations.  The lock ordering with respect to
947c478bd9Sstevel@tonic-gate  * related locks is:
957c478bd9Sstevel@tonic-gate  *
967c478bd9Sstevel@tonic-gate  *	cpu_lock --> thread_free_lock  --->  p_lock  --->  thread_lock()
977c478bd9Sstevel@tonic-gate  *
987c478bd9Sstevel@tonic-gate  * Warning:  Certain sections of code do not use the cpu_lock when
997c478bd9Sstevel@tonic-gate  * traversing the cpu_list (e.g. mutex_vector_enter(), clock()).  Since
1007c478bd9Sstevel@tonic-gate  * all cpus are paused during modifications to this list, a solution
1017c478bd9Sstevel@tonic-gate  * to protect the list is too either disable kernel preemption while
1027c478bd9Sstevel@tonic-gate  * walking the list, *or* recheck the cpu_next pointer at each
1037c478bd9Sstevel@tonic-gate  * iteration in the loop.  Note that in no cases can any cached
1047c478bd9Sstevel@tonic-gate  * copies of the cpu pointers be kept as they may become invalid.
1057c478bd9Sstevel@tonic-gate  */
1067c478bd9Sstevel@tonic-gate kmutex_t	cpu_lock;
1077c478bd9Sstevel@tonic-gate cpu_t		*cpu_list;		/* list of all CPUs */
1087c478bd9Sstevel@tonic-gate cpu_t		*cpu_active;		/* list of active CPUs */
1097c478bd9Sstevel@tonic-gate static cpuset_t	cpu_available;		/* set of available CPUs */
1107c478bd9Sstevel@tonic-gate cpuset_t	cpu_seqid_inuse;	/* which cpu_seqids are in use */
1117c478bd9Sstevel@tonic-gate 
1127c478bd9Sstevel@tonic-gate /*
1137c478bd9Sstevel@tonic-gate  * max_ncpus keeps the max cpus the system can have. Initially
1147c478bd9Sstevel@tonic-gate  * it's NCPU, but since most archs scan the devtree for cpus
1157c478bd9Sstevel@tonic-gate  * fairly early on during boot, the real max can be known before
1167c478bd9Sstevel@tonic-gate  * ncpus is set (useful for early NCPU based allocations).
1177c478bd9Sstevel@tonic-gate  */
1187c478bd9Sstevel@tonic-gate int max_ncpus = NCPU;
1197c478bd9Sstevel@tonic-gate /*
1207c478bd9Sstevel@tonic-gate  * platforms that set max_ncpus to maxiumum number of cpus that can be
1217c478bd9Sstevel@tonic-gate  * dynamically added will set boot_max_ncpus to the number of cpus found
1227c478bd9Sstevel@tonic-gate  * at device tree scan time during boot.
1237c478bd9Sstevel@tonic-gate  */
1247c478bd9Sstevel@tonic-gate int boot_max_ncpus = -1;
1257c478bd9Sstevel@tonic-gate /*
1267c478bd9Sstevel@tonic-gate  * Maximum possible CPU id.  This can never be >= NCPU since NCPU is
1277c478bd9Sstevel@tonic-gate  * used to size arrays that are indexed by CPU id.
1287c478bd9Sstevel@tonic-gate  */
1297c478bd9Sstevel@tonic-gate processorid_t max_cpuid = NCPU - 1;
1307c478bd9Sstevel@tonic-gate 
1317c478bd9Sstevel@tonic-gate int ncpus = 1;
1327c478bd9Sstevel@tonic-gate int ncpus_online = 1;
1337c478bd9Sstevel@tonic-gate 
1347c478bd9Sstevel@tonic-gate /*
1357c478bd9Sstevel@tonic-gate  * CPU that we're trying to offline.  Protected by cpu_lock.
1367c478bd9Sstevel@tonic-gate  */
1377c478bd9Sstevel@tonic-gate cpu_t *cpu_inmotion;
1387c478bd9Sstevel@tonic-gate 
1397c478bd9Sstevel@tonic-gate /*
1407c478bd9Sstevel@tonic-gate  * Can be raised to suppress further weakbinding, which are instead
1417c478bd9Sstevel@tonic-gate  * satisfied by disabling preemption.  Must be raised/lowered under cpu_lock,
1427c478bd9Sstevel@tonic-gate  * while individual thread weakbinding synchronisation is done under thread
1437c478bd9Sstevel@tonic-gate  * lock.
1447c478bd9Sstevel@tonic-gate  */
1457c478bd9Sstevel@tonic-gate int weakbindingbarrier;
1467c478bd9Sstevel@tonic-gate 
1477c478bd9Sstevel@tonic-gate /*
1487c478bd9Sstevel@tonic-gate  * values for safe_list.  Pause state that CPUs are in.
1497c478bd9Sstevel@tonic-gate  */
1507c478bd9Sstevel@tonic-gate #define	PAUSE_IDLE	0		/* normal state */
1517c478bd9Sstevel@tonic-gate #define	PAUSE_READY	1		/* paused thread ready to spl */
1527c478bd9Sstevel@tonic-gate #define	PAUSE_WAIT	2		/* paused thread is spl-ed high */
1537c478bd9Sstevel@tonic-gate #define	PAUSE_DIE	3		/* tell pause thread to leave */
1547c478bd9Sstevel@tonic-gate #define	PAUSE_DEAD	4		/* pause thread has left */
1557c478bd9Sstevel@tonic-gate 
1567c478bd9Sstevel@tonic-gate /*
1577c478bd9Sstevel@tonic-gate  * Variables used in pause_cpus().
1587c478bd9Sstevel@tonic-gate  */
1597c478bd9Sstevel@tonic-gate static volatile char safe_list[NCPU];
1607c478bd9Sstevel@tonic-gate 
1617c478bd9Sstevel@tonic-gate static struct _cpu_pause_info {
1627c478bd9Sstevel@tonic-gate 	int		cp_spl;		/* spl saved in pause_cpus() */
1637c478bd9Sstevel@tonic-gate 	volatile int	cp_go;		/* Go signal sent after all ready */
1647c478bd9Sstevel@tonic-gate 	int		cp_count;	/* # of CPUs to pause */
1657c478bd9Sstevel@tonic-gate 	ksema_t		cp_sem;		/* synch pause_cpus & cpu_pause */
1667c478bd9Sstevel@tonic-gate 	kthread_id_t	cp_paused;
1677c478bd9Sstevel@tonic-gate } cpu_pause_info;
1687c478bd9Sstevel@tonic-gate 
1697c478bd9Sstevel@tonic-gate static kmutex_t pause_free_mutex;
1707c478bd9Sstevel@tonic-gate static kcondvar_t pause_free_cv;
1717c478bd9Sstevel@tonic-gate 
1727c478bd9Sstevel@tonic-gate static struct cpu_sys_stats_ks_data {
1737c478bd9Sstevel@tonic-gate 	kstat_named_t cpu_ticks_idle;
1747c478bd9Sstevel@tonic-gate 	kstat_named_t cpu_ticks_user;
1757c478bd9Sstevel@tonic-gate 	kstat_named_t cpu_ticks_kernel;
1767c478bd9Sstevel@tonic-gate 	kstat_named_t cpu_ticks_wait;
1777c478bd9Sstevel@tonic-gate 	kstat_named_t cpu_nsec_idle;
1787c478bd9Sstevel@tonic-gate 	kstat_named_t cpu_nsec_user;
1797c478bd9Sstevel@tonic-gate 	kstat_named_t cpu_nsec_kernel;
1807c478bd9Sstevel@tonic-gate 	kstat_named_t wait_ticks_io;
1817c478bd9Sstevel@tonic-gate 	kstat_named_t bread;
1827c478bd9Sstevel@tonic-gate 	kstat_named_t bwrite;
1837c478bd9Sstevel@tonic-gate 	kstat_named_t lread;
1847c478bd9Sstevel@tonic-gate 	kstat_named_t lwrite;
1857c478bd9Sstevel@tonic-gate 	kstat_named_t phread;
1867c478bd9Sstevel@tonic-gate 	kstat_named_t phwrite;
1877c478bd9Sstevel@tonic-gate 	kstat_named_t pswitch;
1887c478bd9Sstevel@tonic-gate 	kstat_named_t trap;
1897c478bd9Sstevel@tonic-gate 	kstat_named_t intr;
1907c478bd9Sstevel@tonic-gate 	kstat_named_t syscall;
1917c478bd9Sstevel@tonic-gate 	kstat_named_t sysread;
1927c478bd9Sstevel@tonic-gate 	kstat_named_t syswrite;
1937c478bd9Sstevel@tonic-gate 	kstat_named_t sysfork;
1947c478bd9Sstevel@tonic-gate 	kstat_named_t sysvfork;
1957c478bd9Sstevel@tonic-gate 	kstat_named_t sysexec;
1967c478bd9Sstevel@tonic-gate 	kstat_named_t readch;
1977c478bd9Sstevel@tonic-gate 	kstat_named_t writech;
1987c478bd9Sstevel@tonic-gate 	kstat_named_t rcvint;
1997c478bd9Sstevel@tonic-gate 	kstat_named_t xmtint;
2007c478bd9Sstevel@tonic-gate 	kstat_named_t mdmint;
2017c478bd9Sstevel@tonic-gate 	kstat_named_t rawch;
2027c478bd9Sstevel@tonic-gate 	kstat_named_t canch;
2037c478bd9Sstevel@tonic-gate 	kstat_named_t outch;
2047c478bd9Sstevel@tonic-gate 	kstat_named_t msg;
2057c478bd9Sstevel@tonic-gate 	kstat_named_t sema;
2067c478bd9Sstevel@tonic-gate 	kstat_named_t namei;
2077c478bd9Sstevel@tonic-gate 	kstat_named_t ufsiget;
2087c478bd9Sstevel@tonic-gate 	kstat_named_t ufsdirblk;
2097c478bd9Sstevel@tonic-gate 	kstat_named_t ufsipage;
2107c478bd9Sstevel@tonic-gate 	kstat_named_t ufsinopage;
2117c478bd9Sstevel@tonic-gate 	kstat_named_t procovf;
2127c478bd9Sstevel@tonic-gate 	kstat_named_t intrthread;
2137c478bd9Sstevel@tonic-gate 	kstat_named_t intrblk;
2147c478bd9Sstevel@tonic-gate 	kstat_named_t intrunpin;
2157c478bd9Sstevel@tonic-gate 	kstat_named_t idlethread;
2167c478bd9Sstevel@tonic-gate 	kstat_named_t inv_swtch;
2177c478bd9Sstevel@tonic-gate 	kstat_named_t nthreads;
2187c478bd9Sstevel@tonic-gate 	kstat_named_t cpumigrate;
2197c478bd9Sstevel@tonic-gate 	kstat_named_t xcalls;
2207c478bd9Sstevel@tonic-gate 	kstat_named_t mutex_adenters;
2217c478bd9Sstevel@tonic-gate 	kstat_named_t rw_rdfails;
2227c478bd9Sstevel@tonic-gate 	kstat_named_t rw_wrfails;
2237c478bd9Sstevel@tonic-gate 	kstat_named_t modload;
2247c478bd9Sstevel@tonic-gate 	kstat_named_t modunload;
2257c478bd9Sstevel@tonic-gate 	kstat_named_t bawrite;
2267c478bd9Sstevel@tonic-gate 	kstat_named_t iowait;
2277c478bd9Sstevel@tonic-gate } cpu_sys_stats_ks_data_template = {
2287c478bd9Sstevel@tonic-gate 	{ "cpu_ticks_idle", 	KSTAT_DATA_UINT64 },
2297c478bd9Sstevel@tonic-gate 	{ "cpu_ticks_user", 	KSTAT_DATA_UINT64 },
2307c478bd9Sstevel@tonic-gate 	{ "cpu_ticks_kernel", 	KSTAT_DATA_UINT64 },
2317c478bd9Sstevel@tonic-gate 	{ "cpu_ticks_wait", 	KSTAT_DATA_UINT64 },
2327c478bd9Sstevel@tonic-gate 	{ "cpu_nsec_idle",	KSTAT_DATA_UINT64 },
2337c478bd9Sstevel@tonic-gate 	{ "cpu_nsec_user",	KSTAT_DATA_UINT64 },
2347c478bd9Sstevel@tonic-gate 	{ "cpu_nsec_kernel",	KSTAT_DATA_UINT64 },
2357c478bd9Sstevel@tonic-gate 	{ "wait_ticks_io", 	KSTAT_DATA_UINT64 },
2367c478bd9Sstevel@tonic-gate 	{ "bread", 		KSTAT_DATA_UINT64 },
2377c478bd9Sstevel@tonic-gate 	{ "bwrite", 		KSTAT_DATA_UINT64 },
2387c478bd9Sstevel@tonic-gate 	{ "lread", 		KSTAT_DATA_UINT64 },
2397c478bd9Sstevel@tonic-gate 	{ "lwrite", 		KSTAT_DATA_UINT64 },
2407c478bd9Sstevel@tonic-gate 	{ "phread", 		KSTAT_DATA_UINT64 },
2417c478bd9Sstevel@tonic-gate 	{ "phwrite", 		KSTAT_DATA_UINT64 },
2427c478bd9Sstevel@tonic-gate 	{ "pswitch", 		KSTAT_DATA_UINT64 },
2437c478bd9Sstevel@tonic-gate 	{ "trap", 		KSTAT_DATA_UINT64 },
2447c478bd9Sstevel@tonic-gate 	{ "intr", 		KSTAT_DATA_UINT64 },
2457c478bd9Sstevel@tonic-gate 	{ "syscall", 		KSTAT_DATA_UINT64 },
2467c478bd9Sstevel@tonic-gate 	{ "sysread", 		KSTAT_DATA_UINT64 },
2477c478bd9Sstevel@tonic-gate 	{ "syswrite", 		KSTAT_DATA_UINT64 },
2487c478bd9Sstevel@tonic-gate 	{ "sysfork", 		KSTAT_DATA_UINT64 },
2497c478bd9Sstevel@tonic-gate 	{ "sysvfork", 		KSTAT_DATA_UINT64 },
2507c478bd9Sstevel@tonic-gate 	{ "sysexec", 		KSTAT_DATA_UINT64 },
2517c478bd9Sstevel@tonic-gate 	{ "readch", 		KSTAT_DATA_UINT64 },
2527c478bd9Sstevel@tonic-gate 	{ "writech", 		KSTAT_DATA_UINT64 },
2537c478bd9Sstevel@tonic-gate 	{ "rcvint", 		KSTAT_DATA_UINT64 },
2547c478bd9Sstevel@tonic-gate 	{ "xmtint", 		KSTAT_DATA_UINT64 },
2557c478bd9Sstevel@tonic-gate 	{ "mdmint", 		KSTAT_DATA_UINT64 },
2567c478bd9Sstevel@tonic-gate 	{ "rawch", 		KSTAT_DATA_UINT64 },
2577c478bd9Sstevel@tonic-gate 	{ "canch", 		KSTAT_DATA_UINT64 },
2587c478bd9Sstevel@tonic-gate 	{ "outch", 		KSTAT_DATA_UINT64 },
2597c478bd9Sstevel@tonic-gate 	{ "msg", 		KSTAT_DATA_UINT64 },
2607c478bd9Sstevel@tonic-gate 	{ "sema", 		KSTAT_DATA_UINT64 },
2617c478bd9Sstevel@tonic-gate 	{ "namei", 		KSTAT_DATA_UINT64 },
2627c478bd9Sstevel@tonic-gate 	{ "ufsiget", 		KSTAT_DATA_UINT64 },
2637c478bd9Sstevel@tonic-gate 	{ "ufsdirblk", 		KSTAT_DATA_UINT64 },
2647c478bd9Sstevel@tonic-gate 	{ "ufsipage", 		KSTAT_DATA_UINT64 },
2657c478bd9Sstevel@tonic-gate 	{ "ufsinopage", 	KSTAT_DATA_UINT64 },
2667c478bd9Sstevel@tonic-gate 	{ "procovf", 		KSTAT_DATA_UINT64 },
2677c478bd9Sstevel@tonic-gate 	{ "intrthread", 	KSTAT_DATA_UINT64 },
2687c478bd9Sstevel@tonic-gate 	{ "intrblk", 		KSTAT_DATA_UINT64 },
2697c478bd9Sstevel@tonic-gate 	{ "intrunpin",		KSTAT_DATA_UINT64 },
2707c478bd9Sstevel@tonic-gate 	{ "idlethread", 	KSTAT_DATA_UINT64 },
2717c478bd9Sstevel@tonic-gate 	{ "inv_swtch", 		KSTAT_DATA_UINT64 },
2727c478bd9Sstevel@tonic-gate 	{ "nthreads", 		KSTAT_DATA_UINT64 },
2737c478bd9Sstevel@tonic-gate 	{ "cpumigrate", 	KSTAT_DATA_UINT64 },
2747c478bd9Sstevel@tonic-gate 	{ "xcalls", 		KSTAT_DATA_UINT64 },
2757c478bd9Sstevel@tonic-gate 	{ "mutex_adenters", 	KSTAT_DATA_UINT64 },
2767c478bd9Sstevel@tonic-gate 	{ "rw_rdfails", 	KSTAT_DATA_UINT64 },
2777c478bd9Sstevel@tonic-gate 	{ "rw_wrfails", 	KSTAT_DATA_UINT64 },
2787c478bd9Sstevel@tonic-gate 	{ "modload", 		KSTAT_DATA_UINT64 },
2797c478bd9Sstevel@tonic-gate 	{ "modunload", 		KSTAT_DATA_UINT64 },
2807c478bd9Sstevel@tonic-gate 	{ "bawrite", 		KSTAT_DATA_UINT64 },
2817c478bd9Sstevel@tonic-gate 	{ "iowait",		KSTAT_DATA_UINT64 },
2827c478bd9Sstevel@tonic-gate };
2837c478bd9Sstevel@tonic-gate 
2847c478bd9Sstevel@tonic-gate static struct cpu_vm_stats_ks_data {
2857c478bd9Sstevel@tonic-gate 	kstat_named_t pgrec;
2867c478bd9Sstevel@tonic-gate 	kstat_named_t pgfrec;
2877c478bd9Sstevel@tonic-gate 	kstat_named_t pgin;
2887c478bd9Sstevel@tonic-gate 	kstat_named_t pgpgin;
2897c478bd9Sstevel@tonic-gate 	kstat_named_t pgout;
2907c478bd9Sstevel@tonic-gate 	kstat_named_t pgpgout;
2917c478bd9Sstevel@tonic-gate 	kstat_named_t swapin;
2927c478bd9Sstevel@tonic-gate 	kstat_named_t pgswapin;
2937c478bd9Sstevel@tonic-gate 	kstat_named_t swapout;
2947c478bd9Sstevel@tonic-gate 	kstat_named_t pgswapout;
2957c478bd9Sstevel@tonic-gate 	kstat_named_t zfod;
2967c478bd9Sstevel@tonic-gate 	kstat_named_t dfree;
2977c478bd9Sstevel@tonic-gate 	kstat_named_t scan;
2987c478bd9Sstevel@tonic-gate 	kstat_named_t rev;
2997c478bd9Sstevel@tonic-gate 	kstat_named_t hat_fault;
3007c478bd9Sstevel@tonic-gate 	kstat_named_t as_fault;
3017c478bd9Sstevel@tonic-gate 	kstat_named_t maj_fault;
3027c478bd9Sstevel@tonic-gate 	kstat_named_t cow_fault;
3037c478bd9Sstevel@tonic-gate 	kstat_named_t prot_fault;
3047c478bd9Sstevel@tonic-gate 	kstat_named_t softlock;
3057c478bd9Sstevel@tonic-gate 	kstat_named_t kernel_asflt;
3067c478bd9Sstevel@tonic-gate 	kstat_named_t pgrrun;
3077c478bd9Sstevel@tonic-gate 	kstat_named_t execpgin;
3087c478bd9Sstevel@tonic-gate 	kstat_named_t execpgout;
3097c478bd9Sstevel@tonic-gate 	kstat_named_t execfree;
3107c478bd9Sstevel@tonic-gate 	kstat_named_t anonpgin;
3117c478bd9Sstevel@tonic-gate 	kstat_named_t anonpgout;
3127c478bd9Sstevel@tonic-gate 	kstat_named_t anonfree;
3137c478bd9Sstevel@tonic-gate 	kstat_named_t fspgin;
3147c478bd9Sstevel@tonic-gate 	kstat_named_t fspgout;
3157c478bd9Sstevel@tonic-gate 	kstat_named_t fsfree;
3167c478bd9Sstevel@tonic-gate } cpu_vm_stats_ks_data_template = {
3177c478bd9Sstevel@tonic-gate 	{ "pgrec",		KSTAT_DATA_UINT64 },
3187c478bd9Sstevel@tonic-gate 	{ "pgfrec",		KSTAT_DATA_UINT64 },
3197c478bd9Sstevel@tonic-gate 	{ "pgin",		KSTAT_DATA_UINT64 },
3207c478bd9Sstevel@tonic-gate 	{ "pgpgin",		KSTAT_DATA_UINT64 },
3217c478bd9Sstevel@tonic-gate 	{ "pgout",		KSTAT_DATA_UINT64 },
3227c478bd9Sstevel@tonic-gate 	{ "pgpgout",		KSTAT_DATA_UINT64 },
3237c478bd9Sstevel@tonic-gate 	{ "swapin",		KSTAT_DATA_UINT64 },
3247c478bd9Sstevel@tonic-gate 	{ "pgswapin",		KSTAT_DATA_UINT64 },
3257c478bd9Sstevel@tonic-gate 	{ "swapout",		KSTAT_DATA_UINT64 },
3267c478bd9Sstevel@tonic-gate 	{ "pgswapout",		KSTAT_DATA_UINT64 },
3277c478bd9Sstevel@tonic-gate 	{ "zfod",		KSTAT_DATA_UINT64 },
3287c478bd9Sstevel@tonic-gate 	{ "dfree",		KSTAT_DATA_UINT64 },
3297c478bd9Sstevel@tonic-gate 	{ "scan",		KSTAT_DATA_UINT64 },
3307c478bd9Sstevel@tonic-gate 	{ "rev",		KSTAT_DATA_UINT64 },
3317c478bd9Sstevel@tonic-gate 	{ "hat_fault",		KSTAT_DATA_UINT64 },
3327c478bd9Sstevel@tonic-gate 	{ "as_fault",		KSTAT_DATA_UINT64 },
3337c478bd9Sstevel@tonic-gate 	{ "maj_fault",		KSTAT_DATA_UINT64 },
3347c478bd9Sstevel@tonic-gate 	{ "cow_fault",		KSTAT_DATA_UINT64 },
3357c478bd9Sstevel@tonic-gate 	{ "prot_fault",		KSTAT_DATA_UINT64 },
3367c478bd9Sstevel@tonic-gate 	{ "softlock",		KSTAT_DATA_UINT64 },
3377c478bd9Sstevel@tonic-gate 	{ "kernel_asflt",	KSTAT_DATA_UINT64 },
3387c478bd9Sstevel@tonic-gate 	{ "pgrrun",		KSTAT_DATA_UINT64 },
3397c478bd9Sstevel@tonic-gate 	{ "execpgin",		KSTAT_DATA_UINT64 },
3407c478bd9Sstevel@tonic-gate 	{ "execpgout",		KSTAT_DATA_UINT64 },
3417c478bd9Sstevel@tonic-gate 	{ "execfree",		KSTAT_DATA_UINT64 },
3427c478bd9Sstevel@tonic-gate 	{ "anonpgin",		KSTAT_DATA_UINT64 },
3437c478bd9Sstevel@tonic-gate 	{ "anonpgout",		KSTAT_DATA_UINT64 },
3447c478bd9Sstevel@tonic-gate 	{ "anonfree",		KSTAT_DATA_UINT64 },
3457c478bd9Sstevel@tonic-gate 	{ "fspgin",		KSTAT_DATA_UINT64 },
3467c478bd9Sstevel@tonic-gate 	{ "fspgout",		KSTAT_DATA_UINT64 },
3477c478bd9Sstevel@tonic-gate 	{ "fsfree",		KSTAT_DATA_UINT64 },
3487c478bd9Sstevel@tonic-gate };
3497c478bd9Sstevel@tonic-gate 
3507c478bd9Sstevel@tonic-gate /*
3517c478bd9Sstevel@tonic-gate  * Force the specified thread to migrate to the appropriate processor.
3527c478bd9Sstevel@tonic-gate  * Called with thread lock held, returns with it dropped.
3537c478bd9Sstevel@tonic-gate  */
3547c478bd9Sstevel@tonic-gate static void
3557c478bd9Sstevel@tonic-gate force_thread_migrate(kthread_id_t tp)
3567c478bd9Sstevel@tonic-gate {
3577c478bd9Sstevel@tonic-gate 	ASSERT(THREAD_LOCK_HELD(tp));
3587c478bd9Sstevel@tonic-gate 	if (tp == curthread) {
3597c478bd9Sstevel@tonic-gate 		THREAD_TRANSITION(tp);
3607c478bd9Sstevel@tonic-gate 		CL_SETRUN(tp);
3617c478bd9Sstevel@tonic-gate 		thread_unlock_nopreempt(tp);
3627c478bd9Sstevel@tonic-gate 		swtch();
3637c478bd9Sstevel@tonic-gate 	} else {
3647c478bd9Sstevel@tonic-gate 		if (tp->t_state == TS_ONPROC) {
3657c478bd9Sstevel@tonic-gate 			cpu_surrender(tp);
3667c478bd9Sstevel@tonic-gate 		} else if (tp->t_state == TS_RUN) {
3677c478bd9Sstevel@tonic-gate 			(void) dispdeq(tp);
3687c478bd9Sstevel@tonic-gate 			setbackdq(tp);
3697c478bd9Sstevel@tonic-gate 		}
3707c478bd9Sstevel@tonic-gate 		thread_unlock(tp);
3717c478bd9Sstevel@tonic-gate 	}
3727c478bd9Sstevel@tonic-gate }
3737c478bd9Sstevel@tonic-gate 
3747c478bd9Sstevel@tonic-gate /*
3757c478bd9Sstevel@tonic-gate  * Set affinity for a specified CPU.
3767c478bd9Sstevel@tonic-gate  * A reference count is incremented and the affinity is held until the
3777c478bd9Sstevel@tonic-gate  * reference count is decremented to zero by thread_affinity_clear().
3787c478bd9Sstevel@tonic-gate  * This is so regions of code requiring affinity can be nested.
3797c478bd9Sstevel@tonic-gate  * Caller needs to ensure that cpu_id remains valid, which can be
3807c478bd9Sstevel@tonic-gate  * done by holding cpu_lock across this call, unless the caller
3817c478bd9Sstevel@tonic-gate  * specifies CPU_CURRENT in which case the cpu_lock will be acquired
3827c478bd9Sstevel@tonic-gate  * by thread_affinity_set and CPU->cpu_id will be the target CPU.
3837c478bd9Sstevel@tonic-gate  */
3847c478bd9Sstevel@tonic-gate void
3857c478bd9Sstevel@tonic-gate thread_affinity_set(kthread_id_t t, int cpu_id)
3867c478bd9Sstevel@tonic-gate {
3877c478bd9Sstevel@tonic-gate 	cpu_t		*cp;
3887c478bd9Sstevel@tonic-gate 	int		c;
3897c478bd9Sstevel@tonic-gate 
3907c478bd9Sstevel@tonic-gate 	ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));
3917c478bd9Sstevel@tonic-gate 
3927c478bd9Sstevel@tonic-gate 	if ((c = cpu_id) == CPU_CURRENT) {
3937c478bd9Sstevel@tonic-gate 		mutex_enter(&cpu_lock);
3947c478bd9Sstevel@tonic-gate 		cpu_id = CPU->cpu_id;
3957c478bd9Sstevel@tonic-gate 	}
3967c478bd9Sstevel@tonic-gate 	/*
3977c478bd9Sstevel@tonic-gate 	 * We should be asserting that cpu_lock is held here, but
3987c478bd9Sstevel@tonic-gate 	 * the NCA code doesn't acquire it.  The following assert
3997c478bd9Sstevel@tonic-gate 	 * should be uncommented when the NCA code is fixed.
4007c478bd9Sstevel@tonic-gate 	 *
4017c478bd9Sstevel@tonic-gate 	 * ASSERT(MUTEX_HELD(&cpu_lock));
4027c478bd9Sstevel@tonic-gate 	 */
4037c478bd9Sstevel@tonic-gate 	ASSERT((cpu_id >= 0) && (cpu_id < NCPU));
4047c478bd9Sstevel@tonic-gate 	cp = cpu[cpu_id];
4057c478bd9Sstevel@tonic-gate 	ASSERT(cp != NULL);		/* user must provide a good cpu_id */
4067c478bd9Sstevel@tonic-gate 	/*
4077c478bd9Sstevel@tonic-gate 	 * If there is already a hard affinity requested, and this affinity
4087c478bd9Sstevel@tonic-gate 	 * conflicts with that, panic.
4097c478bd9Sstevel@tonic-gate 	 */
4107c478bd9Sstevel@tonic-gate 	thread_lock(t);
4117c478bd9Sstevel@tonic-gate 	if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
4127c478bd9Sstevel@tonic-gate 		panic("affinity_set: setting %p but already bound to %p",
4137c478bd9Sstevel@tonic-gate 		    (void *)cp, (void *)t->t_bound_cpu);
4147c478bd9Sstevel@tonic-gate 	}
4157c478bd9Sstevel@tonic-gate 	t->t_affinitycnt++;
4167c478bd9Sstevel@tonic-gate 	t->t_bound_cpu = cp;
4177c478bd9Sstevel@tonic-gate 
4187c478bd9Sstevel@tonic-gate 	/*
4197c478bd9Sstevel@tonic-gate 	 * Make sure we're running on the right CPU.
4207c478bd9Sstevel@tonic-gate 	 */
4217c478bd9Sstevel@tonic-gate 	if (cp != t->t_cpu || t != curthread) {
4227c478bd9Sstevel@tonic-gate 		force_thread_migrate(t);	/* drops thread lock */
4237c478bd9Sstevel@tonic-gate 	} else {
4247c478bd9Sstevel@tonic-gate 		thread_unlock(t);
4257c478bd9Sstevel@tonic-gate 	}
4267c478bd9Sstevel@tonic-gate 
4277c478bd9Sstevel@tonic-gate 	if (c == CPU_CURRENT)
4287c478bd9Sstevel@tonic-gate 		mutex_exit(&cpu_lock);
4297c478bd9Sstevel@tonic-gate }
4307c478bd9Sstevel@tonic-gate 
4317c478bd9Sstevel@tonic-gate /*
4327c478bd9Sstevel@tonic-gate  *	Wrapper for backward compatibility.
4337c478bd9Sstevel@tonic-gate  */
4347c478bd9Sstevel@tonic-gate void
4357c478bd9Sstevel@tonic-gate affinity_set(int cpu_id)
4367c478bd9Sstevel@tonic-gate {
4377c478bd9Sstevel@tonic-gate 	thread_affinity_set(curthread, cpu_id);
4387c478bd9Sstevel@tonic-gate }
4397c478bd9Sstevel@tonic-gate 
4407c478bd9Sstevel@tonic-gate /*
4417c478bd9Sstevel@tonic-gate  * Decrement the affinity reservation count and if it becomes zero,
4427c478bd9Sstevel@tonic-gate  * clear the CPU affinity for the current thread, or set it to the user's
4437c478bd9Sstevel@tonic-gate  * software binding request.
4447c478bd9Sstevel@tonic-gate  */
4457c478bd9Sstevel@tonic-gate void
4467c478bd9Sstevel@tonic-gate thread_affinity_clear(kthread_id_t t)
4477c478bd9Sstevel@tonic-gate {
4487c478bd9Sstevel@tonic-gate 	register processorid_t binding;
4497c478bd9Sstevel@tonic-gate 
4507c478bd9Sstevel@tonic-gate 	thread_lock(t);
4517c478bd9Sstevel@tonic-gate 	if (--t->t_affinitycnt == 0) {
4527c478bd9Sstevel@tonic-gate 		if ((binding = t->t_bind_cpu) == PBIND_NONE) {
4537c478bd9Sstevel@tonic-gate 			/*
4547c478bd9Sstevel@tonic-gate 			 * Adjust disp_max_unbound_pri if necessary.
4557c478bd9Sstevel@tonic-gate 			 */
4567c478bd9Sstevel@tonic-gate 			disp_adjust_unbound_pri(t);
4577c478bd9Sstevel@tonic-gate 			t->t_bound_cpu = NULL;
4587c478bd9Sstevel@tonic-gate 			if (t->t_cpu->cpu_part != t->t_cpupart) {
4597c478bd9Sstevel@tonic-gate 				force_thread_migrate(t);
4607c478bd9Sstevel@tonic-gate 				return;
4617c478bd9Sstevel@tonic-gate 			}
4627c478bd9Sstevel@tonic-gate 		} else {
4637c478bd9Sstevel@tonic-gate 			t->t_bound_cpu = cpu[binding];
4647c478bd9Sstevel@tonic-gate 			/*
4657c478bd9Sstevel@tonic-gate 			 * Make sure the thread is running on the bound CPU.
4667c478bd9Sstevel@tonic-gate 			 */
4677c478bd9Sstevel@tonic-gate 			if (t->t_cpu != t->t_bound_cpu) {
4687c478bd9Sstevel@tonic-gate 				force_thread_migrate(t);
4697c478bd9Sstevel@tonic-gate 				return;		/* already dropped lock */
4707c478bd9Sstevel@tonic-gate 			}
4717c478bd9Sstevel@tonic-gate 		}
4727c478bd9Sstevel@tonic-gate 	}
4737c478bd9Sstevel@tonic-gate 	thread_unlock(t);
4747c478bd9Sstevel@tonic-gate }
4757c478bd9Sstevel@tonic-gate 
4767c478bd9Sstevel@tonic-gate /*
4777c478bd9Sstevel@tonic-gate  * Wrapper for backward compatibility.
4787c478bd9Sstevel@tonic-gate  */
4797c478bd9Sstevel@tonic-gate void
4807c478bd9Sstevel@tonic-gate affinity_clear(void)
4817c478bd9Sstevel@tonic-gate {
4827c478bd9Sstevel@tonic-gate 	thread_affinity_clear(curthread);
4837c478bd9Sstevel@tonic-gate }
4847c478bd9Sstevel@tonic-gate 
4857c478bd9Sstevel@tonic-gate /*
4867c478bd9Sstevel@tonic-gate  * Weak cpu affinity.  Bind to the "current" cpu for short periods
4877c478bd9Sstevel@tonic-gate  * of time during which the thread must not block (but may be preempted).
4887c478bd9Sstevel@tonic-gate  * Use this instead of kpreempt_disable() when it is only "no migration"
4897c478bd9Sstevel@tonic-gate  * rather than "no preemption" semantics that are required - disabling
4907c478bd9Sstevel@tonic-gate  * preemption holds higher priority threads off of cpu and if the
4917c478bd9Sstevel@tonic-gate  * operation that is protected is more than momentary this is not good
4927c478bd9Sstevel@tonic-gate  * for realtime etc.
4937c478bd9Sstevel@tonic-gate  *
4947c478bd9Sstevel@tonic-gate  * Weakly bound threads will not prevent a cpu from being offlined -
4957c478bd9Sstevel@tonic-gate  * we'll only run them on the cpu to which they are weakly bound but
4967c478bd9Sstevel@tonic-gate  * (because they do not block) we'll always be able to move them on to
4977c478bd9Sstevel@tonic-gate  * another cpu at offline time if we give them just a short moment to
4987c478bd9Sstevel@tonic-gate  * run during which they will unbind.  To give a cpu a chance of offlining,
4997c478bd9Sstevel@tonic-gate  * however, we require a barrier to weak bindings that may be raised for a
5007c478bd9Sstevel@tonic-gate  * given cpu (offline/move code may set this and then wait a short time for
5017c478bd9Sstevel@tonic-gate  * existing weak bindings to drop); the cpu_inmotion pointer is that barrier.
5027c478bd9Sstevel@tonic-gate  *
5037c478bd9Sstevel@tonic-gate  * There are few restrictions on the calling context of thread_nomigrate.
5047c478bd9Sstevel@tonic-gate  * The caller must not hold the thread lock.  Calls may be nested.
5057c478bd9Sstevel@tonic-gate  *
5067c478bd9Sstevel@tonic-gate  * After weakbinding a thread must not perform actions that may block.
5077c478bd9Sstevel@tonic-gate  * In particular it must not call thread_affinity_set; calling that when
5087c478bd9Sstevel@tonic-gate  * already weakbound is nonsensical anyway.
5097c478bd9Sstevel@tonic-gate  *
5107c478bd9Sstevel@tonic-gate  * If curthread is prevented from migrating for other reasons
5117c478bd9Sstevel@tonic-gate  * (kernel preemption disabled; high pil; strongly bound; interrupt thread)
5127c478bd9Sstevel@tonic-gate  * then the weak binding will succeed even if this cpu is the target of an
5137c478bd9Sstevel@tonic-gate  * offline/move request.
5147c478bd9Sstevel@tonic-gate  */
5157c478bd9Sstevel@tonic-gate void
5167c478bd9Sstevel@tonic-gate thread_nomigrate(void)
5177c478bd9Sstevel@tonic-gate {
5187c478bd9Sstevel@tonic-gate 	cpu_t *cp;
5197c478bd9Sstevel@tonic-gate 	kthread_id_t t = curthread;
5207c478bd9Sstevel@tonic-gate 
5217c478bd9Sstevel@tonic-gate again:
5227c478bd9Sstevel@tonic-gate 	kpreempt_disable();
5237c478bd9Sstevel@tonic-gate 	cp = CPU;
5247c478bd9Sstevel@tonic-gate 
5257c478bd9Sstevel@tonic-gate 	/*
5267c478bd9Sstevel@tonic-gate 	 * A highlevel interrupt must not modify t_nomigrate or
5277c478bd9Sstevel@tonic-gate 	 * t_weakbound_cpu of the thread it has interrupted.  A lowlevel
5287c478bd9Sstevel@tonic-gate 	 * interrupt thread cannot migrate and we can avoid the
5297c478bd9Sstevel@tonic-gate 	 * thread_lock call below by short-circuiting here.  In either
5307c478bd9Sstevel@tonic-gate 	 * case we can just return since no migration is possible and
5317c478bd9Sstevel@tonic-gate 	 * the condition will persist (ie, when we test for these again
5327c478bd9Sstevel@tonic-gate 	 * in thread_allowmigrate they can't have changed).   Migration
5337c478bd9Sstevel@tonic-gate 	 * is also impossible if we're at or above DISP_LEVEL pil.
5347c478bd9Sstevel@tonic-gate 	 */
5357c478bd9Sstevel@tonic-gate 	if (CPU_ON_INTR(cp) || t->t_flag & T_INTR_THREAD ||
5367c478bd9Sstevel@tonic-gate 	    getpil() >= DISP_LEVEL) {
5377c478bd9Sstevel@tonic-gate 		kpreempt_enable();
5387c478bd9Sstevel@tonic-gate 		return;
5397c478bd9Sstevel@tonic-gate 	}
5407c478bd9Sstevel@tonic-gate 
5417c478bd9Sstevel@tonic-gate 	/*
5427c478bd9Sstevel@tonic-gate 	 * We must be consistent with existing weak bindings.  Since we
5437c478bd9Sstevel@tonic-gate 	 * may be interrupted between the increment of t_nomigrate and
5447c478bd9Sstevel@tonic-gate 	 * the store to t_weakbound_cpu below we cannot assume that
5457c478bd9Sstevel@tonic-gate 	 * t_weakbound_cpu will be set if t_nomigrate is.  Note that we
5467c478bd9Sstevel@tonic-gate 	 * cannot assert t_weakbound_cpu == t_bind_cpu since that is not
5477c478bd9Sstevel@tonic-gate 	 * always the case.
5487c478bd9Sstevel@tonic-gate 	 */
5497c478bd9Sstevel@tonic-gate 	if (t->t_nomigrate && t->t_weakbound_cpu && t->t_weakbound_cpu != cp) {
5507c478bd9Sstevel@tonic-gate 		if (!panicstr)
5517c478bd9Sstevel@tonic-gate 			panic("thread_nomigrate: binding to %p but already "
5527c478bd9Sstevel@tonic-gate 			    "bound to %p", (void *)cp,
5537c478bd9Sstevel@tonic-gate 			    (void *)t->t_weakbound_cpu);
5547c478bd9Sstevel@tonic-gate 	}
5557c478bd9Sstevel@tonic-gate 
5567c478bd9Sstevel@tonic-gate 	/*
5577c478bd9Sstevel@tonic-gate 	 * At this point we have preemption disabled and we don't yet hold
5587c478bd9Sstevel@tonic-gate 	 * the thread lock.  So it's possible that somebody else could
5597c478bd9Sstevel@tonic-gate 	 * set t_bind_cpu here and not be able to force us across to the
5607c478bd9Sstevel@tonic-gate 	 * new cpu (since we have preemption disabled).
5617c478bd9Sstevel@tonic-gate 	 */
5627c478bd9Sstevel@tonic-gate 	thread_lock(curthread);
5637c478bd9Sstevel@tonic-gate 
5647c478bd9Sstevel@tonic-gate 	/*
5657c478bd9Sstevel@tonic-gate 	 * If further weak bindings are being (temporarily) suppressed then
5667c478bd9Sstevel@tonic-gate 	 * we'll settle for disabling kernel preemption (which assures
5677c478bd9Sstevel@tonic-gate 	 * no migration provided the thread does not block which it is
5687c478bd9Sstevel@tonic-gate 	 * not allowed to if using thread_nomigrate).  We must remember
5697c478bd9Sstevel@tonic-gate 	 * this disposition so we can take appropriate action in
5707c478bd9Sstevel@tonic-gate 	 * thread_allowmigrate.  If this is a nested call and the
5717c478bd9Sstevel@tonic-gate 	 * thread is already weakbound then fall through as normal.
5727c478bd9Sstevel@tonic-gate 	 * We remember the decision to settle for kpreempt_disable through
5737c478bd9Sstevel@tonic-gate 	 * negative nesting counting in t_nomigrate.  Once a thread has had one
5747c478bd9Sstevel@tonic-gate 	 * weakbinding request satisfied in this way any further (nested)
5757c478bd9Sstevel@tonic-gate 	 * requests will continue to be satisfied in the same way,
5767c478bd9Sstevel@tonic-gate 	 * even if weak bindings have recommenced.
5777c478bd9Sstevel@tonic-gate 	 */
5787c478bd9Sstevel@tonic-gate 	if (t->t_nomigrate < 0 || weakbindingbarrier && t->t_nomigrate == 0) {
5797c478bd9Sstevel@tonic-gate 		--t->t_nomigrate;
5807c478bd9Sstevel@tonic-gate 		thread_unlock(curthread);
5817c478bd9Sstevel@tonic-gate 		return;		/* with kpreempt_disable still active */
5827c478bd9Sstevel@tonic-gate 	}
5837c478bd9Sstevel@tonic-gate 
5847c478bd9Sstevel@tonic-gate 	/*
5857c478bd9Sstevel@tonic-gate 	 * We hold thread_lock so t_bind_cpu cannot change.  We could,
5867c478bd9Sstevel@tonic-gate 	 * however, be running on a different cpu to which we are t_bound_cpu
5877c478bd9Sstevel@tonic-gate 	 * to (as explained above).  If we grant the weak binding request
5887c478bd9Sstevel@tonic-gate 	 * in that case then the dispatcher must favour our weak binding
5897c478bd9Sstevel@tonic-gate 	 * over our strong (in which case, just as when preemption is
5907c478bd9Sstevel@tonic-gate 	 * disabled, we can continue to run on a cpu other than the one to
5917c478bd9Sstevel@tonic-gate 	 * which we are strongbound; the difference in this case is that
5927c478bd9Sstevel@tonic-gate 	 * this thread can be preempted and so can appear on the dispatch
5937c478bd9Sstevel@tonic-gate 	 * queues of a cpu other than the one it is strongbound to).
5947c478bd9Sstevel@tonic-gate 	 *
5957c478bd9Sstevel@tonic-gate 	 * If the cpu we are running on does not appear to be a current
5967c478bd9Sstevel@tonic-gate 	 * offline target (we check cpu_inmotion to determine this - since
5977c478bd9Sstevel@tonic-gate 	 * we don't hold cpu_lock we may not see a recent store to that,
5987c478bd9Sstevel@tonic-gate 	 * so it's possible that we at times can grant a weak binding to a
5997c478bd9Sstevel@tonic-gate 	 * cpu that is an offline target, but that one request will not
6007c478bd9Sstevel@tonic-gate 	 * prevent the offline from succeeding) then we will always grant
6017c478bd9Sstevel@tonic-gate 	 * the weak binding request.  This includes the case above where
6027c478bd9Sstevel@tonic-gate 	 * we grant a weakbinding not commensurate with our strong binding.
6037c478bd9Sstevel@tonic-gate 	 *
6047c478bd9Sstevel@tonic-gate 	 * If our cpu does appear to be an offline target then we're inclined
6057c478bd9Sstevel@tonic-gate 	 * not to grant the weakbinding request just yet - we'd prefer to
6067c478bd9Sstevel@tonic-gate 	 * migrate to another cpu and grant the request there.  The
6077c478bd9Sstevel@tonic-gate 	 * exceptions are those cases where going through preemption code
6087c478bd9Sstevel@tonic-gate 	 * will not result in us changing cpu:
6097c478bd9Sstevel@tonic-gate 	 *
6107c478bd9Sstevel@tonic-gate 	 *	. interrupts have already bypassed this case (see above)
6117c478bd9Sstevel@tonic-gate 	 *	. we are already weakbound to this cpu (dispatcher code will
6127c478bd9Sstevel@tonic-gate 	 *	  always return us to the weakbound cpu)
6137c478bd9Sstevel@tonic-gate 	 *	. preemption was disabled even before we disabled it above
6147c478bd9Sstevel@tonic-gate 	 *	. we are strongbound to this cpu (if we're strongbound to
6157c478bd9Sstevel@tonic-gate 	 *	another and not yet running there the trip through the
6167c478bd9Sstevel@tonic-gate 	 *	dispatcher will move us to the strongbound cpu and we
6177c478bd9Sstevel@tonic-gate 	 *	will grant the weak binding there)
6187c478bd9Sstevel@tonic-gate 	 */
6197c478bd9Sstevel@tonic-gate 	if (cp != cpu_inmotion || t->t_nomigrate > 0 || t->t_preempt > 1 ||
6207c478bd9Sstevel@tonic-gate 	    t->t_bound_cpu == cp) {
6217c478bd9Sstevel@tonic-gate 		/*
6227c478bd9Sstevel@tonic-gate 		 * Don't be tempted to store to t_weakbound_cpu only on
6237c478bd9Sstevel@tonic-gate 		 * the first nested bind request - if we're interrupted
6247c478bd9Sstevel@tonic-gate 		 * after the increment of t_nomigrate and before the
6257c478bd9Sstevel@tonic-gate 		 * store to t_weakbound_cpu and the interrupt calls
6267c478bd9Sstevel@tonic-gate 		 * thread_nomigrate then the assertion in thread_allowmigrate
6277c478bd9Sstevel@tonic-gate 		 * would fail.
6287c478bd9Sstevel@tonic-gate 		 */
6297c478bd9Sstevel@tonic-gate 		t->t_nomigrate++;
6307c478bd9Sstevel@tonic-gate 		t->t_weakbound_cpu = cp;
6317c478bd9Sstevel@tonic-gate 		membar_producer();
6327c478bd9Sstevel@tonic-gate 		thread_unlock(curthread);
6337c478bd9Sstevel@tonic-gate 		/*
6347c478bd9Sstevel@tonic-gate 		 * Now that we have dropped the thread_lock another thread
6357c478bd9Sstevel@tonic-gate 		 * can set our t_weakbound_cpu, and will try to migrate us
6367c478bd9Sstevel@tonic-gate 		 * to the strongbound cpu (which will not be prevented by
6377c478bd9Sstevel@tonic-gate 		 * preemption being disabled since we're about to enable
6387c478bd9Sstevel@tonic-gate 		 * preemption).  We have granted the weakbinding to the current
6397c478bd9Sstevel@tonic-gate 		 * cpu, so again we are in the position that is is is possible
6407c478bd9Sstevel@tonic-gate 		 * that our weak and strong bindings differ.  Again this
6417c478bd9Sstevel@tonic-gate 		 * is catered for by dispatcher code which will favour our
6427c478bd9Sstevel@tonic-gate 		 * weak binding.
6437c478bd9Sstevel@tonic-gate 		 */
6447c478bd9Sstevel@tonic-gate 		kpreempt_enable();
6457c478bd9Sstevel@tonic-gate 	} else {
6467c478bd9Sstevel@tonic-gate 		/*
6477c478bd9Sstevel@tonic-gate 		 * Move to another cpu before granting the request by
6487c478bd9Sstevel@tonic-gate 		 * forcing this thread through preemption code.  When we
6497c478bd9Sstevel@tonic-gate 		 * get to set{front,back}dq called from CL_PREEMPT()
6507c478bd9Sstevel@tonic-gate 		 * cpu_choose() will be used to select a cpu to queue
6517c478bd9Sstevel@tonic-gate 		 * us on - that will see cpu_inmotion and take
6527c478bd9Sstevel@tonic-gate 		 * steps to avoid returning us to this cpu.
6537c478bd9Sstevel@tonic-gate 		 */
6547c478bd9Sstevel@tonic-gate 		cp->cpu_kprunrun = 1;
6557c478bd9Sstevel@tonic-gate 		thread_unlock(curthread);
6567c478bd9Sstevel@tonic-gate 		kpreempt_enable();	/* will call preempt() */
6577c478bd9Sstevel@tonic-gate 		goto again;
6587c478bd9Sstevel@tonic-gate 	}
6597c478bd9Sstevel@tonic-gate }
6607c478bd9Sstevel@tonic-gate 
6617c478bd9Sstevel@tonic-gate void
6627c478bd9Sstevel@tonic-gate thread_allowmigrate(void)
6637c478bd9Sstevel@tonic-gate {
6647c478bd9Sstevel@tonic-gate 	kthread_id_t t = curthread;
6657c478bd9Sstevel@tonic-gate 
6667c478bd9Sstevel@tonic-gate 	ASSERT(t->t_weakbound_cpu == CPU ||
6677c478bd9Sstevel@tonic-gate 	    (t->t_nomigrate < 0 && t->t_preempt > 0) ||
6687c478bd9Sstevel@tonic-gate 	    CPU_ON_INTR(CPU) || t->t_flag & T_INTR_THREAD ||
6697c478bd9Sstevel@tonic-gate 	    getpil() >= DISP_LEVEL);
6707c478bd9Sstevel@tonic-gate 
6717c478bd9Sstevel@tonic-gate 	if (CPU_ON_INTR(CPU) || (t->t_flag & T_INTR_THREAD) ||
6727c478bd9Sstevel@tonic-gate 	    getpil() >= DISP_LEVEL)
6737c478bd9Sstevel@tonic-gate 		return;
6747c478bd9Sstevel@tonic-gate 
6757c478bd9Sstevel@tonic-gate 	if (t->t_nomigrate < 0) {
6767c478bd9Sstevel@tonic-gate 		/*
6777c478bd9Sstevel@tonic-gate 		 * This thread was granted "weak binding" in the
6787c478bd9Sstevel@tonic-gate 		 * stronger form of kernel preemption disabling.
6797c478bd9Sstevel@tonic-gate 		 * Undo a level of nesting for both t_nomigrate
6807c478bd9Sstevel@tonic-gate 		 * and t_preempt.
6817c478bd9Sstevel@tonic-gate 		 */
6827c478bd9Sstevel@tonic-gate 		++t->t_nomigrate;
6837c478bd9Sstevel@tonic-gate 		kpreempt_enable();
6847c478bd9Sstevel@tonic-gate 	} else if (--t->t_nomigrate == 0) {
6857c478bd9Sstevel@tonic-gate 		/*
6867c478bd9Sstevel@tonic-gate 		 * Time to drop the weak binding.  We need to cater
6877c478bd9Sstevel@tonic-gate 		 * for the case where we're weakbound to a different
6887c478bd9Sstevel@tonic-gate 		 * cpu than that to which we're strongbound (a very
6897c478bd9Sstevel@tonic-gate 		 * temporary arrangement that must only persist until
6907c478bd9Sstevel@tonic-gate 		 * weak binding drops).  We don't acquire thread_lock
6917c478bd9Sstevel@tonic-gate 		 * here so even as this code executes t_bound_cpu
6927c478bd9Sstevel@tonic-gate 		 * may be changing.  So we disable preemption and
6937c478bd9Sstevel@tonic-gate 		 * a) in the case that t_bound_cpu changes while we
6947c478bd9Sstevel@tonic-gate 		 * have preemption disabled kprunrun will be set
6957c478bd9Sstevel@tonic-gate 		 * asynchronously, and b) if before disabling
6967c478bd9Sstevel@tonic-gate 		 * preemption we were already on a different cpu to
6977c478bd9Sstevel@tonic-gate 		 * our t_bound_cpu then we set kprunrun ourselves
6987c478bd9Sstevel@tonic-gate 		 * to force a trip through the dispatcher when
6997c478bd9Sstevel@tonic-gate 		 * preemption is enabled.
7007c478bd9Sstevel@tonic-gate 		 */
7017c478bd9Sstevel@tonic-gate 		kpreempt_disable();
7027c478bd9Sstevel@tonic-gate 		if (t->t_bound_cpu &&
7037c478bd9Sstevel@tonic-gate 		    t->t_weakbound_cpu != t->t_bound_cpu)
7047c478bd9Sstevel@tonic-gate 			CPU->cpu_kprunrun = 1;
7057c478bd9Sstevel@tonic-gate 		t->t_weakbound_cpu = NULL;
7067c478bd9Sstevel@tonic-gate 		membar_producer();
7077c478bd9Sstevel@tonic-gate 		kpreempt_enable();
7087c478bd9Sstevel@tonic-gate 	}
7097c478bd9Sstevel@tonic-gate }
7107c478bd9Sstevel@tonic-gate 
7117c478bd9Sstevel@tonic-gate /*
7127c478bd9Sstevel@tonic-gate  * weakbinding_stop can be used to temporarily cause weakbindings made
7137c478bd9Sstevel@tonic-gate  * with thread_nomigrate to be satisfied through the stronger action of
7147c478bd9Sstevel@tonic-gate  * kpreempt_disable.  weakbinding_start recommences normal weakbinding.
7157c478bd9Sstevel@tonic-gate  */
7167c478bd9Sstevel@tonic-gate 
7177c478bd9Sstevel@tonic-gate void
7187c478bd9Sstevel@tonic-gate weakbinding_stop(void)
7197c478bd9Sstevel@tonic-gate {
7207c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
7217c478bd9Sstevel@tonic-gate 	weakbindingbarrier = 1;
7227c478bd9Sstevel@tonic-gate 	membar_producer();	/* make visible before subsequent thread_lock */
7237c478bd9Sstevel@tonic-gate }
7247c478bd9Sstevel@tonic-gate 
7257c478bd9Sstevel@tonic-gate void
7267c478bd9Sstevel@tonic-gate weakbinding_start(void)
7277c478bd9Sstevel@tonic-gate {
7287c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
7297c478bd9Sstevel@tonic-gate 	weakbindingbarrier = 0;
7307c478bd9Sstevel@tonic-gate }
7317c478bd9Sstevel@tonic-gate 
7327c478bd9Sstevel@tonic-gate /*
7337c478bd9Sstevel@tonic-gate  * This routine is called to place the CPUs in a safe place so that
7347c478bd9Sstevel@tonic-gate  * one of them can be taken off line or placed on line.  What we are
7357c478bd9Sstevel@tonic-gate  * trying to do here is prevent a thread from traversing the list
7367c478bd9Sstevel@tonic-gate  * of active CPUs while we are changing it or from getting placed on
7377c478bd9Sstevel@tonic-gate  * the run queue of a CPU that has just gone off line.  We do this by
7387c478bd9Sstevel@tonic-gate  * creating a thread with the highest possible prio for each CPU and
7397c478bd9Sstevel@tonic-gate  * having it call this routine.  The advantage of this method is that
7407c478bd9Sstevel@tonic-gate  * we can eliminate all checks for CPU_ACTIVE in the disp routines.
7417c478bd9Sstevel@tonic-gate  * This makes disp faster at the expense of making p_online() slower
7427c478bd9Sstevel@tonic-gate  * which is a good trade off.
7437c478bd9Sstevel@tonic-gate  */
7447c478bd9Sstevel@tonic-gate static void
7457c478bd9Sstevel@tonic-gate cpu_pause(volatile char *safe)
7467c478bd9Sstevel@tonic-gate {
7477c478bd9Sstevel@tonic-gate 	int s;
7487c478bd9Sstevel@tonic-gate 	struct _cpu_pause_info *cpi = &cpu_pause_info;
7497c478bd9Sstevel@tonic-gate 
7507c478bd9Sstevel@tonic-gate 	ASSERT((curthread->t_bound_cpu != NULL) || (*safe == PAUSE_DIE));
7517c478bd9Sstevel@tonic-gate 
7527c478bd9Sstevel@tonic-gate 	while (*safe != PAUSE_DIE) {
7537c478bd9Sstevel@tonic-gate 		*safe = PAUSE_READY;
7547c478bd9Sstevel@tonic-gate 		membar_enter();		/* make sure stores are flushed */
7557c478bd9Sstevel@tonic-gate 		sema_v(&cpi->cp_sem);	/* signal requesting thread */
7567c478bd9Sstevel@tonic-gate 
7577c478bd9Sstevel@tonic-gate 		/*
7587c478bd9Sstevel@tonic-gate 		 * Wait here until all pause threads are running.  That
7597c478bd9Sstevel@tonic-gate 		 * indicates that it's safe to do the spl.  Until
7607c478bd9Sstevel@tonic-gate 		 * cpu_pause_info.cp_go is set, we don't want to spl
7617c478bd9Sstevel@tonic-gate 		 * because that might block clock interrupts needed
7627c478bd9Sstevel@tonic-gate 		 * to preempt threads on other CPUs.
7637c478bd9Sstevel@tonic-gate 		 */
7647c478bd9Sstevel@tonic-gate 		while (cpi->cp_go == 0)
7657c478bd9Sstevel@tonic-gate 			;
7667c478bd9Sstevel@tonic-gate 		/*
7677c478bd9Sstevel@tonic-gate 		 * Even though we are at the highest disp prio, we need
7687c478bd9Sstevel@tonic-gate 		 * to block out all interrupts below LOCK_LEVEL so that
7697c478bd9Sstevel@tonic-gate 		 * an intr doesn't come in, wake up a thread, and call
7707c478bd9Sstevel@tonic-gate 		 * setbackdq/setfrontdq.
7717c478bd9Sstevel@tonic-gate 		 */
7727c478bd9Sstevel@tonic-gate 		s = splhigh();
7737c478bd9Sstevel@tonic-gate 		/*
7747c478bd9Sstevel@tonic-gate 		 * This cpu is now safe.
7757c478bd9Sstevel@tonic-gate 		 */
7767c478bd9Sstevel@tonic-gate 		*safe = PAUSE_WAIT;
7777c478bd9Sstevel@tonic-gate 		membar_enter();		/* make sure stores are flushed */
7787c478bd9Sstevel@tonic-gate 
7797c478bd9Sstevel@tonic-gate 		/*
7807c478bd9Sstevel@tonic-gate 		 * Now we wait.  When we are allowed to continue, safe will
7817c478bd9Sstevel@tonic-gate 		 * be set to PAUSE_IDLE.
7827c478bd9Sstevel@tonic-gate 		 */
7837c478bd9Sstevel@tonic-gate 		while (*safe != PAUSE_IDLE)
7847c478bd9Sstevel@tonic-gate 			;
7857c478bd9Sstevel@tonic-gate 
7867c478bd9Sstevel@tonic-gate 		splx(s);
7877c478bd9Sstevel@tonic-gate 		/*
7887c478bd9Sstevel@tonic-gate 		 * Waiting is at an end. Switch out of cpu_pause
7897c478bd9Sstevel@tonic-gate 		 * loop and resume useful work.
7907c478bd9Sstevel@tonic-gate 		 */
7917c478bd9Sstevel@tonic-gate 		swtch();
7927c478bd9Sstevel@tonic-gate 	}
7937c478bd9Sstevel@tonic-gate 
7947c478bd9Sstevel@tonic-gate 	mutex_enter(&pause_free_mutex);
7957c478bd9Sstevel@tonic-gate 	*safe = PAUSE_DEAD;
7967c478bd9Sstevel@tonic-gate 	cv_broadcast(&pause_free_cv);
7977c478bd9Sstevel@tonic-gate 	mutex_exit(&pause_free_mutex);
7987c478bd9Sstevel@tonic-gate }
7997c478bd9Sstevel@tonic-gate 
8007c478bd9Sstevel@tonic-gate /*
8017c478bd9Sstevel@tonic-gate  * Allow the cpus to start running again.
8027c478bd9Sstevel@tonic-gate  */
8037c478bd9Sstevel@tonic-gate void
8047c478bd9Sstevel@tonic-gate start_cpus()
8057c478bd9Sstevel@tonic-gate {
8067c478bd9Sstevel@tonic-gate 	int i;
8077c478bd9Sstevel@tonic-gate 
8087c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
8097c478bd9Sstevel@tonic-gate 	ASSERT(cpu_pause_info.cp_paused);
8107c478bd9Sstevel@tonic-gate 	cpu_pause_info.cp_paused = NULL;
8117c478bd9Sstevel@tonic-gate 	for (i = 0; i < NCPU; i++)
8127c478bd9Sstevel@tonic-gate 		safe_list[i] = PAUSE_IDLE;
8137c478bd9Sstevel@tonic-gate 	membar_enter();			/* make sure stores are flushed */
8147c478bd9Sstevel@tonic-gate 	affinity_clear();
8157c478bd9Sstevel@tonic-gate 	splx(cpu_pause_info.cp_spl);
8167c478bd9Sstevel@tonic-gate 	kpreempt_enable();
8177c478bd9Sstevel@tonic-gate }
8187c478bd9Sstevel@tonic-gate 
8197c478bd9Sstevel@tonic-gate /*
8207c478bd9Sstevel@tonic-gate  * Allocate a pause thread for a CPU.
8217c478bd9Sstevel@tonic-gate  */
8227c478bd9Sstevel@tonic-gate static void
8237c478bd9Sstevel@tonic-gate cpu_pause_alloc(cpu_t *cp)
8247c478bd9Sstevel@tonic-gate {
8257c478bd9Sstevel@tonic-gate 	kthread_id_t	t;
8267c478bd9Sstevel@tonic-gate 	int		cpun = cp->cpu_id;
8277c478bd9Sstevel@tonic-gate 
8287c478bd9Sstevel@tonic-gate 	/*
8297c478bd9Sstevel@tonic-gate 	 * Note, v.v_nglobpris will not change value as long as I hold
8307c478bd9Sstevel@tonic-gate 	 * cpu_lock.
8317c478bd9Sstevel@tonic-gate 	 */
8327c478bd9Sstevel@tonic-gate 	t = thread_create(NULL, 0, cpu_pause, (caddr_t)&safe_list[cpun],
8337c478bd9Sstevel@tonic-gate 	    0, &p0, TS_STOPPED, v.v_nglobpris - 1);
8347c478bd9Sstevel@tonic-gate 	thread_lock(t);
8357c478bd9Sstevel@tonic-gate 	t->t_bound_cpu = cp;
8367c478bd9Sstevel@tonic-gate 	t->t_disp_queue = cp->cpu_disp;
8377c478bd9Sstevel@tonic-gate 	t->t_affinitycnt = 1;
8387c478bd9Sstevel@tonic-gate 	t->t_preempt = 1;
8397c478bd9Sstevel@tonic-gate 	thread_unlock(t);
8407c478bd9Sstevel@tonic-gate 	cp->cpu_pause_thread = t;
8417c478bd9Sstevel@tonic-gate 	/*
8427c478bd9Sstevel@tonic-gate 	 * Registering a thread in the callback table is usually done
8437c478bd9Sstevel@tonic-gate 	 * in the initialization code of the thread.  In this
8447c478bd9Sstevel@tonic-gate 	 * case, we do it right after thread creation because the
8457c478bd9Sstevel@tonic-gate 	 * thread itself may never run, and we need to register the
8467c478bd9Sstevel@tonic-gate 	 * fact that it is safe for cpr suspend.
8477c478bd9Sstevel@tonic-gate 	 */
8487c478bd9Sstevel@tonic-gate 	CALLB_CPR_INIT_SAFE(t, "cpu_pause");
8497c478bd9Sstevel@tonic-gate }
8507c478bd9Sstevel@tonic-gate 
8517c478bd9Sstevel@tonic-gate /*
8527c478bd9Sstevel@tonic-gate  * Free a pause thread for a CPU.
8537c478bd9Sstevel@tonic-gate  */
8547c478bd9Sstevel@tonic-gate static void
8557c478bd9Sstevel@tonic-gate cpu_pause_free(cpu_t *cp)
8567c478bd9Sstevel@tonic-gate {
8577c478bd9Sstevel@tonic-gate 	kthread_id_t	t;
8587c478bd9Sstevel@tonic-gate 	int		cpun = cp->cpu_id;
8597c478bd9Sstevel@tonic-gate 
8607c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
8617c478bd9Sstevel@tonic-gate 	/*
8627c478bd9Sstevel@tonic-gate 	 * We have to get the thread and tell him to die.
8637c478bd9Sstevel@tonic-gate 	 */
8647c478bd9Sstevel@tonic-gate 	if ((t = cp->cpu_pause_thread) == NULL) {
8657c478bd9Sstevel@tonic-gate 		ASSERT(safe_list[cpun] == PAUSE_IDLE);
8667c478bd9Sstevel@tonic-gate 		return;
8677c478bd9Sstevel@tonic-gate 	}
8687c478bd9Sstevel@tonic-gate 	thread_lock(t);
8697c478bd9Sstevel@tonic-gate 	t->t_cpu = CPU;		/* disp gets upset if last cpu is quiesced. */
8707c478bd9Sstevel@tonic-gate 	t->t_bound_cpu = NULL;	/* Must un-bind; cpu may not be running. */
8717c478bd9Sstevel@tonic-gate 	t->t_pri = v.v_nglobpris - 1;
8727c478bd9Sstevel@tonic-gate 	ASSERT(safe_list[cpun] == PAUSE_IDLE);
8737c478bd9Sstevel@tonic-gate 	safe_list[cpun] = PAUSE_DIE;
8747c478bd9Sstevel@tonic-gate 	THREAD_TRANSITION(t);
8757c478bd9Sstevel@tonic-gate 	setbackdq(t);
8767c478bd9Sstevel@tonic-gate 	thread_unlock_nopreempt(t);
8777c478bd9Sstevel@tonic-gate 
8787c478bd9Sstevel@tonic-gate 	/*
8797c478bd9Sstevel@tonic-gate 	 * If we don't wait for the thread to actually die, it may try to
8807c478bd9Sstevel@tonic-gate 	 * run on the wrong cpu as part of an actual call to pause_cpus().
8817c478bd9Sstevel@tonic-gate 	 */
8827c478bd9Sstevel@tonic-gate 	mutex_enter(&pause_free_mutex);
8837c478bd9Sstevel@tonic-gate 	while (safe_list[cpun] != PAUSE_DEAD) {
8847c478bd9Sstevel@tonic-gate 		cv_wait(&pause_free_cv, &pause_free_mutex);
8857c478bd9Sstevel@tonic-gate 	}
8867c478bd9Sstevel@tonic-gate 	mutex_exit(&pause_free_mutex);
8877c478bd9Sstevel@tonic-gate 	safe_list[cpun] = PAUSE_IDLE;
8887c478bd9Sstevel@tonic-gate 
8897c478bd9Sstevel@tonic-gate 	cp->cpu_pause_thread = NULL;
8907c478bd9Sstevel@tonic-gate }
8917c478bd9Sstevel@tonic-gate 
8927c478bd9Sstevel@tonic-gate /*
8937c478bd9Sstevel@tonic-gate  * Initialize basic structures for pausing CPUs.
8947c478bd9Sstevel@tonic-gate  */
8957c478bd9Sstevel@tonic-gate void
8967c478bd9Sstevel@tonic-gate cpu_pause_init()
8977c478bd9Sstevel@tonic-gate {
8987c478bd9Sstevel@tonic-gate 	sema_init(&cpu_pause_info.cp_sem, 0, NULL, SEMA_DEFAULT, NULL);
8997c478bd9Sstevel@tonic-gate 	/*
9007c478bd9Sstevel@tonic-gate 	 * Create initial CPU pause thread.
9017c478bd9Sstevel@tonic-gate 	 */
9027c478bd9Sstevel@tonic-gate 	cpu_pause_alloc(CPU);
9037c478bd9Sstevel@tonic-gate }
9047c478bd9Sstevel@tonic-gate 
9057c478bd9Sstevel@tonic-gate /*
9067c478bd9Sstevel@tonic-gate  * Start the threads used to pause another CPU.
9077c478bd9Sstevel@tonic-gate  */
9087c478bd9Sstevel@tonic-gate static int
9097c478bd9Sstevel@tonic-gate cpu_pause_start(processorid_t cpu_id)
9107c478bd9Sstevel@tonic-gate {
9117c478bd9Sstevel@tonic-gate 	int	i;
9127c478bd9Sstevel@tonic-gate 	int	cpu_count = 0;
9137c478bd9Sstevel@tonic-gate 
9147c478bd9Sstevel@tonic-gate 	for (i = 0; i < NCPU; i++) {
9157c478bd9Sstevel@tonic-gate 		cpu_t		*cp;
9167c478bd9Sstevel@tonic-gate 		kthread_id_t	t;
9177c478bd9Sstevel@tonic-gate 
9187c478bd9Sstevel@tonic-gate 		cp = cpu[i];
9197c478bd9Sstevel@tonic-gate 		if (!CPU_IN_SET(cpu_available, i) || (i == cpu_id)) {
9207c478bd9Sstevel@tonic-gate 			safe_list[i] = PAUSE_WAIT;
9217c478bd9Sstevel@tonic-gate 			continue;
9227c478bd9Sstevel@tonic-gate 		}
9237c478bd9Sstevel@tonic-gate 
9247c478bd9Sstevel@tonic-gate 		/*
9257c478bd9Sstevel@tonic-gate 		 * Skip CPU if it is quiesced or not yet started.
9267c478bd9Sstevel@tonic-gate 		 */
9277c478bd9Sstevel@tonic-gate 		if ((cp->cpu_flags & (CPU_QUIESCED | CPU_READY)) != CPU_READY) {
9287c478bd9Sstevel@tonic-gate 			safe_list[i] = PAUSE_WAIT;
9297c478bd9Sstevel@tonic-gate 			continue;
9307c478bd9Sstevel@tonic-gate 		}
9317c478bd9Sstevel@tonic-gate 
9327c478bd9Sstevel@tonic-gate 		/*
9337c478bd9Sstevel@tonic-gate 		 * Start this CPU's pause thread.
9347c478bd9Sstevel@tonic-gate 		 */
9357c478bd9Sstevel@tonic-gate 		t = cp->cpu_pause_thread;
9367c478bd9Sstevel@tonic-gate 		thread_lock(t);
9377c478bd9Sstevel@tonic-gate 		/*
9387c478bd9Sstevel@tonic-gate 		 * Reset the priority, since nglobpris may have
9397c478bd9Sstevel@tonic-gate 		 * changed since the thread was created, if someone
9407c478bd9Sstevel@tonic-gate 		 * has loaded the RT (or some other) scheduling
9417c478bd9Sstevel@tonic-gate 		 * class.
9427c478bd9Sstevel@tonic-gate 		 */
9437c478bd9Sstevel@tonic-gate 		t->t_pri = v.v_nglobpris - 1;
9447c478bd9Sstevel@tonic-gate 		THREAD_TRANSITION(t);
9457c478bd9Sstevel@tonic-gate 		setbackdq(t);
9467c478bd9Sstevel@tonic-gate 		thread_unlock_nopreempt(t);
9477c478bd9Sstevel@tonic-gate 		++cpu_count;
9487c478bd9Sstevel@tonic-gate 	}
9497c478bd9Sstevel@tonic-gate 	return (cpu_count);
9507c478bd9Sstevel@tonic-gate }
9517c478bd9Sstevel@tonic-gate 
9527c478bd9Sstevel@tonic-gate 
9537c478bd9Sstevel@tonic-gate /*
9547c478bd9Sstevel@tonic-gate  * Pause all of the CPUs except the one we are on by creating a high
9557c478bd9Sstevel@tonic-gate  * priority thread bound to those CPUs.
9567c478bd9Sstevel@tonic-gate  *
9577c478bd9Sstevel@tonic-gate  * Note that one must be extremely careful regarding code
9587c478bd9Sstevel@tonic-gate  * executed while CPUs are paused.  Since a CPU may be paused
9597c478bd9Sstevel@tonic-gate  * while a thread scheduling on that CPU is holding an adaptive
9607c478bd9Sstevel@tonic-gate  * lock, code executed with CPUs paused must not acquire adaptive
9617c478bd9Sstevel@tonic-gate  * (or low-level spin) locks.  Also, such code must not block,
9627c478bd9Sstevel@tonic-gate  * since the thread that is supposed to initiate the wakeup may
9637c478bd9Sstevel@tonic-gate  * never run.
9647c478bd9Sstevel@tonic-gate  *
9657c478bd9Sstevel@tonic-gate  * With a few exceptions, the restrictions on code executed with CPUs
9667c478bd9Sstevel@tonic-gate  * paused match those for code executed at high-level interrupt
9677c478bd9Sstevel@tonic-gate  * context.
9687c478bd9Sstevel@tonic-gate  */
9697c478bd9Sstevel@tonic-gate void
9707c478bd9Sstevel@tonic-gate pause_cpus(cpu_t *off_cp)
9717c478bd9Sstevel@tonic-gate {
9727c478bd9Sstevel@tonic-gate 	processorid_t	cpu_id;
9737c478bd9Sstevel@tonic-gate 	int		i;
9747c478bd9Sstevel@tonic-gate 	struct _cpu_pause_info	*cpi = &cpu_pause_info;
9757c478bd9Sstevel@tonic-gate 
9767c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
9777c478bd9Sstevel@tonic-gate 	ASSERT(cpi->cp_paused == NULL);
9787c478bd9Sstevel@tonic-gate 	cpi->cp_count = 0;
9797c478bd9Sstevel@tonic-gate 	cpi->cp_go = 0;
9807c478bd9Sstevel@tonic-gate 	for (i = 0; i < NCPU; i++)
9817c478bd9Sstevel@tonic-gate 		safe_list[i] = PAUSE_IDLE;
9827c478bd9Sstevel@tonic-gate 	kpreempt_disable();
9837c478bd9Sstevel@tonic-gate 
9847c478bd9Sstevel@tonic-gate 	/*
9857c478bd9Sstevel@tonic-gate 	 * If running on the cpu that is going offline, get off it.
9867c478bd9Sstevel@tonic-gate 	 * This is so that it won't be necessary to rechoose a CPU
9877c478bd9Sstevel@tonic-gate 	 * when done.
9887c478bd9Sstevel@tonic-gate 	 */
9897c478bd9Sstevel@tonic-gate 	if (CPU == off_cp)
9907c478bd9Sstevel@tonic-gate 		cpu_id = off_cp->cpu_next_part->cpu_id;
9917c478bd9Sstevel@tonic-gate 	else
9927c478bd9Sstevel@tonic-gate 		cpu_id = CPU->cpu_id;
9937c478bd9Sstevel@tonic-gate 	affinity_set(cpu_id);
9947c478bd9Sstevel@tonic-gate 
9957c478bd9Sstevel@tonic-gate 	/*
9967c478bd9Sstevel@tonic-gate 	 * Start the pause threads and record how many were started
9977c478bd9Sstevel@tonic-gate 	 */
9987c478bd9Sstevel@tonic-gate 	cpi->cp_count = cpu_pause_start(cpu_id);
9997c478bd9Sstevel@tonic-gate 
10007c478bd9Sstevel@tonic-gate 	/*
10017c478bd9Sstevel@tonic-gate 	 * Now wait for all CPUs to be running the pause thread.
10027c478bd9Sstevel@tonic-gate 	 */
10037c478bd9Sstevel@tonic-gate 	while (cpi->cp_count > 0) {
10047c478bd9Sstevel@tonic-gate 		/*
10057c478bd9Sstevel@tonic-gate 		 * Spin reading the count without grabbing the disp
10067c478bd9Sstevel@tonic-gate 		 * lock to make sure we don't prevent the pause
10077c478bd9Sstevel@tonic-gate 		 * threads from getting the lock.
10087c478bd9Sstevel@tonic-gate 		 */
10097c478bd9Sstevel@tonic-gate 		while (sema_held(&cpi->cp_sem))
10107c478bd9Sstevel@tonic-gate 			;
10117c478bd9Sstevel@tonic-gate 		if (sema_tryp(&cpi->cp_sem))
10127c478bd9Sstevel@tonic-gate 			--cpi->cp_count;
10137c478bd9Sstevel@tonic-gate 	}
10147c478bd9Sstevel@tonic-gate 	cpi->cp_go = 1;			/* all have reached cpu_pause */
10157c478bd9Sstevel@tonic-gate 
10167c478bd9Sstevel@tonic-gate 	/*
10177c478bd9Sstevel@tonic-gate 	 * Now wait for all CPUs to spl. (Transition from PAUSE_READY
10187c478bd9Sstevel@tonic-gate 	 * to PAUSE_WAIT.)
10197c478bd9Sstevel@tonic-gate 	 */
10207c478bd9Sstevel@tonic-gate 	for (i = 0; i < NCPU; i++) {
10217c478bd9Sstevel@tonic-gate 		while (safe_list[i] != PAUSE_WAIT)
10227c478bd9Sstevel@tonic-gate 			;
10237c478bd9Sstevel@tonic-gate 	}
10247c478bd9Sstevel@tonic-gate 	cpi->cp_spl = splhigh();	/* block dispatcher on this CPU */
10257c478bd9Sstevel@tonic-gate 	cpi->cp_paused = curthread;
10267c478bd9Sstevel@tonic-gate }
10277c478bd9Sstevel@tonic-gate 
10287c478bd9Sstevel@tonic-gate /*
10297c478bd9Sstevel@tonic-gate  * Check whether the current thread has CPUs paused
10307c478bd9Sstevel@tonic-gate  */
10317c478bd9Sstevel@tonic-gate int
10327c478bd9Sstevel@tonic-gate cpus_paused(void)
10337c478bd9Sstevel@tonic-gate {
10347c478bd9Sstevel@tonic-gate 	if (cpu_pause_info.cp_paused != NULL) {
10357c478bd9Sstevel@tonic-gate 		ASSERT(cpu_pause_info.cp_paused == curthread);
10367c478bd9Sstevel@tonic-gate 		return (1);
10377c478bd9Sstevel@tonic-gate 	}
10387c478bd9Sstevel@tonic-gate 	return (0);
10397c478bd9Sstevel@tonic-gate }
10407c478bd9Sstevel@tonic-gate 
10417c478bd9Sstevel@tonic-gate static cpu_t *
10427c478bd9Sstevel@tonic-gate cpu_get_all(processorid_t cpun)
10437c478bd9Sstevel@tonic-gate {
10447c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
10457c478bd9Sstevel@tonic-gate 
10467c478bd9Sstevel@tonic-gate 	if (cpun >= NCPU || cpun < 0 || !CPU_IN_SET(cpu_available, cpun))
10477c478bd9Sstevel@tonic-gate 		return (NULL);
10487c478bd9Sstevel@tonic-gate 	return (cpu[cpun]);
10497c478bd9Sstevel@tonic-gate }
10507c478bd9Sstevel@tonic-gate 
10517c478bd9Sstevel@tonic-gate /*
10527c478bd9Sstevel@tonic-gate  * Check whether cpun is a valid processor id and whether it should be
10537c478bd9Sstevel@tonic-gate  * visible from the current zone. If it is, return a pointer to the
10547c478bd9Sstevel@tonic-gate  * associated CPU structure.
10557c478bd9Sstevel@tonic-gate  */
10567c478bd9Sstevel@tonic-gate cpu_t *
10577c478bd9Sstevel@tonic-gate cpu_get(processorid_t cpun)
10587c478bd9Sstevel@tonic-gate {
10597c478bd9Sstevel@tonic-gate 	cpu_t *c;
10607c478bd9Sstevel@tonic-gate 
10617c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
10627c478bd9Sstevel@tonic-gate 	c = cpu_get_all(cpun);
10637c478bd9Sstevel@tonic-gate 	if (c != NULL && !INGLOBALZONE(curproc) && pool_pset_enabled() &&
10647c478bd9Sstevel@tonic-gate 	    zone_pset_get(curproc->p_zone) != cpupart_query_cpu(c))
10657c478bd9Sstevel@tonic-gate 		return (NULL);
10667c478bd9Sstevel@tonic-gate 	return (c);
10677c478bd9Sstevel@tonic-gate }
10687c478bd9Sstevel@tonic-gate 
10697c478bd9Sstevel@tonic-gate /*
10707c478bd9Sstevel@tonic-gate  * The following functions should be used to check CPU states in the kernel.
10717c478bd9Sstevel@tonic-gate  * They should be invoked with cpu_lock held.  Kernel subsystems interested
10727c478bd9Sstevel@tonic-gate  * in CPU states should *not* use cpu_get_state() and various P_ONLINE/etc
10737c478bd9Sstevel@tonic-gate  * states.  Those are for user-land (and system call) use only.
10747c478bd9Sstevel@tonic-gate  */
10757c478bd9Sstevel@tonic-gate 
10767c478bd9Sstevel@tonic-gate /*
10777c478bd9Sstevel@tonic-gate  * Determine whether the CPU is online and handling interrupts.
10787c478bd9Sstevel@tonic-gate  */
10797c478bd9Sstevel@tonic-gate int
10807c478bd9Sstevel@tonic-gate cpu_is_online(cpu_t *cpu)
10817c478bd9Sstevel@tonic-gate {
10827c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
10837c478bd9Sstevel@tonic-gate 	return (cpu_flagged_online(cpu->cpu_flags));
10847c478bd9Sstevel@tonic-gate }
10857c478bd9Sstevel@tonic-gate 
10867c478bd9Sstevel@tonic-gate /*
10877c478bd9Sstevel@tonic-gate  * Determine whether the CPU is offline (this includes spare and faulted).
10887c478bd9Sstevel@tonic-gate  */
10897c478bd9Sstevel@tonic-gate int
10907c478bd9Sstevel@tonic-gate cpu_is_offline(cpu_t *cpu)
10917c478bd9Sstevel@tonic-gate {
10927c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
10937c478bd9Sstevel@tonic-gate 	return (cpu_flagged_offline(cpu->cpu_flags));
10947c478bd9Sstevel@tonic-gate }
10957c478bd9Sstevel@tonic-gate 
10967c478bd9Sstevel@tonic-gate /*
10977c478bd9Sstevel@tonic-gate  * Determine whether the CPU is powered off.
10987c478bd9Sstevel@tonic-gate  */
10997c478bd9Sstevel@tonic-gate int
11007c478bd9Sstevel@tonic-gate cpu_is_poweredoff(cpu_t *cpu)
11017c478bd9Sstevel@tonic-gate {
11027c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
11037c478bd9Sstevel@tonic-gate 	return (cpu_flagged_poweredoff(cpu->cpu_flags));
11047c478bd9Sstevel@tonic-gate }
11057c478bd9Sstevel@tonic-gate 
11067c478bd9Sstevel@tonic-gate /*
11077c478bd9Sstevel@tonic-gate  * Determine whether the CPU is handling interrupts.
11087c478bd9Sstevel@tonic-gate  */
11097c478bd9Sstevel@tonic-gate int
11107c478bd9Sstevel@tonic-gate cpu_is_nointr(cpu_t *cpu)
11117c478bd9Sstevel@tonic-gate {
11127c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
11137c478bd9Sstevel@tonic-gate 	return (cpu_flagged_nointr(cpu->cpu_flags));
11147c478bd9Sstevel@tonic-gate }
11157c478bd9Sstevel@tonic-gate 
11167c478bd9Sstevel@tonic-gate /*
11177c478bd9Sstevel@tonic-gate  * Determine whether the CPU is active (scheduling threads).
11187c478bd9Sstevel@tonic-gate  */
11197c478bd9Sstevel@tonic-gate int
11207c478bd9Sstevel@tonic-gate cpu_is_active(cpu_t *cpu)
11217c478bd9Sstevel@tonic-gate {
11227c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
11237c478bd9Sstevel@tonic-gate 	return (cpu_flagged_active(cpu->cpu_flags));
11247c478bd9Sstevel@tonic-gate }
11257c478bd9Sstevel@tonic-gate 
11267c478bd9Sstevel@tonic-gate /*
11277c478bd9Sstevel@tonic-gate  * Same as above, but these require cpu_flags instead of cpu_t pointers.
11287c478bd9Sstevel@tonic-gate  */
11297c478bd9Sstevel@tonic-gate int
11307c478bd9Sstevel@tonic-gate cpu_flagged_online(cpu_flag_t cpu_flags)
11317c478bd9Sstevel@tonic-gate {
11327c478bd9Sstevel@tonic-gate 	return (cpu_flagged_active(cpu_flags) &&
11337c478bd9Sstevel@tonic-gate 	    (cpu_flags & CPU_ENABLE));
11347c478bd9Sstevel@tonic-gate }
11357c478bd9Sstevel@tonic-gate 
11367c478bd9Sstevel@tonic-gate int
11377c478bd9Sstevel@tonic-gate cpu_flagged_offline(cpu_flag_t cpu_flags)
11387c478bd9Sstevel@tonic-gate {
11397c478bd9Sstevel@tonic-gate 	return (((cpu_flags & CPU_POWEROFF) == 0) &&
11407c478bd9Sstevel@tonic-gate 	    ((cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY));
11417c478bd9Sstevel@tonic-gate }
11427c478bd9Sstevel@tonic-gate 
11437c478bd9Sstevel@tonic-gate int
11447c478bd9Sstevel@tonic-gate cpu_flagged_poweredoff(cpu_flag_t cpu_flags)
11457c478bd9Sstevel@tonic-gate {
11467c478bd9Sstevel@tonic-gate 	return ((cpu_flags & CPU_POWEROFF) == CPU_POWEROFF);
11477c478bd9Sstevel@tonic-gate }
11487c478bd9Sstevel@tonic-gate 
11497c478bd9Sstevel@tonic-gate int
11507c478bd9Sstevel@tonic-gate cpu_flagged_nointr(cpu_flag_t cpu_flags)
11517c478bd9Sstevel@tonic-gate {
11527c478bd9Sstevel@tonic-gate 	return (cpu_flagged_active(cpu_flags) &&
11537c478bd9Sstevel@tonic-gate 	    (cpu_flags & CPU_ENABLE) == 0);
11547c478bd9Sstevel@tonic-gate }
11557c478bd9Sstevel@tonic-gate 
11567c478bd9Sstevel@tonic-gate int
11577c478bd9Sstevel@tonic-gate cpu_flagged_active(cpu_flag_t cpu_flags)
11587c478bd9Sstevel@tonic-gate {
11597c478bd9Sstevel@tonic-gate 	return (((cpu_flags & (CPU_POWEROFF | CPU_FAULTED | CPU_SPARE)) == 0) &&
11607c478bd9Sstevel@tonic-gate 	    ((cpu_flags & (CPU_READY | CPU_OFFLINE)) == CPU_READY));
11617c478bd9Sstevel@tonic-gate }
11627c478bd9Sstevel@tonic-gate 
11637c478bd9Sstevel@tonic-gate /*
11647c478bd9Sstevel@tonic-gate  * Bring the indicated CPU online.
11657c478bd9Sstevel@tonic-gate  */
11667c478bd9Sstevel@tonic-gate int
11677c478bd9Sstevel@tonic-gate cpu_online(cpu_t *cp)
11687c478bd9Sstevel@tonic-gate {
11697c478bd9Sstevel@tonic-gate 	int	error = 0;
11707c478bd9Sstevel@tonic-gate 
11717c478bd9Sstevel@tonic-gate 	/*
11727c478bd9Sstevel@tonic-gate 	 * Handle on-line request.
11737c478bd9Sstevel@tonic-gate 	 *	This code must put the new CPU on the active list before
11747c478bd9Sstevel@tonic-gate 	 *	starting it because it will not be paused, and will start
11757c478bd9Sstevel@tonic-gate 	 * 	using the active list immediately.  The real start occurs
11767c478bd9Sstevel@tonic-gate 	 *	when the CPU_QUIESCED flag is turned off.
11777c478bd9Sstevel@tonic-gate 	 */
11787c478bd9Sstevel@tonic-gate 
11797c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
11807c478bd9Sstevel@tonic-gate 
11817c478bd9Sstevel@tonic-gate 	/*
11827c478bd9Sstevel@tonic-gate 	 * Put all the cpus into a known safe place.
11837c478bd9Sstevel@tonic-gate 	 * No mutexes can be entered while CPUs are paused.
11847c478bd9Sstevel@tonic-gate 	 */
11857c478bd9Sstevel@tonic-gate 	error = mp_cpu_start(cp);	/* arch-dep hook */
11867c478bd9Sstevel@tonic-gate 	if (error == 0) {
11877c478bd9Sstevel@tonic-gate 		pause_cpus(NULL);
11887c478bd9Sstevel@tonic-gate 		cpu_add_active_internal(cp);
11897c478bd9Sstevel@tonic-gate 		if (cp->cpu_flags & CPU_FAULTED) {
11907c478bd9Sstevel@tonic-gate 			cp->cpu_flags &= ~CPU_FAULTED;
11917c478bd9Sstevel@tonic-gate 			mp_cpu_faulted_exit(cp);
11927c478bd9Sstevel@tonic-gate 		}
11937c478bd9Sstevel@tonic-gate 		cp->cpu_flags &= ~(CPU_QUIESCED | CPU_OFFLINE | CPU_FROZEN |
11947c478bd9Sstevel@tonic-gate 		    CPU_SPARE);
11957c478bd9Sstevel@tonic-gate 		start_cpus();
11967c478bd9Sstevel@tonic-gate 		cpu_stats_kstat_create(cp);
11977c478bd9Sstevel@tonic-gate 		cpu_create_intrstat(cp);
11987c478bd9Sstevel@tonic-gate 		lgrp_kstat_create(cp);
11997c478bd9Sstevel@tonic-gate 		cpu_state_change_notify(cp->cpu_id, CPU_ON);
12007c478bd9Sstevel@tonic-gate 		cpu_intr_enable(cp);	/* arch-dep hook */
12017c478bd9Sstevel@tonic-gate 		cyclic_online(cp);
12027c478bd9Sstevel@tonic-gate 		poke_cpu(cp->cpu_id);
12037c478bd9Sstevel@tonic-gate 	}
12047c478bd9Sstevel@tonic-gate 
12057c478bd9Sstevel@tonic-gate 	return (error);
12067c478bd9Sstevel@tonic-gate }
12077c478bd9Sstevel@tonic-gate 
12087c478bd9Sstevel@tonic-gate /*
12097c478bd9Sstevel@tonic-gate  * Take the indicated CPU offline.
12107c478bd9Sstevel@tonic-gate  */
12117c478bd9Sstevel@tonic-gate int
12127c478bd9Sstevel@tonic-gate cpu_offline(cpu_t *cp, int flags)
12137c478bd9Sstevel@tonic-gate {
12147c478bd9Sstevel@tonic-gate 	cpupart_t *pp;
12157c478bd9Sstevel@tonic-gate 	int	error = 0;
12167c478bd9Sstevel@tonic-gate 	cpu_t	*ncp;
12177c478bd9Sstevel@tonic-gate 	int	intr_enable;
12187c478bd9Sstevel@tonic-gate 	int	cyclic_off = 0;
12197c478bd9Sstevel@tonic-gate 	int	loop_count;
12207c478bd9Sstevel@tonic-gate 	int	no_quiesce = 0;
12217c478bd9Sstevel@tonic-gate 	int	(*bound_func)(struct cpu *, int);
12227c478bd9Sstevel@tonic-gate 	kthread_t *t;
12237c478bd9Sstevel@tonic-gate 	lpl_t	*cpu_lpl;
12247c478bd9Sstevel@tonic-gate 	proc_t	*p;
12257c478bd9Sstevel@tonic-gate 	int	lgrp_diff_lpl;
12267c478bd9Sstevel@tonic-gate 
12277c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
12287c478bd9Sstevel@tonic-gate 
12297c478bd9Sstevel@tonic-gate 	/*
12307c478bd9Sstevel@tonic-gate 	 * If we're going from faulted or spare to offline, just
12317c478bd9Sstevel@tonic-gate 	 * clear these flags and update CPU state.
12327c478bd9Sstevel@tonic-gate 	 */
12337c478bd9Sstevel@tonic-gate 	if (cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) {
12347c478bd9Sstevel@tonic-gate 		if (cp->cpu_flags & CPU_FAULTED) {
12357c478bd9Sstevel@tonic-gate 			cp->cpu_flags &= ~CPU_FAULTED;
12367c478bd9Sstevel@tonic-gate 			mp_cpu_faulted_exit(cp);
12377c478bd9Sstevel@tonic-gate 		}
12387c478bd9Sstevel@tonic-gate 		cp->cpu_flags &= ~CPU_SPARE;
12397c478bd9Sstevel@tonic-gate 		cpu_set_state(cp);
12407c478bd9Sstevel@tonic-gate 		return (0);
12417c478bd9Sstevel@tonic-gate 	}
12427c478bd9Sstevel@tonic-gate 
12437c478bd9Sstevel@tonic-gate 	/*
12447c478bd9Sstevel@tonic-gate 	 * Handle off-line request.
12457c478bd9Sstevel@tonic-gate 	 */
12467c478bd9Sstevel@tonic-gate 	pp = cp->cpu_part;
12477c478bd9Sstevel@tonic-gate 	/*
12487c478bd9Sstevel@tonic-gate 	 * Don't offline last online CPU in partition
12497c478bd9Sstevel@tonic-gate 	 */
12507c478bd9Sstevel@tonic-gate 	if (ncpus_online <= 1 || pp->cp_ncpus <= 1 || cpu_intr_count(cp) < 2)
12517c478bd9Sstevel@tonic-gate 		return (EBUSY);
12527c478bd9Sstevel@tonic-gate 	/*
12537c478bd9Sstevel@tonic-gate 	 * Unbind all thread bound to our CPU if we were asked to.
12547c478bd9Sstevel@tonic-gate 	 */
12557c478bd9Sstevel@tonic-gate 	if (flags & CPU_FORCED && (error = cpu_unbind(cp->cpu_id)) != 0)
12567c478bd9Sstevel@tonic-gate 		return (error);
12577c478bd9Sstevel@tonic-gate 	/*
12587c478bd9Sstevel@tonic-gate 	 * We shouldn't be bound to this CPU ourselves.
12597c478bd9Sstevel@tonic-gate 	 */
12607c478bd9Sstevel@tonic-gate 	if (curthread->t_bound_cpu == cp)
12617c478bd9Sstevel@tonic-gate 		return (EBUSY);
12627c478bd9Sstevel@tonic-gate 
12637c478bd9Sstevel@tonic-gate 	/*
12647c478bd9Sstevel@tonic-gate 	 * Tell interested parties that this CPU is going offline.
12657c478bd9Sstevel@tonic-gate 	 */
12667c478bd9Sstevel@tonic-gate 	cpu_state_change_notify(cp->cpu_id, CPU_OFF);
12677c478bd9Sstevel@tonic-gate 
12687c478bd9Sstevel@tonic-gate 	/*
12697c478bd9Sstevel@tonic-gate 	 * Take the CPU out of interrupt participation so we won't find
12707c478bd9Sstevel@tonic-gate 	 * bound kernel threads.  If the architecture cannot completely
12717c478bd9Sstevel@tonic-gate 	 * shut off interrupts on the CPU, don't quiesce it, but don't
12727c478bd9Sstevel@tonic-gate 	 * run anything but interrupt thread... this is indicated by
12737c478bd9Sstevel@tonic-gate 	 * the CPU_OFFLINE flag being on but the CPU_QUIESCE flag being
12747c478bd9Sstevel@tonic-gate 	 * off.
12757c478bd9Sstevel@tonic-gate 	 */
12767c478bd9Sstevel@tonic-gate 	intr_enable = cp->cpu_flags & CPU_ENABLE;
12777c478bd9Sstevel@tonic-gate 	if (intr_enable)
12787c478bd9Sstevel@tonic-gate 		no_quiesce = cpu_intr_disable(cp);
12797c478bd9Sstevel@tonic-gate 
12807c478bd9Sstevel@tonic-gate 	/*
12817c478bd9Sstevel@tonic-gate 	 * Record that we are aiming to offline this cpu.  This acts as
12827c478bd9Sstevel@tonic-gate 	 * a barrier to further weak binding requests in thread_nomigrate
12837c478bd9Sstevel@tonic-gate 	 * and also causes cpu_choose, disp_lowpri_cpu and setfrontdq to
12847c478bd9Sstevel@tonic-gate 	 * lean away from this cpu.  Further strong bindings are already
12857c478bd9Sstevel@tonic-gate 	 * avoided since we hold cpu_lock.  Since threads that are set
12867c478bd9Sstevel@tonic-gate 	 * runnable around now and others coming off the target cpu are
12877c478bd9Sstevel@tonic-gate 	 * directed away from the target, existing strong and weak bindings
12887c478bd9Sstevel@tonic-gate 	 * (especially the latter) to the target cpu stand maximum chance of
12897c478bd9Sstevel@tonic-gate 	 * being able to unbind during the short delay loop below (if other
12907c478bd9Sstevel@tonic-gate 	 * unbound threads compete they may not see cpu in time to unbind
12917c478bd9Sstevel@tonic-gate 	 * even if they would do so immediately.
12927c478bd9Sstevel@tonic-gate 	 */
12937c478bd9Sstevel@tonic-gate 	cpu_inmotion = cp;
12947c478bd9Sstevel@tonic-gate 	membar_enter();
12957c478bd9Sstevel@tonic-gate 
12967c478bd9Sstevel@tonic-gate 	/*
12977c478bd9Sstevel@tonic-gate 	 * Check for kernel threads (strong or weak) bound to that CPU.
12987c478bd9Sstevel@tonic-gate 	 * Strongly bound threads may not unbind, and we'll have to return
12997c478bd9Sstevel@tonic-gate 	 * EBUSY.  Weakly bound threads should always disappear - we've
13007c478bd9Sstevel@tonic-gate 	 * stopped more weak binding with cpu_inmotion and existing
13017c478bd9Sstevel@tonic-gate 	 * bindings will drain imminently (they may not block).  Nonetheless
13027c478bd9Sstevel@tonic-gate 	 * we will wait for a fixed period for all bound threads to disappear.
13037c478bd9Sstevel@tonic-gate 	 * Inactive interrupt threads are OK (they'll be in TS_FREE
13047c478bd9Sstevel@tonic-gate 	 * state).  If test finds some bound threads, wait a few ticks
13057c478bd9Sstevel@tonic-gate 	 * to give short-lived threads (such as interrupts) chance to
13067c478bd9Sstevel@tonic-gate 	 * complete.  Note that if no_quiesce is set, i.e. this cpu
13077c478bd9Sstevel@tonic-gate 	 * is required to service interrupts, then we take the route
13087c478bd9Sstevel@tonic-gate 	 * that permits interrupt threads to be active (or bypassed).
13097c478bd9Sstevel@tonic-gate 	 */
13107c478bd9Sstevel@tonic-gate 	bound_func = no_quiesce ? disp_bound_threads : disp_bound_anythreads;
13117c478bd9Sstevel@tonic-gate 
13127c478bd9Sstevel@tonic-gate again:	for (loop_count = 0; (*bound_func)(cp, 0); loop_count++) {
13137c478bd9Sstevel@tonic-gate 		if (loop_count >= 5) {
13147c478bd9Sstevel@tonic-gate 			error = EBUSY;	/* some threads still bound */
13157c478bd9Sstevel@tonic-gate 			break;
13167c478bd9Sstevel@tonic-gate 		}
13177c478bd9Sstevel@tonic-gate 
13187c478bd9Sstevel@tonic-gate 		/*
13197c478bd9Sstevel@tonic-gate 		 * If some threads were assigned, give them
13207c478bd9Sstevel@tonic-gate 		 * a chance to complete or move.
13217c478bd9Sstevel@tonic-gate 		 *
13227c478bd9Sstevel@tonic-gate 		 * This assumes that the clock_thread is not bound
13237c478bd9Sstevel@tonic-gate 		 * to any CPU, because the clock_thread is needed to
13247c478bd9Sstevel@tonic-gate 		 * do the delay(hz/100).
13257c478bd9Sstevel@tonic-gate 		 *
13267c478bd9Sstevel@tonic-gate 		 * Note: we still hold the cpu_lock while waiting for
13277c478bd9Sstevel@tonic-gate 		 * the next clock tick.  This is OK since it isn't
13287c478bd9Sstevel@tonic-gate 		 * needed for anything else except processor_bind(2),
13297c478bd9Sstevel@tonic-gate 		 * and system initialization.  If we drop the lock,
13307c478bd9Sstevel@tonic-gate 		 * we would risk another p_online disabling the last
13317c478bd9Sstevel@tonic-gate 		 * processor.
13327c478bd9Sstevel@tonic-gate 		 */
13337c478bd9Sstevel@tonic-gate 		delay(hz/100);
13347c478bd9Sstevel@tonic-gate 	}
13357c478bd9Sstevel@tonic-gate 
13367c478bd9Sstevel@tonic-gate 	if (error == 0 && cyclic_off == 0) {
13377c478bd9Sstevel@tonic-gate 		if (!cyclic_offline(cp)) {
13387c478bd9Sstevel@tonic-gate 			/*
13397c478bd9Sstevel@tonic-gate 			 * We must have bound cyclics...
13407c478bd9Sstevel@tonic-gate 			 */
13417c478bd9Sstevel@tonic-gate 			error = EBUSY;
13427c478bd9Sstevel@tonic-gate 			goto out;
13437c478bd9Sstevel@tonic-gate 		}
13447c478bd9Sstevel@tonic-gate 		cyclic_off = 1;
13457c478bd9Sstevel@tonic-gate 	}
13467c478bd9Sstevel@tonic-gate 
13477c478bd9Sstevel@tonic-gate 	/*
13487c478bd9Sstevel@tonic-gate 	 * Call mp_cpu_stop() to perform any special operations
13497c478bd9Sstevel@tonic-gate 	 * needed for this machine architecture to offline a CPU.
13507c478bd9Sstevel@tonic-gate 	 */
13517c478bd9Sstevel@tonic-gate 	if (error == 0)
13527c478bd9Sstevel@tonic-gate 		error = mp_cpu_stop(cp);	/* arch-dep hook */
13537c478bd9Sstevel@tonic-gate 
13547c478bd9Sstevel@tonic-gate 	/*
13557c478bd9Sstevel@tonic-gate 	 * If that all worked, take the CPU offline and decrement
13567c478bd9Sstevel@tonic-gate 	 * ncpus_online.
13577c478bd9Sstevel@tonic-gate 	 */
13587c478bd9Sstevel@tonic-gate 	if (error == 0) {
13597c478bd9Sstevel@tonic-gate 		/*
13607c478bd9Sstevel@tonic-gate 		 * Put all the cpus into a known safe place.
13617c478bd9Sstevel@tonic-gate 		 * No mutexes can be entered while CPUs are paused.
13627c478bd9Sstevel@tonic-gate 		 */
13637c478bd9Sstevel@tonic-gate 		pause_cpus(cp);
13647c478bd9Sstevel@tonic-gate 		/*
13657c478bd9Sstevel@tonic-gate 		 * Repeat the operation, if necessary, to make sure that
13667c478bd9Sstevel@tonic-gate 		 * all outstanding low-level interrupts run to completion
13677c478bd9Sstevel@tonic-gate 		 * before we set the CPU_QUIESCED flag.  It's also possible
13687c478bd9Sstevel@tonic-gate 		 * that a thread has weak bound to the cpu despite our raising
13697c478bd9Sstevel@tonic-gate 		 * cpu_inmotion above since it may have loaded that
13707c478bd9Sstevel@tonic-gate 		 * value before the barrier became visible (this would have
13717c478bd9Sstevel@tonic-gate 		 * to be the thread that was on the target cpu at the time
13727c478bd9Sstevel@tonic-gate 		 * we raised the barrier).
13737c478bd9Sstevel@tonic-gate 		 */
13747c478bd9Sstevel@tonic-gate 		if ((!no_quiesce && cp->cpu_intr_actv != 0) ||
13757c478bd9Sstevel@tonic-gate 		    (*bound_func)(cp, 1)) {
13767c478bd9Sstevel@tonic-gate 			start_cpus();
13777c478bd9Sstevel@tonic-gate 			(void) mp_cpu_start(cp);
13787c478bd9Sstevel@tonic-gate 			goto again;
13797c478bd9Sstevel@tonic-gate 		}
13807c478bd9Sstevel@tonic-gate 		ncp = cp->cpu_next_part;
13817c478bd9Sstevel@tonic-gate 		cpu_lpl = cp->cpu_lpl;
13827c478bd9Sstevel@tonic-gate 		ASSERT(cpu_lpl != NULL);
13837c478bd9Sstevel@tonic-gate 
13847c478bd9Sstevel@tonic-gate 		/*
13857c478bd9Sstevel@tonic-gate 		 * Remove the CPU from the list of active CPUs.
13867c478bd9Sstevel@tonic-gate 		 */
13877c478bd9Sstevel@tonic-gate 		cpu_remove_active(cp);
13887c478bd9Sstevel@tonic-gate 
13897c478bd9Sstevel@tonic-gate 		/*
13907c478bd9Sstevel@tonic-gate 		 * Walk the active process list and look for threads
13917c478bd9Sstevel@tonic-gate 		 * whose home lgroup needs to be updated, or
13927c478bd9Sstevel@tonic-gate 		 * the last CPU they run on is the one being offlined now.
13937c478bd9Sstevel@tonic-gate 		 */
13947c478bd9Sstevel@tonic-gate 
13957c478bd9Sstevel@tonic-gate 		ASSERT(curthread->t_cpu != cp);
13967c478bd9Sstevel@tonic-gate 		for (p = practive; p != NULL; p = p->p_next) {
13977c478bd9Sstevel@tonic-gate 
13987c478bd9Sstevel@tonic-gate 			t = p->p_tlist;
13997c478bd9Sstevel@tonic-gate 
14007c478bd9Sstevel@tonic-gate 			if (t == NULL)
14017c478bd9Sstevel@tonic-gate 				continue;
14027c478bd9Sstevel@tonic-gate 
14037c478bd9Sstevel@tonic-gate 			lgrp_diff_lpl = 0;
14047c478bd9Sstevel@tonic-gate 
14057c478bd9Sstevel@tonic-gate 			do {
14067c478bd9Sstevel@tonic-gate 				ASSERT(t->t_lpl != NULL);
14077c478bd9Sstevel@tonic-gate 				/*
14087c478bd9Sstevel@tonic-gate 				 * Taking last CPU in lpl offline
14097c478bd9Sstevel@tonic-gate 				 * Rehome thread if it is in this lpl
14107c478bd9Sstevel@tonic-gate 				 * Otherwise, update the count of how many
14117c478bd9Sstevel@tonic-gate 				 * threads are in this CPU's lgroup but have
14127c478bd9Sstevel@tonic-gate 				 * a different lpl.
14137c478bd9Sstevel@tonic-gate 				 */
14147c478bd9Sstevel@tonic-gate 
14157c478bd9Sstevel@tonic-gate 				if (cpu_lpl->lpl_ncpu == 0) {
14167c478bd9Sstevel@tonic-gate 					if (t->t_lpl == cpu_lpl)
14177c478bd9Sstevel@tonic-gate 						lgrp_move_thread(t,
14187c478bd9Sstevel@tonic-gate 						    lgrp_choose(t,
14197c478bd9Sstevel@tonic-gate 						    t->t_cpupart), 0);
14207c478bd9Sstevel@tonic-gate 					else if (t->t_lpl->lpl_lgrpid ==
14217c478bd9Sstevel@tonic-gate 					    cpu_lpl->lpl_lgrpid)
14227c478bd9Sstevel@tonic-gate 						lgrp_diff_lpl++;
14237c478bd9Sstevel@tonic-gate 				}
14247c478bd9Sstevel@tonic-gate 				ASSERT(t->t_lpl->lpl_ncpu > 0);
14257c478bd9Sstevel@tonic-gate 
14267c478bd9Sstevel@tonic-gate 				/*
14277c478bd9Sstevel@tonic-gate 				 * Update CPU last ran on if it was this CPU
14287c478bd9Sstevel@tonic-gate 				 */
14297c478bd9Sstevel@tonic-gate 				if (t->t_cpu == cp && t->t_bound_cpu != cp)
14307c478bd9Sstevel@tonic-gate 				    t->t_cpu = disp_lowpri_cpu(ncp, t->t_lpl,
14317c478bd9Sstevel@tonic-gate 					t->t_pri, NULL);
14327c478bd9Sstevel@tonic-gate 				ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
14337c478bd9Sstevel@tonic-gate 				    t->t_weakbound_cpu == cp);
14347c478bd9Sstevel@tonic-gate 
14357c478bd9Sstevel@tonic-gate 				t = t->t_forw;
14367c478bd9Sstevel@tonic-gate 			} while (t != p->p_tlist);
14377c478bd9Sstevel@tonic-gate 
14387c478bd9Sstevel@tonic-gate 			/*
14397c478bd9Sstevel@tonic-gate 			 * Didn't find any threads in the same lgroup as this
14407c478bd9Sstevel@tonic-gate 			 * CPU with a different lpl, so remove the lgroup from
14417c478bd9Sstevel@tonic-gate 			 * the process lgroup bitmask.
14427c478bd9Sstevel@tonic-gate 			 */
14437c478bd9Sstevel@tonic-gate 
14447c478bd9Sstevel@tonic-gate 			if (lgrp_diff_lpl == 0)
14457c478bd9Sstevel@tonic-gate 				klgrpset_del(p->p_lgrpset, cpu_lpl->lpl_lgrpid);
14467c478bd9Sstevel@tonic-gate 		}
14477c478bd9Sstevel@tonic-gate 
14487c478bd9Sstevel@tonic-gate 		/*
14497c478bd9Sstevel@tonic-gate 		 * Walk thread list looking for threads that need to be
14507c478bd9Sstevel@tonic-gate 		 * rehomed, since there are some threads that are not in
14517c478bd9Sstevel@tonic-gate 		 * their process's p_tlist.
14527c478bd9Sstevel@tonic-gate 		 */
14537c478bd9Sstevel@tonic-gate 
14547c478bd9Sstevel@tonic-gate 		t = curthread;
14557c478bd9Sstevel@tonic-gate 		do {
14567c478bd9Sstevel@tonic-gate 			ASSERT(t != NULL && t->t_lpl != NULL);
14577c478bd9Sstevel@tonic-gate 
14587c478bd9Sstevel@tonic-gate 			/*
14597c478bd9Sstevel@tonic-gate 			 * Rehome threads with same lpl as this CPU when this
14607c478bd9Sstevel@tonic-gate 			 * is the last CPU in the lpl.
14617c478bd9Sstevel@tonic-gate 			 */
14627c478bd9Sstevel@tonic-gate 
14637c478bd9Sstevel@tonic-gate 			if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl))
14647c478bd9Sstevel@tonic-gate 				lgrp_move_thread(t,
14657c478bd9Sstevel@tonic-gate 				    lgrp_choose(t, t->t_cpupart), 1);
14667c478bd9Sstevel@tonic-gate 
14677c478bd9Sstevel@tonic-gate 			ASSERT(t->t_lpl->lpl_ncpu > 0);
14687c478bd9Sstevel@tonic-gate 
14697c478bd9Sstevel@tonic-gate 			/*
14707c478bd9Sstevel@tonic-gate 			 * Update CPU last ran on if it was this CPU
14717c478bd9Sstevel@tonic-gate 			 */
14727c478bd9Sstevel@tonic-gate 
14737c478bd9Sstevel@tonic-gate 			if (t->t_cpu == cp && t->t_bound_cpu != cp) {
14747c478bd9Sstevel@tonic-gate 				t->t_cpu = disp_lowpri_cpu(ncp,
14757c478bd9Sstevel@tonic-gate 				    t->t_lpl, t->t_pri, NULL);
14767c478bd9Sstevel@tonic-gate 			}
14777c478bd9Sstevel@tonic-gate 			ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
14787c478bd9Sstevel@tonic-gate 			    t->t_weakbound_cpu == cp);
14797c478bd9Sstevel@tonic-gate 			t = t->t_next;
14807c478bd9Sstevel@tonic-gate 
14817c478bd9Sstevel@tonic-gate 		} while (t != curthread);
14827c478bd9Sstevel@tonic-gate 		ASSERT((cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) == 0);
14837c478bd9Sstevel@tonic-gate 		cp->cpu_flags |= CPU_OFFLINE;
14847c478bd9Sstevel@tonic-gate 		disp_cpu_inactive(cp);
14857c478bd9Sstevel@tonic-gate 		if (!no_quiesce)
14867c478bd9Sstevel@tonic-gate 			cp->cpu_flags |= CPU_QUIESCED;
14877c478bd9Sstevel@tonic-gate 		ncpus_online--;
14887c478bd9Sstevel@tonic-gate 		cpu_set_state(cp);
14897c478bd9Sstevel@tonic-gate 		cpu_inmotion = NULL;
14907c478bd9Sstevel@tonic-gate 		start_cpus();
14917c478bd9Sstevel@tonic-gate 		cpu_stats_kstat_destroy(cp);
14927c478bd9Sstevel@tonic-gate 		cpu_delete_intrstat(cp);
14937c478bd9Sstevel@tonic-gate 		lgrp_kstat_destroy(cp);
14947c478bd9Sstevel@tonic-gate 	}
14957c478bd9Sstevel@tonic-gate 
14967c478bd9Sstevel@tonic-gate out:
14977c478bd9Sstevel@tonic-gate 	cpu_inmotion = NULL;
14987c478bd9Sstevel@tonic-gate 
14997c478bd9Sstevel@tonic-gate 	/*
15007c478bd9Sstevel@tonic-gate 	 * If we failed, re-enable interrupts.
15017c478bd9Sstevel@tonic-gate 	 * Do this even if cpu_intr_disable returned an error, because
15027c478bd9Sstevel@tonic-gate 	 * it may have partially disabled interrupts.
15037c478bd9Sstevel@tonic-gate 	 */
15047c478bd9Sstevel@tonic-gate 	if (error && intr_enable)
15057c478bd9Sstevel@tonic-gate 		cpu_intr_enable(cp);
15067c478bd9Sstevel@tonic-gate 
15077c478bd9Sstevel@tonic-gate 	/*
15087c478bd9Sstevel@tonic-gate 	 * If we failed, but managed to offline the cyclic subsystem on this
15097c478bd9Sstevel@tonic-gate 	 * CPU, bring it back online.
15107c478bd9Sstevel@tonic-gate 	 */
15117c478bd9Sstevel@tonic-gate 	if (error && cyclic_off)
15127c478bd9Sstevel@tonic-gate 		cyclic_online(cp);
15137c478bd9Sstevel@tonic-gate 
15147c478bd9Sstevel@tonic-gate 	/*
15157c478bd9Sstevel@tonic-gate 	 * If we failed, we need to notify everyone that this CPU is back on.
15167c478bd9Sstevel@tonic-gate 	 */
15177c478bd9Sstevel@tonic-gate 	if (error != 0)
15187c478bd9Sstevel@tonic-gate 		cpu_state_change_notify(cp->cpu_id, CPU_ON);
15197c478bd9Sstevel@tonic-gate 
15207c478bd9Sstevel@tonic-gate 	return (error);
15217c478bd9Sstevel@tonic-gate }
15227c478bd9Sstevel@tonic-gate 
15237c478bd9Sstevel@tonic-gate /*
15247c478bd9Sstevel@tonic-gate  * Mark the indicated CPU as faulted, taking it offline.
15257c478bd9Sstevel@tonic-gate  */
15267c478bd9Sstevel@tonic-gate int
15277c478bd9Sstevel@tonic-gate cpu_faulted(cpu_t *cp, int flags)
15287c478bd9Sstevel@tonic-gate {
15297c478bd9Sstevel@tonic-gate 	int	error = 0;
15307c478bd9Sstevel@tonic-gate 
15317c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
15327c478bd9Sstevel@tonic-gate 	ASSERT(!cpu_is_poweredoff(cp));
15337c478bd9Sstevel@tonic-gate 
15347c478bd9Sstevel@tonic-gate 	if (cpu_is_offline(cp)) {
15357c478bd9Sstevel@tonic-gate 		cp->cpu_flags &= ~CPU_SPARE;
15367c478bd9Sstevel@tonic-gate 		cp->cpu_flags |= CPU_FAULTED;
15377c478bd9Sstevel@tonic-gate 		mp_cpu_faulted_enter(cp);
15387c478bd9Sstevel@tonic-gate 		cpu_set_state(cp);
15397c478bd9Sstevel@tonic-gate 		return (0);
15407c478bd9Sstevel@tonic-gate 	}
15417c478bd9Sstevel@tonic-gate 
15427c478bd9Sstevel@tonic-gate 	if ((error = cpu_offline(cp, flags)) == 0) {
15437c478bd9Sstevel@tonic-gate 		cp->cpu_flags |= CPU_FAULTED;
15447c478bd9Sstevel@tonic-gate 		mp_cpu_faulted_enter(cp);
15457c478bd9Sstevel@tonic-gate 		cpu_set_state(cp);
15467c478bd9Sstevel@tonic-gate 	}
15477c478bd9Sstevel@tonic-gate 
15487c478bd9Sstevel@tonic-gate 	return (error);
15497c478bd9Sstevel@tonic-gate }
15507c478bd9Sstevel@tonic-gate 
15517c478bd9Sstevel@tonic-gate /*
15527c478bd9Sstevel@tonic-gate  * Mark the indicated CPU as a spare, taking it offline.
15537c478bd9Sstevel@tonic-gate  */
15547c478bd9Sstevel@tonic-gate int
15557c478bd9Sstevel@tonic-gate cpu_spare(cpu_t *cp, int flags)
15567c478bd9Sstevel@tonic-gate {
15577c478bd9Sstevel@tonic-gate 	int	error = 0;
15587c478bd9Sstevel@tonic-gate 
15597c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
15607c478bd9Sstevel@tonic-gate 	ASSERT(!cpu_is_poweredoff(cp));
15617c478bd9Sstevel@tonic-gate 
15627c478bd9Sstevel@tonic-gate 	if (cpu_is_offline(cp)) {
15637c478bd9Sstevel@tonic-gate 		if (cp->cpu_flags & CPU_FAULTED) {
15647c478bd9Sstevel@tonic-gate 			cp->cpu_flags &= ~CPU_FAULTED;
15657c478bd9Sstevel@tonic-gate 			mp_cpu_faulted_exit(cp);
15667c478bd9Sstevel@tonic-gate 		}
15677c478bd9Sstevel@tonic-gate 		cp->cpu_flags |= CPU_SPARE;
15687c478bd9Sstevel@tonic-gate 		cpu_set_state(cp);
15697c478bd9Sstevel@tonic-gate 		return (0);
15707c478bd9Sstevel@tonic-gate 	}
15717c478bd9Sstevel@tonic-gate 
15727c478bd9Sstevel@tonic-gate 	if ((error = cpu_offline(cp, flags)) == 0) {
15737c478bd9Sstevel@tonic-gate 		cp->cpu_flags |= CPU_SPARE;
15747c478bd9Sstevel@tonic-gate 		cpu_set_state(cp);
15757c478bd9Sstevel@tonic-gate 	}
15767c478bd9Sstevel@tonic-gate 
15777c478bd9Sstevel@tonic-gate 	return (error);
15787c478bd9Sstevel@tonic-gate }
15797c478bd9Sstevel@tonic-gate 
15807c478bd9Sstevel@tonic-gate /*
15817c478bd9Sstevel@tonic-gate  * Take the indicated CPU from poweroff to offline.
15827c478bd9Sstevel@tonic-gate  */
15837c478bd9Sstevel@tonic-gate int
15847c478bd9Sstevel@tonic-gate cpu_poweron(cpu_t *cp)
15857c478bd9Sstevel@tonic-gate {
15867c478bd9Sstevel@tonic-gate 	int	error = ENOTSUP;
15877c478bd9Sstevel@tonic-gate 
15887c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
15897c478bd9Sstevel@tonic-gate 	ASSERT(cpu_is_poweredoff(cp));
15907c478bd9Sstevel@tonic-gate 
15917c478bd9Sstevel@tonic-gate 	error = mp_cpu_poweron(cp);	/* arch-dep hook */
15927c478bd9Sstevel@tonic-gate 	if (error == 0)
15937c478bd9Sstevel@tonic-gate 		cpu_set_state(cp);
15947c478bd9Sstevel@tonic-gate 
15957c478bd9Sstevel@tonic-gate 	return (error);
15967c478bd9Sstevel@tonic-gate }
15977c478bd9Sstevel@tonic-gate 
15987c478bd9Sstevel@tonic-gate /*
15997c478bd9Sstevel@tonic-gate  * Take the indicated CPU from any inactive state to powered off.
16007c478bd9Sstevel@tonic-gate  */
16017c478bd9Sstevel@tonic-gate int
16027c478bd9Sstevel@tonic-gate cpu_poweroff(cpu_t *cp)
16037c478bd9Sstevel@tonic-gate {
16047c478bd9Sstevel@tonic-gate 	int	error = ENOTSUP;
16057c478bd9Sstevel@tonic-gate 
16067c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
16077c478bd9Sstevel@tonic-gate 	ASSERT(cpu_is_offline(cp));
16087c478bd9Sstevel@tonic-gate 
16097c478bd9Sstevel@tonic-gate 	if (!(cp->cpu_flags & CPU_QUIESCED))
16107c478bd9Sstevel@tonic-gate 		return (EBUSY);		/* not completely idle */
16117c478bd9Sstevel@tonic-gate 
16127c478bd9Sstevel@tonic-gate 	error = mp_cpu_poweroff(cp);	/* arch-dep hook */
16137c478bd9Sstevel@tonic-gate 	if (error == 0)
16147c478bd9Sstevel@tonic-gate 		cpu_set_state(cp);
16157c478bd9Sstevel@tonic-gate 
16167c478bd9Sstevel@tonic-gate 	return (error);
16177c478bd9Sstevel@tonic-gate }
16187c478bd9Sstevel@tonic-gate 
16197c478bd9Sstevel@tonic-gate /*
16207c478bd9Sstevel@tonic-gate  * Initialize the CPU lists for the first CPU.
16217c478bd9Sstevel@tonic-gate  */
16227c478bd9Sstevel@tonic-gate void
16237c478bd9Sstevel@tonic-gate cpu_list_init(cpu_t *cp)
16247c478bd9Sstevel@tonic-gate {
16257c478bd9Sstevel@tonic-gate 	cp->cpu_next = cp;
16267c478bd9Sstevel@tonic-gate 	cp->cpu_prev = cp;
16277c478bd9Sstevel@tonic-gate 	cpu_list = cp;
16287c478bd9Sstevel@tonic-gate 
16297c478bd9Sstevel@tonic-gate 	cp->cpu_next_onln = cp;
16307c478bd9Sstevel@tonic-gate 	cp->cpu_prev_onln = cp;
16317c478bd9Sstevel@tonic-gate 	cpu_active = cp;
16327c478bd9Sstevel@tonic-gate 
16337c478bd9Sstevel@tonic-gate 	cp->cpu_seqid = 0;
16347c478bd9Sstevel@tonic-gate 	CPUSET_ADD(cpu_seqid_inuse, 0);
16357c478bd9Sstevel@tonic-gate 	cp->cpu_cache_offset = KMEM_CACHE_SIZE(cp->cpu_seqid);
16367c478bd9Sstevel@tonic-gate 	cp_default.cp_cpulist = cp;
16377c478bd9Sstevel@tonic-gate 	cp_default.cp_ncpus = 1;
16387c478bd9Sstevel@tonic-gate 	cp->cpu_next_part = cp;
16397c478bd9Sstevel@tonic-gate 	cp->cpu_prev_part = cp;
16407c478bd9Sstevel@tonic-gate 	cp->cpu_part = &cp_default;
16417c478bd9Sstevel@tonic-gate 
16427c478bd9Sstevel@tonic-gate 	CPUSET_ADD(cpu_available, cp->cpu_id);
16437c478bd9Sstevel@tonic-gate }
16447c478bd9Sstevel@tonic-gate 
16457c478bd9Sstevel@tonic-gate /*
16467c478bd9Sstevel@tonic-gate  * Insert a CPU into the list of available CPUs.
16477c478bd9Sstevel@tonic-gate  */
16487c478bd9Sstevel@tonic-gate void
16497c478bd9Sstevel@tonic-gate cpu_add_unit(cpu_t *cp)
16507c478bd9Sstevel@tonic-gate {
16517c478bd9Sstevel@tonic-gate 	int seqid;
16527c478bd9Sstevel@tonic-gate 
16537c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
16547c478bd9Sstevel@tonic-gate 	ASSERT(cpu_list != NULL);	/* list started in cpu_list_init */
16557c478bd9Sstevel@tonic-gate 
16567c478bd9Sstevel@tonic-gate 	lgrp_config(LGRP_CONFIG_CPU_ADD, (uintptr_t)cp, 0);
16577c478bd9Sstevel@tonic-gate 
16587c478bd9Sstevel@tonic-gate 	/*
16597c478bd9Sstevel@tonic-gate 	 * Note: most users of the cpu_list will grab the
16607c478bd9Sstevel@tonic-gate 	 * cpu_lock to insure that it isn't modified.  However,
16617c478bd9Sstevel@tonic-gate 	 * certain users can't or won't do that.  To allow this
16627c478bd9Sstevel@tonic-gate 	 * we pause the other cpus.  Users who walk the list
16637c478bd9Sstevel@tonic-gate 	 * without cpu_lock, must disable kernel preemption
16647c478bd9Sstevel@tonic-gate 	 * to insure that the list isn't modified underneath
16657c478bd9Sstevel@tonic-gate 	 * them.  Also, any cached pointers to cpu structures
16667c478bd9Sstevel@tonic-gate 	 * must be revalidated by checking to see if the
16677c478bd9Sstevel@tonic-gate 	 * cpu_next pointer points to itself.  This check must
16687c478bd9Sstevel@tonic-gate 	 * be done with the cpu_lock held or kernel preemption
16697c478bd9Sstevel@tonic-gate 	 * disabled.  This check relies upon the fact that
16707c478bd9Sstevel@tonic-gate 	 * old cpu structures are not free'ed or cleared after
16717c478bd9Sstevel@tonic-gate 	 * then are removed from the cpu_list.
16727c478bd9Sstevel@tonic-gate 	 *
16737c478bd9Sstevel@tonic-gate 	 * Note that the clock code walks the cpu list dereferencing
16747c478bd9Sstevel@tonic-gate 	 * the cpu_part pointer, so we need to initialize it before
16757c478bd9Sstevel@tonic-gate 	 * adding the cpu to the list.
16767c478bd9Sstevel@tonic-gate 	 */
16777c478bd9Sstevel@tonic-gate 	cp->cpu_part = &cp_default;
16787c478bd9Sstevel@tonic-gate 	(void) pause_cpus(NULL);
16797c478bd9Sstevel@tonic-gate 	cp->cpu_next = cpu_list;
16807c478bd9Sstevel@tonic-gate 	cp->cpu_prev = cpu_list->cpu_prev;
16817c478bd9Sstevel@tonic-gate 	cpu_list->cpu_prev->cpu_next = cp;
16827c478bd9Sstevel@tonic-gate 	cpu_list->cpu_prev = cp;
16837c478bd9Sstevel@tonic-gate 	start_cpus();
16847c478bd9Sstevel@tonic-gate 
16857c478bd9Sstevel@tonic-gate 	for (seqid = 0; CPU_IN_SET(cpu_seqid_inuse, seqid); seqid++)
16867c478bd9Sstevel@tonic-gate 		continue;
16877c478bd9Sstevel@tonic-gate 	CPUSET_ADD(cpu_seqid_inuse, seqid);
16887c478bd9Sstevel@tonic-gate 	cp->cpu_seqid = seqid;
16897c478bd9Sstevel@tonic-gate 	ASSERT(ncpus < max_ncpus);
16907c478bd9Sstevel@tonic-gate 	ncpus++;
16917c478bd9Sstevel@tonic-gate 	cp->cpu_cache_offset = KMEM_CACHE_SIZE(cp->cpu_seqid);
16927c478bd9Sstevel@tonic-gate 	cpu[cp->cpu_id] = cp;
16937c478bd9Sstevel@tonic-gate 	CPUSET_ADD(cpu_available, cp->cpu_id);
16947c478bd9Sstevel@tonic-gate 
16957c478bd9Sstevel@tonic-gate 	/*
16967c478bd9Sstevel@tonic-gate 	 * allocate a pause thread for this CPU.
16977c478bd9Sstevel@tonic-gate 	 */
16987c478bd9Sstevel@tonic-gate 	cpu_pause_alloc(cp);
16997c478bd9Sstevel@tonic-gate 
17007c478bd9Sstevel@tonic-gate 	/*
17017c478bd9Sstevel@tonic-gate 	 * So that new CPUs won't have NULL prev_onln and next_onln pointers,
17027c478bd9Sstevel@tonic-gate 	 * link them into a list of just that CPU.
17037c478bd9Sstevel@tonic-gate 	 * This is so that disp_lowpri_cpu will work for thread_create in
17047c478bd9Sstevel@tonic-gate 	 * pause_cpus() when called from the startup thread in a new CPU.
17057c478bd9Sstevel@tonic-gate 	 */
17067c478bd9Sstevel@tonic-gate 	cp->cpu_next_onln = cp;
17077c478bd9Sstevel@tonic-gate 	cp->cpu_prev_onln = cp;
17087c478bd9Sstevel@tonic-gate 	cpu_info_kstat_create(cp);
17097c478bd9Sstevel@tonic-gate 	cp->cpu_next_part = cp;
17107c478bd9Sstevel@tonic-gate 	cp->cpu_prev_part = cp;
17117c478bd9Sstevel@tonic-gate 
17127c478bd9Sstevel@tonic-gate 	init_cpu_mstate(cp, CMS_SYSTEM);
17137c478bd9Sstevel@tonic-gate 
17147c478bd9Sstevel@tonic-gate 	pool_pset_mod = gethrtime();
17157c478bd9Sstevel@tonic-gate }
17167c478bd9Sstevel@tonic-gate 
17177c478bd9Sstevel@tonic-gate /*
17187c478bd9Sstevel@tonic-gate  * Do the opposite of cpu_add_unit().
17197c478bd9Sstevel@tonic-gate  */
17207c478bd9Sstevel@tonic-gate void
17217c478bd9Sstevel@tonic-gate cpu_del_unit(int cpuid)
17227c478bd9Sstevel@tonic-gate {
17237c478bd9Sstevel@tonic-gate 	struct cpu	*cp, *cpnext;
17247c478bd9Sstevel@tonic-gate 
17257c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
17267c478bd9Sstevel@tonic-gate 	cp = cpu[cpuid];
17277c478bd9Sstevel@tonic-gate 	ASSERT(cp != NULL);
17287c478bd9Sstevel@tonic-gate 
17297c478bd9Sstevel@tonic-gate 	ASSERT(cp->cpu_next_onln == cp);
17307c478bd9Sstevel@tonic-gate 	ASSERT(cp->cpu_prev_onln == cp);
17317c478bd9Sstevel@tonic-gate 	ASSERT(cp->cpu_next_part == cp);
17327c478bd9Sstevel@tonic-gate 	ASSERT(cp->cpu_prev_part == cp);
17337c478bd9Sstevel@tonic-gate 
17347c478bd9Sstevel@tonic-gate 	chip_cpu_fini(cp);
17357c478bd9Sstevel@tonic-gate 
17367c478bd9Sstevel@tonic-gate 	/*
17377c478bd9Sstevel@tonic-gate 	 * Destroy kstat stuff.
17387c478bd9Sstevel@tonic-gate 	 */
17397c478bd9Sstevel@tonic-gate 	cpu_info_kstat_destroy(cp);
17407c478bd9Sstevel@tonic-gate 	term_cpu_mstate(cp);
17417c478bd9Sstevel@tonic-gate 	/*
17427c478bd9Sstevel@tonic-gate 	 * Free up pause thread.
17437c478bd9Sstevel@tonic-gate 	 */
17447c478bd9Sstevel@tonic-gate 	cpu_pause_free(cp);
17457c478bd9Sstevel@tonic-gate 	CPUSET_DEL(cpu_available, cp->cpu_id);
17467c478bd9Sstevel@tonic-gate 	cpu[cp->cpu_id] = NULL;
17477c478bd9Sstevel@tonic-gate 	/*
17487c478bd9Sstevel@tonic-gate 	 * The clock thread and mutex_vector_enter cannot hold the
17497c478bd9Sstevel@tonic-gate 	 * cpu_lock while traversing the cpu list, therefore we pause
17507c478bd9Sstevel@tonic-gate 	 * all other threads by pausing the other cpus. These, and any
17517c478bd9Sstevel@tonic-gate 	 * other routines holding cpu pointers while possibly sleeping
17527c478bd9Sstevel@tonic-gate 	 * must be sure to call kpreempt_disable before processing the
17537c478bd9Sstevel@tonic-gate 	 * list and be sure to check that the cpu has not been deleted
17547c478bd9Sstevel@tonic-gate 	 * after any sleeps (check cp->cpu_next != NULL). We guarantee
17557c478bd9Sstevel@tonic-gate 	 * to keep the deleted cpu structure around.
17567c478bd9Sstevel@tonic-gate 	 *
17577c478bd9Sstevel@tonic-gate 	 * Note that this MUST be done AFTER cpu_available
17587c478bd9Sstevel@tonic-gate 	 * has been updated so that we don't waste time
17597c478bd9Sstevel@tonic-gate 	 * trying to pause the cpu we're trying to delete.
17607c478bd9Sstevel@tonic-gate 	 */
17617c478bd9Sstevel@tonic-gate 	(void) pause_cpus(NULL);
17627c478bd9Sstevel@tonic-gate 
17637c478bd9Sstevel@tonic-gate 	cpnext = cp->cpu_next;
17647c478bd9Sstevel@tonic-gate 	cp->cpu_prev->cpu_next = cp->cpu_next;
17657c478bd9Sstevel@tonic-gate 	cp->cpu_next->cpu_prev = cp->cpu_prev;
17667c478bd9Sstevel@tonic-gate 	if (cp == cpu_list)
17677c478bd9Sstevel@tonic-gate 	    cpu_list = cpnext;
17687c478bd9Sstevel@tonic-gate 
17697c478bd9Sstevel@tonic-gate 	/*
17707c478bd9Sstevel@tonic-gate 	 * Signals that the cpu has been deleted (see above).
17717c478bd9Sstevel@tonic-gate 	 */
17727c478bd9Sstevel@tonic-gate 	cp->cpu_next = NULL;
17737c478bd9Sstevel@tonic-gate 	cp->cpu_prev = NULL;
17747c478bd9Sstevel@tonic-gate 
17757c478bd9Sstevel@tonic-gate 	start_cpus();
17767c478bd9Sstevel@tonic-gate 
17777c478bd9Sstevel@tonic-gate 	CPUSET_DEL(cpu_seqid_inuse, cp->cpu_seqid);
17787c478bd9Sstevel@tonic-gate 	ncpus--;
17797c478bd9Sstevel@tonic-gate 	lgrp_config(LGRP_CONFIG_CPU_DEL, (uintptr_t)cp, 0);
17807c478bd9Sstevel@tonic-gate 
17817c478bd9Sstevel@tonic-gate 	pool_pset_mod = gethrtime();
17827c478bd9Sstevel@tonic-gate }
17837c478bd9Sstevel@tonic-gate 
17847c478bd9Sstevel@tonic-gate /*
17857c478bd9Sstevel@tonic-gate  * Add a CPU to the list of active CPUs.
17867c478bd9Sstevel@tonic-gate  *	This routine must not get any locks, because other CPUs are paused.
17877c478bd9Sstevel@tonic-gate  */
17887c478bd9Sstevel@tonic-gate static void
17897c478bd9Sstevel@tonic-gate cpu_add_active_internal(cpu_t *cp)
17907c478bd9Sstevel@tonic-gate {
17917c478bd9Sstevel@tonic-gate 	cpupart_t	*pp = cp->cpu_part;
17927c478bd9Sstevel@tonic-gate 
17937c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
17947c478bd9Sstevel@tonic-gate 	ASSERT(cpu_list != NULL);	/* list started in cpu_list_init */
17957c478bd9Sstevel@tonic-gate 
17967c478bd9Sstevel@tonic-gate 	ncpus_online++;
17977c478bd9Sstevel@tonic-gate 	cpu_set_state(cp);
17987c478bd9Sstevel@tonic-gate 	cp->cpu_next_onln = cpu_active;
17997c478bd9Sstevel@tonic-gate 	cp->cpu_prev_onln = cpu_active->cpu_prev_onln;
18007c478bd9Sstevel@tonic-gate 	cpu_active->cpu_prev_onln->cpu_next_onln = cp;
18017c478bd9Sstevel@tonic-gate 	cpu_active->cpu_prev_onln = cp;
18027c478bd9Sstevel@tonic-gate 
18037c478bd9Sstevel@tonic-gate 	if (pp->cp_cpulist) {
18047c478bd9Sstevel@tonic-gate 		cp->cpu_next_part = pp->cp_cpulist;
18057c478bd9Sstevel@tonic-gate 		cp->cpu_prev_part = pp->cp_cpulist->cpu_prev_part;
18067c478bd9Sstevel@tonic-gate 		pp->cp_cpulist->cpu_prev_part->cpu_next_part = cp;
18077c478bd9Sstevel@tonic-gate 		pp->cp_cpulist->cpu_prev_part = cp;
18087c478bd9Sstevel@tonic-gate 	} else {
18097c478bd9Sstevel@tonic-gate 		ASSERT(pp->cp_ncpus == 0);
18107c478bd9Sstevel@tonic-gate 		pp->cp_cpulist = cp->cpu_next_part = cp->cpu_prev_part = cp;
18117c478bd9Sstevel@tonic-gate 	}
18127c478bd9Sstevel@tonic-gate 	pp->cp_ncpus++;
18137c478bd9Sstevel@tonic-gate 	if (pp->cp_ncpus == 1) {
18147c478bd9Sstevel@tonic-gate 		cp_numparts_nonempty++;
18157c478bd9Sstevel@tonic-gate 		ASSERT(cp_numparts_nonempty != 0);
18167c478bd9Sstevel@tonic-gate 	}
18177c478bd9Sstevel@tonic-gate 
18187c478bd9Sstevel@tonic-gate 	chip_cpu_assign(cp);
18197c478bd9Sstevel@tonic-gate 
18207c478bd9Sstevel@tonic-gate 	lgrp_config(LGRP_CONFIG_CPU_ONLINE, (uintptr_t)cp, 0);
18217c478bd9Sstevel@tonic-gate 
18227c478bd9Sstevel@tonic-gate 	bzero(&cp->cpu_loadavg, sizeof (cp->cpu_loadavg));
18237c478bd9Sstevel@tonic-gate }
18247c478bd9Sstevel@tonic-gate 
18257c478bd9Sstevel@tonic-gate /*
18267c478bd9Sstevel@tonic-gate  * Add a CPU to the list of active CPUs.
18277c478bd9Sstevel@tonic-gate  *	This is called from machine-dependent layers when a new CPU is started.
18287c478bd9Sstevel@tonic-gate  */
18297c478bd9Sstevel@tonic-gate void
18307c478bd9Sstevel@tonic-gate cpu_add_active(cpu_t *cp)
18317c478bd9Sstevel@tonic-gate {
18327c478bd9Sstevel@tonic-gate 	pause_cpus(NULL);
18337c478bd9Sstevel@tonic-gate 	cpu_add_active_internal(cp);
18347c478bd9Sstevel@tonic-gate 	start_cpus();
18357c478bd9Sstevel@tonic-gate 	cpu_stats_kstat_create(cp);
18367c478bd9Sstevel@tonic-gate 	cpu_create_intrstat(cp);
18377c478bd9Sstevel@tonic-gate 	lgrp_kstat_create(cp);
18387c478bd9Sstevel@tonic-gate 	cpu_state_change_notify(cp->cpu_id, CPU_INIT);
18397c478bd9Sstevel@tonic-gate }
18407c478bd9Sstevel@tonic-gate 
18417c478bd9Sstevel@tonic-gate 
18427c478bd9Sstevel@tonic-gate /*
18437c478bd9Sstevel@tonic-gate  * Remove a CPU from the list of active CPUs.
18447c478bd9Sstevel@tonic-gate  *	This routine must not get any locks, because other CPUs are paused.
18457c478bd9Sstevel@tonic-gate  */
18467c478bd9Sstevel@tonic-gate /* ARGSUSED */
18477c478bd9Sstevel@tonic-gate static void
18487c478bd9Sstevel@tonic-gate cpu_remove_active(cpu_t *cp)
18497c478bd9Sstevel@tonic-gate {
18507c478bd9Sstevel@tonic-gate 	cpupart_t	*pp = cp->cpu_part;
18517c478bd9Sstevel@tonic-gate 
18527c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
18537c478bd9Sstevel@tonic-gate 	ASSERT(cp->cpu_next_onln != cp);	/* not the last one */
18547c478bd9Sstevel@tonic-gate 	ASSERT(cp->cpu_prev_onln != cp);	/* not the last one */
18557c478bd9Sstevel@tonic-gate 
18567c478bd9Sstevel@tonic-gate 	chip_cpu_unassign(cp);
18577c478bd9Sstevel@tonic-gate 
18587c478bd9Sstevel@tonic-gate 	lgrp_config(LGRP_CONFIG_CPU_OFFLINE, (uintptr_t)cp, 0);
18597c478bd9Sstevel@tonic-gate 
18607c478bd9Sstevel@tonic-gate 	cp->cpu_prev_onln->cpu_next_onln = cp->cpu_next_onln;
18617c478bd9Sstevel@tonic-gate 	cp->cpu_next_onln->cpu_prev_onln = cp->cpu_prev_onln;
18627c478bd9Sstevel@tonic-gate 	if (cpu_active == cp) {
18637c478bd9Sstevel@tonic-gate 		cpu_active = cp->cpu_next_onln;
18647c478bd9Sstevel@tonic-gate 	}
18657c478bd9Sstevel@tonic-gate 	cp->cpu_next_onln = cp;
18667c478bd9Sstevel@tonic-gate 	cp->cpu_prev_onln = cp;
18677c478bd9Sstevel@tonic-gate 
18687c478bd9Sstevel@tonic-gate 	cp->cpu_prev_part->cpu_next_part = cp->cpu_next_part;
18697c478bd9Sstevel@tonic-gate 	cp->cpu_next_part->cpu_prev_part = cp->cpu_prev_part;
18707c478bd9Sstevel@tonic-gate 	if (pp->cp_cpulist == cp) {
18717c478bd9Sstevel@tonic-gate 		pp->cp_cpulist = cp->cpu_next_part;
18727c478bd9Sstevel@tonic-gate 		ASSERT(pp->cp_cpulist != cp);
18737c478bd9Sstevel@tonic-gate 	}
18747c478bd9Sstevel@tonic-gate 	cp->cpu_next_part = cp;
18757c478bd9Sstevel@tonic-gate 	cp->cpu_prev_part = cp;
18767c478bd9Sstevel@tonic-gate 	pp->cp_ncpus--;
18777c478bd9Sstevel@tonic-gate 	if (pp->cp_ncpus == 0) {
18787c478bd9Sstevel@tonic-gate 		cp_numparts_nonempty--;
18797c478bd9Sstevel@tonic-gate 		ASSERT(cp_numparts_nonempty != 0);
18807c478bd9Sstevel@tonic-gate 	}
18817c478bd9Sstevel@tonic-gate }
18827c478bd9Sstevel@tonic-gate 
18837c478bd9Sstevel@tonic-gate /*
18847c478bd9Sstevel@tonic-gate  * Routine used to setup a newly inserted CPU in preparation for starting
18857c478bd9Sstevel@tonic-gate  * it running code.
18867c478bd9Sstevel@tonic-gate  */
18877c478bd9Sstevel@tonic-gate int
18887c478bd9Sstevel@tonic-gate cpu_configure(int cpuid)
18897c478bd9Sstevel@tonic-gate {
18907c478bd9Sstevel@tonic-gate 	int retval = 0;
18917c478bd9Sstevel@tonic-gate 
18927c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
18937c478bd9Sstevel@tonic-gate 
18947c478bd9Sstevel@tonic-gate 	/*
18957c478bd9Sstevel@tonic-gate 	 * Some structures are statically allocated based upon
18967c478bd9Sstevel@tonic-gate 	 * the maximum number of cpus the system supports.  Do not
18977c478bd9Sstevel@tonic-gate 	 * try to add anything beyond this limit.
18987c478bd9Sstevel@tonic-gate 	 */
18997c478bd9Sstevel@tonic-gate 	if (cpuid < 0 || cpuid >= NCPU) {
19007c478bd9Sstevel@tonic-gate 		return (EINVAL);
19017c478bd9Sstevel@tonic-gate 	}
19027c478bd9Sstevel@tonic-gate 
19037c478bd9Sstevel@tonic-gate 	if ((cpu[cpuid] != NULL) && (cpu[cpuid]->cpu_flags != 0)) {
19047c478bd9Sstevel@tonic-gate 		return (EALREADY);
19057c478bd9Sstevel@tonic-gate 	}
19067c478bd9Sstevel@tonic-gate 
19077c478bd9Sstevel@tonic-gate 	if ((retval = mp_cpu_configure(cpuid)) != 0) {
19087c478bd9Sstevel@tonic-gate 		return (retval);
19097c478bd9Sstevel@tonic-gate 	}
19107c478bd9Sstevel@tonic-gate 
19117c478bd9Sstevel@tonic-gate 	cpu[cpuid]->cpu_flags = CPU_QUIESCED | CPU_OFFLINE | CPU_POWEROFF;
19127c478bd9Sstevel@tonic-gate 	cpu_set_state(cpu[cpuid]);
19137c478bd9Sstevel@tonic-gate 	retval = cpu_state_change_hooks(cpuid, CPU_CONFIG, CPU_UNCONFIG);
19147c478bd9Sstevel@tonic-gate 	if (retval != 0)
19157c478bd9Sstevel@tonic-gate 		(void) mp_cpu_unconfigure(cpuid);
19167c478bd9Sstevel@tonic-gate 
19177c478bd9Sstevel@tonic-gate 	return (retval);
19187c478bd9Sstevel@tonic-gate }
19197c478bd9Sstevel@tonic-gate 
19207c478bd9Sstevel@tonic-gate /*
19217c478bd9Sstevel@tonic-gate  * Routine used to cleanup a CPU that has been powered off.  This will
19227c478bd9Sstevel@tonic-gate  * destroy all per-cpu information related to this cpu.
19237c478bd9Sstevel@tonic-gate  */
19247c478bd9Sstevel@tonic-gate int
19257c478bd9Sstevel@tonic-gate cpu_unconfigure(int cpuid)
19267c478bd9Sstevel@tonic-gate {
19277c478bd9Sstevel@tonic-gate 	int error;
19287c478bd9Sstevel@tonic-gate 
19297c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
19307c478bd9Sstevel@tonic-gate 
19317c478bd9Sstevel@tonic-gate 	if (cpu[cpuid] == NULL) {
19327c478bd9Sstevel@tonic-gate 		return (ENODEV);
19337c478bd9Sstevel@tonic-gate 	}
19347c478bd9Sstevel@tonic-gate 
19357c478bd9Sstevel@tonic-gate 	if (cpu[cpuid]->cpu_flags == 0) {
19367c478bd9Sstevel@tonic-gate 		return (EALREADY);
19377c478bd9Sstevel@tonic-gate 	}
19387c478bd9Sstevel@tonic-gate 
19397c478bd9Sstevel@tonic-gate 	if ((cpu[cpuid]->cpu_flags & CPU_POWEROFF) == 0) {
19407c478bd9Sstevel@tonic-gate 		return (EBUSY);
19417c478bd9Sstevel@tonic-gate 	}
19427c478bd9Sstevel@tonic-gate 
19437c478bd9Sstevel@tonic-gate 	if (cpu[cpuid]->cpu_props != NULL) {
19447c478bd9Sstevel@tonic-gate 		(void) nvlist_free(cpu[cpuid]->cpu_props);
19457c478bd9Sstevel@tonic-gate 		cpu[cpuid]->cpu_props = NULL;
19467c478bd9Sstevel@tonic-gate 	}
19477c478bd9Sstevel@tonic-gate 
19487c478bd9Sstevel@tonic-gate 	error = cpu_state_change_hooks(cpuid, CPU_UNCONFIG, CPU_CONFIG);
19497c478bd9Sstevel@tonic-gate 
19507c478bd9Sstevel@tonic-gate 	if (error != 0)
19517c478bd9Sstevel@tonic-gate 		return (error);
19527c478bd9Sstevel@tonic-gate 
19537c478bd9Sstevel@tonic-gate 	return (mp_cpu_unconfigure(cpuid));
19547c478bd9Sstevel@tonic-gate }
19557c478bd9Sstevel@tonic-gate 
19567c478bd9Sstevel@tonic-gate /*
19577c478bd9Sstevel@tonic-gate  * Routines for registering and de-registering cpu_setup callback functions.
19587c478bd9Sstevel@tonic-gate  *
19597c478bd9Sstevel@tonic-gate  * Caller's context
19607c478bd9Sstevel@tonic-gate  *	These routines must not be called from a driver's attach(9E) or
19617c478bd9Sstevel@tonic-gate  *	detach(9E) entry point.
19627c478bd9Sstevel@tonic-gate  *
19637c478bd9Sstevel@tonic-gate  * NOTE: CPU callbacks should not block. They are called with cpu_lock held.
19647c478bd9Sstevel@tonic-gate  */
19657c478bd9Sstevel@tonic-gate 
19667c478bd9Sstevel@tonic-gate /*
19677c478bd9Sstevel@tonic-gate  * Ideally, these would be dynamically allocated and put into a linked
19687c478bd9Sstevel@tonic-gate  * list; however that is not feasible because the registration routine
19697c478bd9Sstevel@tonic-gate  * has to be available before the kmem allocator is working (in fact,
19707c478bd9Sstevel@tonic-gate  * it is called by the kmem allocator init code).  In any case, there
19717c478bd9Sstevel@tonic-gate  * are quite a few extra entries for future users.
19727c478bd9Sstevel@tonic-gate  */
19731aa15ad6Sjkennedy #define	NCPU_SETUPS	20
19747c478bd9Sstevel@tonic-gate 
19757c478bd9Sstevel@tonic-gate struct cpu_setup {
19767c478bd9Sstevel@tonic-gate 	cpu_setup_func_t *func;
19777c478bd9Sstevel@tonic-gate 	void *arg;
19787c478bd9Sstevel@tonic-gate } cpu_setups[NCPU_SETUPS];
19797c478bd9Sstevel@tonic-gate 
19807c478bd9Sstevel@tonic-gate void
19817c478bd9Sstevel@tonic-gate register_cpu_setup_func(cpu_setup_func_t *func, void *arg)
19827c478bd9Sstevel@tonic-gate {
19837c478bd9Sstevel@tonic-gate 	int i;
19847c478bd9Sstevel@tonic-gate 
19857c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
19867c478bd9Sstevel@tonic-gate 
19877c478bd9Sstevel@tonic-gate 	for (i = 0; i < NCPU_SETUPS; i++)
19887c478bd9Sstevel@tonic-gate 		if (cpu_setups[i].func == NULL)
19897c478bd9Sstevel@tonic-gate 		    break;
19907c478bd9Sstevel@tonic-gate 	if (i >= NCPU_SETUPS)
19917c478bd9Sstevel@tonic-gate 		cmn_err(CE_PANIC, "Ran out of cpu_setup callback entries");
19927c478bd9Sstevel@tonic-gate 
19937c478bd9Sstevel@tonic-gate 	cpu_setups[i].func = func;
19947c478bd9Sstevel@tonic-gate 	cpu_setups[i].arg = arg;
19957c478bd9Sstevel@tonic-gate }
19967c478bd9Sstevel@tonic-gate 
19977c478bd9Sstevel@tonic-gate void
19987c478bd9Sstevel@tonic-gate unregister_cpu_setup_func(cpu_setup_func_t *func, void *arg)
19997c478bd9Sstevel@tonic-gate {
20007c478bd9Sstevel@tonic-gate 	int i;
20017c478bd9Sstevel@tonic-gate 
20027c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
20037c478bd9Sstevel@tonic-gate 
20047c478bd9Sstevel@tonic-gate 	for (i = 0; i < NCPU_SETUPS; i++)
20057c478bd9Sstevel@tonic-gate 		if ((cpu_setups[i].func == func) &&
20067c478bd9Sstevel@tonic-gate 		    (cpu_setups[i].arg == arg))
20077c478bd9Sstevel@tonic-gate 		    break;
20087c478bd9Sstevel@tonic-gate 	if (i >= NCPU_SETUPS)
20097c478bd9Sstevel@tonic-gate 		cmn_err(CE_PANIC, "Could not find cpu_setup callback to "
20107c478bd9Sstevel@tonic-gate 		    "deregister");
20117c478bd9Sstevel@tonic-gate 
20127c478bd9Sstevel@tonic-gate 	cpu_setups[i].func = NULL;
20137c478bd9Sstevel@tonic-gate 	cpu_setups[i].arg = 0;
20147c478bd9Sstevel@tonic-gate }
20157c478bd9Sstevel@tonic-gate 
20167c478bd9Sstevel@tonic-gate /*
20177c478bd9Sstevel@tonic-gate  * Call any state change hooks for this CPU, ignore any errors.
20187c478bd9Sstevel@tonic-gate  */
20197c478bd9Sstevel@tonic-gate void
20207c478bd9Sstevel@tonic-gate cpu_state_change_notify(int id, cpu_setup_t what)
20217c478bd9Sstevel@tonic-gate {
20227c478bd9Sstevel@tonic-gate 	int i;
20237c478bd9Sstevel@tonic-gate 
20247c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
20257c478bd9Sstevel@tonic-gate 
20267c478bd9Sstevel@tonic-gate 	for (i = 0; i < NCPU_SETUPS; i++) {
20277c478bd9Sstevel@tonic-gate 		if (cpu_setups[i].func != NULL) {
20287c478bd9Sstevel@tonic-gate 			cpu_setups[i].func(what, id, cpu_setups[i].arg);
20297c478bd9Sstevel@tonic-gate 		}
20307c478bd9Sstevel@tonic-gate 	}
20317c478bd9Sstevel@tonic-gate }
20327c478bd9Sstevel@tonic-gate 
20337c478bd9Sstevel@tonic-gate /*
20347c478bd9Sstevel@tonic-gate  * Call any state change hooks for this CPU, undo it if error found.
20357c478bd9Sstevel@tonic-gate  */
20367c478bd9Sstevel@tonic-gate static int
20377c478bd9Sstevel@tonic-gate cpu_state_change_hooks(int id, cpu_setup_t what, cpu_setup_t undo)
20387c478bd9Sstevel@tonic-gate {
20397c478bd9Sstevel@tonic-gate 	int i;
20407c478bd9Sstevel@tonic-gate 	int retval = 0;
20417c478bd9Sstevel@tonic-gate 
20427c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
20437c478bd9Sstevel@tonic-gate 
20447c478bd9Sstevel@tonic-gate 	for (i = 0; i < NCPU_SETUPS; i++) {
20457c478bd9Sstevel@tonic-gate 		if (cpu_setups[i].func != NULL) {
20467c478bd9Sstevel@tonic-gate 			retval = cpu_setups[i].func(what, id,
20477c478bd9Sstevel@tonic-gate 			    cpu_setups[i].arg);
20487c478bd9Sstevel@tonic-gate 			if (retval) {
20497c478bd9Sstevel@tonic-gate 				for (i--; i >= 0; i--) {
20507c478bd9Sstevel@tonic-gate 					if (cpu_setups[i].func != NULL)
20517c478bd9Sstevel@tonic-gate 						cpu_setups[i].func(undo,
20527c478bd9Sstevel@tonic-gate 						    id, cpu_setups[i].arg);
20537c478bd9Sstevel@tonic-gate 				}
20547c478bd9Sstevel@tonic-gate 				break;
20557c478bd9Sstevel@tonic-gate 			}
20567c478bd9Sstevel@tonic-gate 		}
20577c478bd9Sstevel@tonic-gate 	}
20587c478bd9Sstevel@tonic-gate 	return (retval);
20597c478bd9Sstevel@tonic-gate }
20607c478bd9Sstevel@tonic-gate 
20617c478bd9Sstevel@tonic-gate /*
20627c478bd9Sstevel@tonic-gate  * Export information about this CPU via the kstat mechanism.
20637c478bd9Sstevel@tonic-gate  */
20647c478bd9Sstevel@tonic-gate static struct {
20657c478bd9Sstevel@tonic-gate 	kstat_named_t ci_state;
20667c478bd9Sstevel@tonic-gate 	kstat_named_t ci_state_begin;
20677c478bd9Sstevel@tonic-gate 	kstat_named_t ci_cpu_type;
20687c478bd9Sstevel@tonic-gate 	kstat_named_t ci_fpu_type;
20697c478bd9Sstevel@tonic-gate 	kstat_named_t ci_clock_MHz;
20707c478bd9Sstevel@tonic-gate 	kstat_named_t ci_chip_id;
20717c478bd9Sstevel@tonic-gate 	kstat_named_t ci_implementation;
20727aec1d6eScindi 	kstat_named_t ci_brandstr;
20737aec1d6eScindi 	kstat_named_t ci_core_id;
20747aec1d6eScindi #if defined(__sparcv9)
20757c478bd9Sstevel@tonic-gate 	kstat_named_t ci_device_ID;
20767c478bd9Sstevel@tonic-gate 	kstat_named_t ci_cpu_fru;
20777c478bd9Sstevel@tonic-gate #endif
20787aec1d6eScindi #if defined(__i386) || defined(__amd64)
20797aec1d6eScindi 	kstat_named_t ci_vendorstr;
20807aec1d6eScindi 	kstat_named_t ci_family;
20817aec1d6eScindi 	kstat_named_t ci_model;
20827aec1d6eScindi 	kstat_named_t ci_step;
20837aec1d6eScindi 	kstat_named_t ci_clogid;
20847aec1d6eScindi #endif
20857c478bd9Sstevel@tonic-gate } cpu_info_template = {
20867c478bd9Sstevel@tonic-gate 	{ "state",		KSTAT_DATA_CHAR },
20877c478bd9Sstevel@tonic-gate 	{ "state_begin",	KSTAT_DATA_LONG },
20887c478bd9Sstevel@tonic-gate 	{ "cpu_type",		KSTAT_DATA_CHAR },
20897c478bd9Sstevel@tonic-gate 	{ "fpu_type",		KSTAT_DATA_CHAR },
20907c478bd9Sstevel@tonic-gate 	{ "clock_MHz",		KSTAT_DATA_LONG },
20917c478bd9Sstevel@tonic-gate 	{ "chip_id",		KSTAT_DATA_LONG },
20927c478bd9Sstevel@tonic-gate 	{ "implementation",	KSTAT_DATA_STRING },
20937aec1d6eScindi 	{ "brand",		KSTAT_DATA_STRING },
20947aec1d6eScindi 	{ "core_id",		KSTAT_DATA_LONG },
20957aec1d6eScindi #if defined(__sparcv9)
20967c478bd9Sstevel@tonic-gate 	{ "device_ID",		KSTAT_DATA_UINT64 },
20977c478bd9Sstevel@tonic-gate 	{ "cpu_fru",		KSTAT_DATA_STRING },
20987c478bd9Sstevel@tonic-gate #endif
20997aec1d6eScindi #if defined(__i386) || defined(__amd64)
21007aec1d6eScindi 	{ "vendor_id",		KSTAT_DATA_STRING },
21017aec1d6eScindi 	{ "family",		KSTAT_DATA_INT32 },
21027aec1d6eScindi 	{ "model",		KSTAT_DATA_INT32 },
21037aec1d6eScindi 	{ "stepping",		KSTAT_DATA_INT32 },
21047aec1d6eScindi 	{ "clog_id",		KSTAT_DATA_INT32 },
21057aec1d6eScindi #endif
21067c478bd9Sstevel@tonic-gate };
21077c478bd9Sstevel@tonic-gate 
21087c478bd9Sstevel@tonic-gate static kmutex_t cpu_info_template_lock;
21097c478bd9Sstevel@tonic-gate 
21107c478bd9Sstevel@tonic-gate static int
21117c478bd9Sstevel@tonic-gate cpu_info_kstat_update(kstat_t *ksp, int rw)
21127c478bd9Sstevel@tonic-gate {
21137c478bd9Sstevel@tonic-gate 	cpu_t	*cp = ksp->ks_private;
21147c478bd9Sstevel@tonic-gate 	const char *pi_state;
21157c478bd9Sstevel@tonic-gate 
21167c478bd9Sstevel@tonic-gate 	if (rw == KSTAT_WRITE)
21177c478bd9Sstevel@tonic-gate 		return (EACCES);
21187c478bd9Sstevel@tonic-gate 
21197c478bd9Sstevel@tonic-gate 	switch (cp->cpu_type_info.pi_state) {
21207c478bd9Sstevel@tonic-gate 	case P_ONLINE:
21217c478bd9Sstevel@tonic-gate 		pi_state = PS_ONLINE;
21227c478bd9Sstevel@tonic-gate 		break;
21237c478bd9Sstevel@tonic-gate 	case P_POWEROFF:
21247c478bd9Sstevel@tonic-gate 		pi_state = PS_POWEROFF;
21257c478bd9Sstevel@tonic-gate 		break;
21267c478bd9Sstevel@tonic-gate 	case P_NOINTR:
21277c478bd9Sstevel@tonic-gate 		pi_state = PS_NOINTR;
21287c478bd9Sstevel@tonic-gate 		break;
21297c478bd9Sstevel@tonic-gate 	case P_FAULTED:
21307c478bd9Sstevel@tonic-gate 		pi_state = PS_FAULTED;
21317c478bd9Sstevel@tonic-gate 		break;
21327c478bd9Sstevel@tonic-gate 	case P_SPARE:
21337c478bd9Sstevel@tonic-gate 		pi_state = PS_SPARE;
21347c478bd9Sstevel@tonic-gate 		break;
21357c478bd9Sstevel@tonic-gate 	case P_OFFLINE:
21367c478bd9Sstevel@tonic-gate 		pi_state = PS_OFFLINE;
21377c478bd9Sstevel@tonic-gate 		break;
21387c478bd9Sstevel@tonic-gate 	default:
21397c478bd9Sstevel@tonic-gate 		pi_state = "unknown";
21407c478bd9Sstevel@tonic-gate 	}
21417c478bd9Sstevel@tonic-gate 	(void) strcpy(cpu_info_template.ci_state.value.c, pi_state);
21427c478bd9Sstevel@tonic-gate 	cpu_info_template.ci_state_begin.value.l = cp->cpu_state_begin;
21437c478bd9Sstevel@tonic-gate 	(void) strncpy(cpu_info_template.ci_cpu_type.value.c,
21447c478bd9Sstevel@tonic-gate 	    cp->cpu_type_info.pi_processor_type, 15);
21457c478bd9Sstevel@tonic-gate 	(void) strncpy(cpu_info_template.ci_fpu_type.value.c,
21467c478bd9Sstevel@tonic-gate 	    cp->cpu_type_info.pi_fputypes, 15);
21477c478bd9Sstevel@tonic-gate 	cpu_info_template.ci_clock_MHz.value.l = cp->cpu_type_info.pi_clock;
21487c478bd9Sstevel@tonic-gate 	cpu_info_template.ci_chip_id.value.l = chip_plat_get_chipid(cp);
21497c478bd9Sstevel@tonic-gate 	kstat_named_setstr(&cpu_info_template.ci_implementation,
21507c478bd9Sstevel@tonic-gate 	    cp->cpu_idstr);
21517aec1d6eScindi 	kstat_named_setstr(&cpu_info_template.ci_brandstr, cp->cpu_brandstr);
215289c0ae93Scindi 	cpu_info_template.ci_core_id.value.l = chip_plat_get_coreid(cp);
21537aec1d6eScindi 
21547aec1d6eScindi #if defined(__sparcv9)
21557c478bd9Sstevel@tonic-gate 	cpu_info_template.ci_device_ID.value.ui64 =
21567c478bd9Sstevel@tonic-gate 	    cpunodes[cp->cpu_id].device_id;
21577c478bd9Sstevel@tonic-gate 	kstat_named_setstr(&cpu_info_template.ci_cpu_fru, cpu_fru_fmri(cp));
21587c478bd9Sstevel@tonic-gate #endif
21597aec1d6eScindi #if defined(__i386) || defined(__amd64)
21607aec1d6eScindi 	kstat_named_setstr(&cpu_info_template.ci_vendorstr,
21617aec1d6eScindi 	    cpuid_getvendorstr(cp));
21627aec1d6eScindi 	cpu_info_template.ci_family.value.l = cpuid_getfamily(cp);
21637aec1d6eScindi 	cpu_info_template.ci_model.value.l = cpuid_getmodel(cp);
21647aec1d6eScindi 	cpu_info_template.ci_step.value.l = cpuid_getstep(cp);
21657aec1d6eScindi 	cpu_info_template.ci_clogid.value.l = chip_plat_get_clogid(cp);
21667aec1d6eScindi #endif
21677aec1d6eScindi 
21687c478bd9Sstevel@tonic-gate 	return (0);
21697c478bd9Sstevel@tonic-gate }
21707c478bd9Sstevel@tonic-gate 
21717c478bd9Sstevel@tonic-gate static void
21727c478bd9Sstevel@tonic-gate cpu_info_kstat_create(cpu_t *cp)
21737c478bd9Sstevel@tonic-gate {
21747c478bd9Sstevel@tonic-gate 	zoneid_t zoneid;
21757c478bd9Sstevel@tonic-gate 
21767c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
21777c478bd9Sstevel@tonic-gate 
21787c478bd9Sstevel@tonic-gate 	if (pool_pset_enabled())
21797c478bd9Sstevel@tonic-gate 		zoneid = GLOBAL_ZONEID;
21807c478bd9Sstevel@tonic-gate 	else
21817c478bd9Sstevel@tonic-gate 		zoneid = ALL_ZONES;
21827c478bd9Sstevel@tonic-gate 	if ((cp->cpu_info_kstat = kstat_create_zone("cpu_info", cp->cpu_id,
21837c478bd9Sstevel@tonic-gate 	    NULL, "misc", KSTAT_TYPE_NAMED,
21847c478bd9Sstevel@tonic-gate 		    sizeof (cpu_info_template) / sizeof (kstat_named_t),
21857c478bd9Sstevel@tonic-gate 		    KSTAT_FLAG_VIRTUAL, zoneid)) != NULL) {
21867c478bd9Sstevel@tonic-gate 		cp->cpu_info_kstat->ks_data_size += 2 * CPU_IDSTRLEN;
21877aec1d6eScindi #if defined(__sparcv9)
21887c478bd9Sstevel@tonic-gate 		cp->cpu_info_kstat->ks_data_size +=
21897c478bd9Sstevel@tonic-gate 		    strlen(cpu_fru_fmri(cp)) + 1;
21907c478bd9Sstevel@tonic-gate #endif
21917aec1d6eScindi #if defined(__i386) || defined(__amd64)
21927aec1d6eScindi 		cp->cpu_info_kstat->ks_data_size += X86_VENDOR_STRLEN;
21937aec1d6eScindi #endif
21947c478bd9Sstevel@tonic-gate 		cp->cpu_info_kstat->ks_lock = &cpu_info_template_lock;
21957c478bd9Sstevel@tonic-gate 		cp->cpu_info_kstat->ks_data = &cpu_info_template;
21967c478bd9Sstevel@tonic-gate 		cp->cpu_info_kstat->ks_private = cp;
21977c478bd9Sstevel@tonic-gate 		cp->cpu_info_kstat->ks_update = cpu_info_kstat_update;
21987c478bd9Sstevel@tonic-gate 		kstat_install(cp->cpu_info_kstat);
21997c478bd9Sstevel@tonic-gate 	}
22007c478bd9Sstevel@tonic-gate }
22017c478bd9Sstevel@tonic-gate 
22027c478bd9Sstevel@tonic-gate static void
22037c478bd9Sstevel@tonic-gate cpu_info_kstat_destroy(cpu_t *cp)
22047c478bd9Sstevel@tonic-gate {
22057c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
22067c478bd9Sstevel@tonic-gate 
22077c478bd9Sstevel@tonic-gate 	kstat_delete(cp->cpu_info_kstat);
22087c478bd9Sstevel@tonic-gate 	cp->cpu_info_kstat = NULL;
22097c478bd9Sstevel@tonic-gate }
22107c478bd9Sstevel@tonic-gate 
22117c478bd9Sstevel@tonic-gate /*
22127c478bd9Sstevel@tonic-gate  * Create and install kstats for the boot CPU.
22137c478bd9Sstevel@tonic-gate  */
22147c478bd9Sstevel@tonic-gate void
22157c478bd9Sstevel@tonic-gate cpu_kstat_init(cpu_t *cp)
22167c478bd9Sstevel@tonic-gate {
22177c478bd9Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
22187c478bd9Sstevel@tonic-gate 	cpu_info_kstat_create(cp);
22197c478bd9Sstevel@tonic-gate 	cpu_stats_kstat_create(cp);
22207c478bd9Sstevel@tonic-gate 	cpu_create_intrstat(cp);
22217c478bd9Sstevel@tonic-gate 	chip_kstat_create(cp->cpu_chip);
22227c478bd9Sstevel@tonic-gate 	cpu_set_state(cp);
22237c478bd9Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
22247c478bd9Sstevel@tonic-gate }
22257c478bd9Sstevel@tonic-gate 
22267c478bd9Sstevel@tonic-gate /*
22277c478bd9Sstevel@tonic-gate  * Make visible to the zone that subset of the cpu information that would be
22287c478bd9Sstevel@tonic-gate  * initialized when a cpu is configured (but still offline).
22297c478bd9Sstevel@tonic-gate  */
22307c478bd9Sstevel@tonic-gate void
22317c478bd9Sstevel@tonic-gate cpu_visibility_configure(cpu_t *cp, zone_t *zone)
22327c478bd9Sstevel@tonic-gate {
22337c478bd9Sstevel@tonic-gate 	zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
22347c478bd9Sstevel@tonic-gate 
22357c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
22367c478bd9Sstevel@tonic-gate 	ASSERT(pool_pset_enabled());
22377c478bd9Sstevel@tonic-gate 	ASSERT(cp != NULL);
22387c478bd9Sstevel@tonic-gate 
22397c478bd9Sstevel@tonic-gate 	if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
22407c478bd9Sstevel@tonic-gate 		zone->zone_ncpus++;
22417c478bd9Sstevel@tonic-gate 		ASSERT(zone->zone_ncpus <= ncpus);
22427c478bd9Sstevel@tonic-gate 	}
22437c478bd9Sstevel@tonic-gate 	if (cp->cpu_info_kstat != NULL)
22447c478bd9Sstevel@tonic-gate 		kstat_zone_add(cp->cpu_info_kstat, zoneid);
22457c478bd9Sstevel@tonic-gate }
22467c478bd9Sstevel@tonic-gate 
22477c478bd9Sstevel@tonic-gate /*
22487c478bd9Sstevel@tonic-gate  * Make visible to the zone that subset of the cpu information that would be
22497c478bd9Sstevel@tonic-gate  * initialized when a previously configured cpu is onlined.
22507c478bd9Sstevel@tonic-gate  */
22517c478bd9Sstevel@tonic-gate void
22527c478bd9Sstevel@tonic-gate cpu_visibility_online(cpu_t *cp, zone_t *zone)
22537c478bd9Sstevel@tonic-gate {
22547c478bd9Sstevel@tonic-gate 	kstat_t *ksp;
22557c478bd9Sstevel@tonic-gate 	char name[sizeof ("cpu_stat") + 10];	/* enough for 32-bit cpuids */
22567c478bd9Sstevel@tonic-gate 	zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
22577c478bd9Sstevel@tonic-gate 	processorid_t cpun;
22587c478bd9Sstevel@tonic-gate 
22597c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
22607c478bd9Sstevel@tonic-gate 	ASSERT(pool_pset_enabled());
22617c478bd9Sstevel@tonic-gate 	ASSERT(cp != NULL);
22627c478bd9Sstevel@tonic-gate 	ASSERT(cpu_is_active(cp));
22637c478bd9Sstevel@tonic-gate 
22647c478bd9Sstevel@tonic-gate 	cpun = cp->cpu_id;
22657c478bd9Sstevel@tonic-gate 	if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
22667c478bd9Sstevel@tonic-gate 		zone->zone_ncpus_online++;
22677c478bd9Sstevel@tonic-gate 		ASSERT(zone->zone_ncpus_online <= ncpus_online);
22687c478bd9Sstevel@tonic-gate 	}
22697c478bd9Sstevel@tonic-gate 	(void) snprintf(name, sizeof (name), "cpu_stat%d", cpun);
22707c478bd9Sstevel@tonic-gate 	if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES))
22717c478bd9Sstevel@tonic-gate 	    != NULL) {
22727c478bd9Sstevel@tonic-gate 		kstat_zone_add(ksp, zoneid);
22737c478bd9Sstevel@tonic-gate 		kstat_rele(ksp);
22747c478bd9Sstevel@tonic-gate 	}
22757c478bd9Sstevel@tonic-gate 	if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) {
22767c478bd9Sstevel@tonic-gate 		kstat_zone_add(ksp, zoneid);
22777c478bd9Sstevel@tonic-gate 		kstat_rele(ksp);
22787c478bd9Sstevel@tonic-gate 	}
22797c478bd9Sstevel@tonic-gate 	if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) {
22807c478bd9Sstevel@tonic-gate 		kstat_zone_add(ksp, zoneid);
22817c478bd9Sstevel@tonic-gate 		kstat_rele(ksp);
22827c478bd9Sstevel@tonic-gate 	}
22837c478bd9Sstevel@tonic-gate 	if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) !=
22847c478bd9Sstevel@tonic-gate 	    NULL) {
22857c478bd9Sstevel@tonic-gate 		kstat_zone_add(ksp, zoneid);
22867c478bd9Sstevel@tonic-gate 		kstat_rele(ksp);
22877c478bd9Sstevel@tonic-gate 	}
22887c478bd9Sstevel@tonic-gate }
22897c478bd9Sstevel@tonic-gate 
22907c478bd9Sstevel@tonic-gate /*
22917c478bd9Sstevel@tonic-gate  * Update relevant kstats such that cpu is now visible to processes
22927c478bd9Sstevel@tonic-gate  * executing in specified zone.
22937c478bd9Sstevel@tonic-gate  */
22947c478bd9Sstevel@tonic-gate void
22957c478bd9Sstevel@tonic-gate cpu_visibility_add(cpu_t *cp, zone_t *zone)
22967c478bd9Sstevel@tonic-gate {
22977c478bd9Sstevel@tonic-gate 	cpu_visibility_configure(cp, zone);
22987c478bd9Sstevel@tonic-gate 	if (cpu_is_active(cp))
22997c478bd9Sstevel@tonic-gate 		cpu_visibility_online(cp, zone);
23007c478bd9Sstevel@tonic-gate }
23017c478bd9Sstevel@tonic-gate 
23027c478bd9Sstevel@tonic-gate /*
23037c478bd9Sstevel@tonic-gate  * Make invisible to the zone that subset of the cpu information that would be
23047c478bd9Sstevel@tonic-gate  * torn down when a previously offlined cpu is unconfigured.
23057c478bd9Sstevel@tonic-gate  */
23067c478bd9Sstevel@tonic-gate void
23077c478bd9Sstevel@tonic-gate cpu_visibility_unconfigure(cpu_t *cp, zone_t *zone)
23087c478bd9Sstevel@tonic-gate {
23097c478bd9Sstevel@tonic-gate 	zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
23107c478bd9Sstevel@tonic-gate 
23117c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
23127c478bd9Sstevel@tonic-gate 	ASSERT(pool_pset_enabled());
23137c478bd9Sstevel@tonic-gate 	ASSERT(cp != NULL);
23147c478bd9Sstevel@tonic-gate 
23157c478bd9Sstevel@tonic-gate 	if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
23167c478bd9Sstevel@tonic-gate 		ASSERT(zone->zone_ncpus != 0);
23177c478bd9Sstevel@tonic-gate 		zone->zone_ncpus--;
23187c478bd9Sstevel@tonic-gate 	}
23197c478bd9Sstevel@tonic-gate 	if (cp->cpu_info_kstat)
23207c478bd9Sstevel@tonic-gate 		kstat_zone_remove(cp->cpu_info_kstat, zoneid);
23217c478bd9Sstevel@tonic-gate }
23227c478bd9Sstevel@tonic-gate 
23237c478bd9Sstevel@tonic-gate /*
23247c478bd9Sstevel@tonic-gate  * Make invisible to the zone that subset of the cpu information that would be
23257c478bd9Sstevel@tonic-gate  * torn down when a cpu is offlined (but still configured).
23267c478bd9Sstevel@tonic-gate  */
23277c478bd9Sstevel@tonic-gate void
23287c478bd9Sstevel@tonic-gate cpu_visibility_offline(cpu_t *cp, zone_t *zone)
23297c478bd9Sstevel@tonic-gate {
23307c478bd9Sstevel@tonic-gate 	kstat_t *ksp;
23317c478bd9Sstevel@tonic-gate 	char name[sizeof ("cpu_stat") + 10];	/* enough for 32-bit cpuids */
23327c478bd9Sstevel@tonic-gate 	zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
23337c478bd9Sstevel@tonic-gate 	processorid_t cpun;
23347c478bd9Sstevel@tonic-gate 
23357c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
23367c478bd9Sstevel@tonic-gate 	ASSERT(pool_pset_enabled());
23377c478bd9Sstevel@tonic-gate 	ASSERT(cp != NULL);
23387c478bd9Sstevel@tonic-gate 	ASSERT(cpu_is_active(cp));
23397c478bd9Sstevel@tonic-gate 
23407c478bd9Sstevel@tonic-gate 	cpun = cp->cpu_id;
23417c478bd9Sstevel@tonic-gate 	if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
23427c478bd9Sstevel@tonic-gate 		ASSERT(zone->zone_ncpus_online != 0);
23437c478bd9Sstevel@tonic-gate 		zone->zone_ncpus_online--;
23447c478bd9Sstevel@tonic-gate 	}
23457c478bd9Sstevel@tonic-gate 
23467c478bd9Sstevel@tonic-gate 	if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) !=
23477c478bd9Sstevel@tonic-gate 	    NULL) {
23487c478bd9Sstevel@tonic-gate 		kstat_zone_remove(ksp, zoneid);
23497c478bd9Sstevel@tonic-gate 		kstat_rele(ksp);
23507c478bd9Sstevel@tonic-gate 	}
23517c478bd9Sstevel@tonic-gate 	if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) {
23527c478bd9Sstevel@tonic-gate 		kstat_zone_remove(ksp, zoneid);
23537c478bd9Sstevel@tonic-gate 		kstat_rele(ksp);
23547c478bd9Sstevel@tonic-gate 	}
23557c478bd9Sstevel@tonic-gate 	if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) {
23567c478bd9Sstevel@tonic-gate 		kstat_zone_remove(ksp, zoneid);
23577c478bd9Sstevel@tonic-gate 		kstat_rele(ksp);
23587c478bd9Sstevel@tonic-gate 	}
23597c478bd9Sstevel@tonic-gate 	(void) snprintf(name, sizeof (name), "cpu_stat%d", cpun);
23607c478bd9Sstevel@tonic-gate 	if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES))
23617c478bd9Sstevel@tonic-gate 	    != NULL) {
23627c478bd9Sstevel@tonic-gate 		kstat_zone_remove(ksp, zoneid);
23637c478bd9Sstevel@tonic-gate 		kstat_rele(ksp);
23647c478bd9Sstevel@tonic-gate 	}
23657c478bd9Sstevel@tonic-gate }
23667c478bd9Sstevel@tonic-gate 
23677c478bd9Sstevel@tonic-gate /*
23687c478bd9Sstevel@tonic-gate  * Update relevant kstats such that cpu is no longer visible to processes
23697c478bd9Sstevel@tonic-gate  * executing in specified zone.
23707c478bd9Sstevel@tonic-gate  */
23717c478bd9Sstevel@tonic-gate void
23727c478bd9Sstevel@tonic-gate cpu_visibility_remove(cpu_t *cp, zone_t *zone)
23737c478bd9Sstevel@tonic-gate {
23747c478bd9Sstevel@tonic-gate 	if (cpu_is_active(cp))
23757c478bd9Sstevel@tonic-gate 		cpu_visibility_offline(cp, zone);
23767c478bd9Sstevel@tonic-gate 	cpu_visibility_unconfigure(cp, zone);
23777c478bd9Sstevel@tonic-gate }
23787c478bd9Sstevel@tonic-gate 
23797c478bd9Sstevel@tonic-gate /*
23807c478bd9Sstevel@tonic-gate  * Bind a thread to a CPU as requested.
23817c478bd9Sstevel@tonic-gate  */
23827c478bd9Sstevel@tonic-gate int
23837c478bd9Sstevel@tonic-gate cpu_bind_thread(kthread_id_t tp, processorid_t bind, processorid_t *obind,
23847c478bd9Sstevel@tonic-gate     int *error)
23857c478bd9Sstevel@tonic-gate {
23867c478bd9Sstevel@tonic-gate 	processorid_t	binding;
23877c478bd9Sstevel@tonic-gate 	cpu_t		*cp;
23887c478bd9Sstevel@tonic-gate 
23897c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
23907c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));
23917c478bd9Sstevel@tonic-gate 
23927c478bd9Sstevel@tonic-gate 	thread_lock(tp);
23937c478bd9Sstevel@tonic-gate 
23947c478bd9Sstevel@tonic-gate 	/*
23957c478bd9Sstevel@tonic-gate 	 * Record old binding, but change the obind, which was initialized
23967c478bd9Sstevel@tonic-gate 	 * to PBIND_NONE, only if this thread has a binding.  This avoids
23977c478bd9Sstevel@tonic-gate 	 * reporting PBIND_NONE for a process when some LWPs are bound.
23987c478bd9Sstevel@tonic-gate 	 */
23997c478bd9Sstevel@tonic-gate 	binding = tp->t_bind_cpu;
24007c478bd9Sstevel@tonic-gate 	if (binding != PBIND_NONE)
24017c478bd9Sstevel@tonic-gate 		*obind = binding;	/* record old binding */
24027c478bd9Sstevel@tonic-gate 
24037c478bd9Sstevel@tonic-gate 	if (bind == PBIND_QUERY) {
24047c478bd9Sstevel@tonic-gate 		thread_unlock(tp);
24057c478bd9Sstevel@tonic-gate 		return (0);
24067c478bd9Sstevel@tonic-gate 	}
24077c478bd9Sstevel@tonic-gate 
24087c478bd9Sstevel@tonic-gate 	/*
24097c478bd9Sstevel@tonic-gate 	 * If this thread/LWP cannot be bound because of permission
24107c478bd9Sstevel@tonic-gate 	 * problems, just note that and return success so that the
24117c478bd9Sstevel@tonic-gate 	 * other threads/LWPs will be bound.  This is the way
24127c478bd9Sstevel@tonic-gate 	 * processor_bind() is defined to work.
24137c478bd9Sstevel@tonic-gate 	 *
24147c478bd9Sstevel@tonic-gate 	 * Binding will get EPERM if the thread is of system class
24157c478bd9Sstevel@tonic-gate 	 * or hasprocperm() fails.
24167c478bd9Sstevel@tonic-gate 	 */
24177c478bd9Sstevel@tonic-gate 	if (tp->t_cid == 0 || !hasprocperm(tp->t_cred, CRED())) {
24187c478bd9Sstevel@tonic-gate 		*error = EPERM;
24197c478bd9Sstevel@tonic-gate 		thread_unlock(tp);
24207c478bd9Sstevel@tonic-gate 		return (0);
24217c478bd9Sstevel@tonic-gate 	}
24227c478bd9Sstevel@tonic-gate 
24237c478bd9Sstevel@tonic-gate 	binding = bind;
24247c478bd9Sstevel@tonic-gate 	if (binding != PBIND_NONE) {
24257c478bd9Sstevel@tonic-gate 		cp = cpu[binding];
24267c478bd9Sstevel@tonic-gate 		/*
24277c478bd9Sstevel@tonic-gate 		 * Make sure binding is in right partition.
24287c478bd9Sstevel@tonic-gate 		 */
24297c478bd9Sstevel@tonic-gate 		if (tp->t_cpupart != cp->cpu_part) {
24307c478bd9Sstevel@tonic-gate 			*error = EINVAL;
24317c478bd9Sstevel@tonic-gate 			thread_unlock(tp);
24327c478bd9Sstevel@tonic-gate 			return (0);
24337c478bd9Sstevel@tonic-gate 		}
24347c478bd9Sstevel@tonic-gate 	}
24357c478bd9Sstevel@tonic-gate 	tp->t_bind_cpu = binding;	/* set new binding */
24367c478bd9Sstevel@tonic-gate 
24377c478bd9Sstevel@tonic-gate 	/*
24387c478bd9Sstevel@tonic-gate 	 * If there is no system-set reason for affinity, set
24397c478bd9Sstevel@tonic-gate 	 * the t_bound_cpu field to reflect the binding.
24407c478bd9Sstevel@tonic-gate 	 */
24417c478bd9Sstevel@tonic-gate 	if (tp->t_affinitycnt == 0) {
24427c478bd9Sstevel@tonic-gate 		if (binding == PBIND_NONE) {
24437c478bd9Sstevel@tonic-gate 			/*
24447c478bd9Sstevel@tonic-gate 			 * We may need to adjust disp_max_unbound_pri
24457c478bd9Sstevel@tonic-gate 			 * since we're becoming unbound.
24467c478bd9Sstevel@tonic-gate 			 */
24477c478bd9Sstevel@tonic-gate 			disp_adjust_unbound_pri(tp);
24487c478bd9Sstevel@tonic-gate 
24497c478bd9Sstevel@tonic-gate 			tp->t_bound_cpu = NULL;	/* set new binding */
24507c478bd9Sstevel@tonic-gate 
24517c478bd9Sstevel@tonic-gate 			/*
24527c478bd9Sstevel@tonic-gate 			 * Move thread to lgroup with strongest affinity
24537c478bd9Sstevel@tonic-gate 			 * after unbinding
24547c478bd9Sstevel@tonic-gate 			 */
24557c478bd9Sstevel@tonic-gate 			if (tp->t_lgrp_affinity)
24567c478bd9Sstevel@tonic-gate 				lgrp_move_thread(tp,
24577c478bd9Sstevel@tonic-gate 				    lgrp_choose(tp, tp->t_cpupart), 1);
24587c478bd9Sstevel@tonic-gate 
24597c478bd9Sstevel@tonic-gate 			if (tp->t_state == TS_ONPROC &&
24607c478bd9Sstevel@tonic-gate 			    tp->t_cpu->cpu_part != tp->t_cpupart)
24617c478bd9Sstevel@tonic-gate 				cpu_surrender(tp);
24627c478bd9Sstevel@tonic-gate 		} else {
24637c478bd9Sstevel@tonic-gate 			lpl_t	*lpl;
24647c478bd9Sstevel@tonic-gate 
24657c478bd9Sstevel@tonic-gate 			tp->t_bound_cpu = cp;
24667c478bd9Sstevel@tonic-gate 			ASSERT(cp->cpu_lpl != NULL);
24677c478bd9Sstevel@tonic-gate 
24687c478bd9Sstevel@tonic-gate 			/*
24697c478bd9Sstevel@tonic-gate 			 * Set home to lgroup with most affinity containing CPU
24707c478bd9Sstevel@tonic-gate 			 * that thread is being bound or minimum bounding
24717c478bd9Sstevel@tonic-gate 			 * lgroup if no affinities set
24727c478bd9Sstevel@tonic-gate 			 */
24737c478bd9Sstevel@tonic-gate 			if (tp->t_lgrp_affinity)
24747c478bd9Sstevel@tonic-gate 				lpl = lgrp_affinity_best(tp, tp->t_cpupart, 0);
24757c478bd9Sstevel@tonic-gate 			else
24767c478bd9Sstevel@tonic-gate 				lpl = cp->cpu_lpl;
24777c478bd9Sstevel@tonic-gate 
24787c478bd9Sstevel@tonic-gate 			if (tp->t_lpl != lpl) {
24797c478bd9Sstevel@tonic-gate 				/* can't grab cpu_lock */
24807c478bd9Sstevel@tonic-gate 				lgrp_move_thread(tp, lpl, 1);
24817c478bd9Sstevel@tonic-gate 			}
24827c478bd9Sstevel@tonic-gate 
24837c478bd9Sstevel@tonic-gate 			/*
24847c478bd9Sstevel@tonic-gate 			 * Make the thread switch to the bound CPU.
24857c478bd9Sstevel@tonic-gate 			 * If the thread is runnable, we need to
24867c478bd9Sstevel@tonic-gate 			 * requeue it even if t_cpu is already set
24877c478bd9Sstevel@tonic-gate 			 * to the right CPU, since it may be on a
24887c478bd9Sstevel@tonic-gate 			 * kpreempt queue and need to move to a local
24897c478bd9Sstevel@tonic-gate 			 * queue.  We could check t_disp_queue to
24907c478bd9Sstevel@tonic-gate 			 * avoid unnecessary overhead if it's already
24917c478bd9Sstevel@tonic-gate 			 * on the right queue, but since this isn't
24927c478bd9Sstevel@tonic-gate 			 * a performance-critical operation it doesn't
24937c478bd9Sstevel@tonic-gate 			 * seem worth the extra code and complexity.
24947c478bd9Sstevel@tonic-gate 			 *
24957c478bd9Sstevel@tonic-gate 			 * If the thread is weakbound to the cpu then it will
24967c478bd9Sstevel@tonic-gate 			 * resist the new binding request until the weak
24977c478bd9Sstevel@tonic-gate 			 * binding drops.  The cpu_surrender or requeueing
24987c478bd9Sstevel@tonic-gate 			 * below could be skipped in such cases (since it
24997c478bd9Sstevel@tonic-gate 			 * will have no effect), but that would require
25007c478bd9Sstevel@tonic-gate 			 * thread_allowmigrate to acquire thread_lock so
25017c478bd9Sstevel@tonic-gate 			 * we'll take the very occasional hit here instead.
25027c478bd9Sstevel@tonic-gate 			 */
25037c478bd9Sstevel@tonic-gate 			if (tp->t_state == TS_ONPROC) {
25047c478bd9Sstevel@tonic-gate 				cpu_surrender(tp);
25057c478bd9Sstevel@tonic-gate 			} else if (tp->t_state == TS_RUN) {
25067c478bd9Sstevel@tonic-gate 				cpu_t *ocp = tp->t_cpu;
25077c478bd9Sstevel@tonic-gate 
25087c478bd9Sstevel@tonic-gate 				(void) dispdeq(tp);
25097c478bd9Sstevel@tonic-gate 				setbackdq(tp);
25107c478bd9Sstevel@tonic-gate 				/*
25117c478bd9Sstevel@tonic-gate 				 * Either on the bound CPU's disp queue now,
25127c478bd9Sstevel@tonic-gate 				 * or swapped out or on the swap queue.
25137c478bd9Sstevel@tonic-gate 				 */
25147c478bd9Sstevel@tonic-gate 				ASSERT(tp->t_disp_queue == cp->cpu_disp ||
25157c478bd9Sstevel@tonic-gate 				    tp->t_weakbound_cpu == ocp ||
25167c478bd9Sstevel@tonic-gate 				    (tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ))
25177c478bd9Sstevel@tonic-gate 				    != TS_LOAD);
25187c478bd9Sstevel@tonic-gate 			}
25197c478bd9Sstevel@tonic-gate 		}
25207c478bd9Sstevel@tonic-gate 	}
25217c478bd9Sstevel@tonic-gate 
25227c478bd9Sstevel@tonic-gate 	/*
25237c478bd9Sstevel@tonic-gate 	 * Our binding has changed; set TP_CHANGEBIND.
25247c478bd9Sstevel@tonic-gate 	 */
25257c478bd9Sstevel@tonic-gate 	tp->t_proc_flag |= TP_CHANGEBIND;
25267c478bd9Sstevel@tonic-gate 	aston(tp);
25277c478bd9Sstevel@tonic-gate 
25287c478bd9Sstevel@tonic-gate 	thread_unlock(tp);
25297c478bd9Sstevel@tonic-gate 
25307c478bd9Sstevel@tonic-gate 	return (0);
25317c478bd9Sstevel@tonic-gate }
25327c478bd9Sstevel@tonic-gate 
25337c478bd9Sstevel@tonic-gate #if CPUSET_WORDS > 1
25347c478bd9Sstevel@tonic-gate 
25357c478bd9Sstevel@tonic-gate /*
25367c478bd9Sstevel@tonic-gate  * Functions for implementing cpuset operations when a cpuset is more
25377c478bd9Sstevel@tonic-gate  * than one word.  On platforms where a cpuset is a single word these
25387c478bd9Sstevel@tonic-gate  * are implemented as macros in cpuvar.h.
25397c478bd9Sstevel@tonic-gate  */
25407c478bd9Sstevel@tonic-gate 
25417c478bd9Sstevel@tonic-gate void
25427c478bd9Sstevel@tonic-gate cpuset_all(cpuset_t *s)
25437c478bd9Sstevel@tonic-gate {
25447c478bd9Sstevel@tonic-gate 	int i;
25457c478bd9Sstevel@tonic-gate 
25467c478bd9Sstevel@tonic-gate 	for (i = 0; i < CPUSET_WORDS; i++)
25477c478bd9Sstevel@tonic-gate 		s->cpub[i] = ~0UL;
25487c478bd9Sstevel@tonic-gate }
25497c478bd9Sstevel@tonic-gate 
25507c478bd9Sstevel@tonic-gate void
25517c478bd9Sstevel@tonic-gate cpuset_all_but(cpuset_t *s, uint_t cpu)
25527c478bd9Sstevel@tonic-gate {
25537c478bd9Sstevel@tonic-gate 	cpuset_all(s);
25547c478bd9Sstevel@tonic-gate 	CPUSET_DEL(*s, cpu);
25557c478bd9Sstevel@tonic-gate }
25567c478bd9Sstevel@tonic-gate 
25577c478bd9Sstevel@tonic-gate void
25587c478bd9Sstevel@tonic-gate cpuset_only(cpuset_t *s, uint_t cpu)
25597c478bd9Sstevel@tonic-gate {
25607c478bd9Sstevel@tonic-gate 	CPUSET_ZERO(*s);
25617c478bd9Sstevel@tonic-gate 	CPUSET_ADD(*s, cpu);
25627c478bd9Sstevel@tonic-gate }
25637c478bd9Sstevel@tonic-gate 
25647c478bd9Sstevel@tonic-gate int
25657c478bd9Sstevel@tonic-gate cpuset_isnull(cpuset_t *s)
25667c478bd9Sstevel@tonic-gate {
25677c478bd9Sstevel@tonic-gate 	int i;
25687c478bd9Sstevel@tonic-gate 
25697c478bd9Sstevel@tonic-gate 	for (i = 0; i < CPUSET_WORDS; i++)
25707c478bd9Sstevel@tonic-gate 		if (s->cpub[i] != 0)
25717c478bd9Sstevel@tonic-gate 			return (0);
25727c478bd9Sstevel@tonic-gate 	return (1);
25737c478bd9Sstevel@tonic-gate }
25747c478bd9Sstevel@tonic-gate 
25757c478bd9Sstevel@tonic-gate int
25767c478bd9Sstevel@tonic-gate cpuset_cmp(cpuset_t *s1, cpuset_t *s2)
25777c478bd9Sstevel@tonic-gate {
25787c478bd9Sstevel@tonic-gate 	int i;
25797c478bd9Sstevel@tonic-gate 
25807c478bd9Sstevel@tonic-gate 	for (i = 0; i < CPUSET_WORDS; i++)
25817c478bd9Sstevel@tonic-gate 		if (s1->cpub[i] != s2->cpub[i])
25827c478bd9Sstevel@tonic-gate 			return (0);
25837c478bd9Sstevel@tonic-gate 	return (1);
25847c478bd9Sstevel@tonic-gate }
25857c478bd9Sstevel@tonic-gate 
25867c478bd9Sstevel@tonic-gate uint_t
25877c478bd9Sstevel@tonic-gate cpuset_find(cpuset_t *s)
25887c478bd9Sstevel@tonic-gate {
25897c478bd9Sstevel@tonic-gate 
25907c478bd9Sstevel@tonic-gate 	uint_t	i;
25917c478bd9Sstevel@tonic-gate 	uint_t	cpu = (uint_t)-1;
25927c478bd9Sstevel@tonic-gate 
25937c478bd9Sstevel@tonic-gate 	/*
25947c478bd9Sstevel@tonic-gate 	 * Find a cpu in the cpuset
25957c478bd9Sstevel@tonic-gate 	 */
2596*25cf1a30Sjl139090 	for (i = 0; i < CPUSET_WORDS; i++) {
25977c478bd9Sstevel@tonic-gate 		cpu = (uint_t)(lowbit(s->cpub[i]) - 1);
2598*25cf1a30Sjl139090 		if (cpu != (uint_t)-1) {
2599*25cf1a30Sjl139090 			cpu += i * BT_NBIPUL;
2600*25cf1a30Sjl139090 			break;
2601*25cf1a30Sjl139090 		}
2602*25cf1a30Sjl139090 	}
26037c478bd9Sstevel@tonic-gate 	return (cpu);
26047c478bd9Sstevel@tonic-gate }
26057c478bd9Sstevel@tonic-gate 
26067c478bd9Sstevel@tonic-gate #endif	/* CPUSET_WORDS */
26077c478bd9Sstevel@tonic-gate 
26087c478bd9Sstevel@tonic-gate /*
26097c478bd9Sstevel@tonic-gate  * Unbind all user threads bound to a given CPU.
26107c478bd9Sstevel@tonic-gate  */
26117c478bd9Sstevel@tonic-gate int
26127c478bd9Sstevel@tonic-gate cpu_unbind(processorid_t cpu)
26137c478bd9Sstevel@tonic-gate {
26147c478bd9Sstevel@tonic-gate 	processorid_t obind;
26157c478bd9Sstevel@tonic-gate 	kthread_t *tp;
26167c478bd9Sstevel@tonic-gate 	int ret = 0;
26177c478bd9Sstevel@tonic-gate 	proc_t *pp;
26187c478bd9Sstevel@tonic-gate 	int err, berr = 0;
26197c478bd9Sstevel@tonic-gate 
26207c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
26217c478bd9Sstevel@tonic-gate 
26227c478bd9Sstevel@tonic-gate 	mutex_enter(&pidlock);
26237c478bd9Sstevel@tonic-gate 	for (pp = practive; pp != NULL; pp = pp->p_next) {
26247c478bd9Sstevel@tonic-gate 		mutex_enter(&pp->p_lock);
26257c478bd9Sstevel@tonic-gate 		tp = pp->p_tlist;
26267c478bd9Sstevel@tonic-gate 		/*
26277c478bd9Sstevel@tonic-gate 		 * Skip zombies, kernel processes, and processes in
26287c478bd9Sstevel@tonic-gate 		 * other zones, if called from a non-global zone.
26297c478bd9Sstevel@tonic-gate 		 */
26307c478bd9Sstevel@tonic-gate 		if (tp == NULL || (pp->p_flag & SSYS) ||
26317c478bd9Sstevel@tonic-gate 		    !HASZONEACCESS(curproc, pp->p_zone->zone_id)) {
26327c478bd9Sstevel@tonic-gate 			mutex_exit(&pp->p_lock);
26337c478bd9Sstevel@tonic-gate 			continue;
26347c478bd9Sstevel@tonic-gate 		}
26357c478bd9Sstevel@tonic-gate 		do {
26367c478bd9Sstevel@tonic-gate 			if (tp->t_bind_cpu != cpu)
26377c478bd9Sstevel@tonic-gate 				continue;
26387c478bd9Sstevel@tonic-gate 			err = cpu_bind_thread(tp, PBIND_NONE, &obind, &berr);
26397c478bd9Sstevel@tonic-gate 			if (ret == 0)
26407c478bd9Sstevel@tonic-gate 				ret = err;
26417c478bd9Sstevel@tonic-gate 		} while ((tp = tp->t_forw) != pp->p_tlist);
26427c478bd9Sstevel@tonic-gate 		mutex_exit(&pp->p_lock);
26437c478bd9Sstevel@tonic-gate 	}
26447c478bd9Sstevel@tonic-gate 	mutex_exit(&pidlock);
26457c478bd9Sstevel@tonic-gate 	if (ret == 0)
26467c478bd9Sstevel@tonic-gate 		ret = berr;
26477c478bd9Sstevel@tonic-gate 	return (ret);
26487c478bd9Sstevel@tonic-gate }
26497c478bd9Sstevel@tonic-gate 
26507c478bd9Sstevel@tonic-gate 
26517c478bd9Sstevel@tonic-gate /*
26527c478bd9Sstevel@tonic-gate  * Destroy all remaining bound threads on a cpu.
26537c478bd9Sstevel@tonic-gate  */
26547c478bd9Sstevel@tonic-gate void
26557c478bd9Sstevel@tonic-gate cpu_destroy_bound_threads(cpu_t *cp)
26567c478bd9Sstevel@tonic-gate {
26577c478bd9Sstevel@tonic-gate 	extern id_t syscid;
26587c478bd9Sstevel@tonic-gate 	register kthread_id_t	t, tlist, tnext;
26597c478bd9Sstevel@tonic-gate 
26607c478bd9Sstevel@tonic-gate 	/*
26617c478bd9Sstevel@tonic-gate 	 * Destroy all remaining bound threads on the cpu.  This
26627c478bd9Sstevel@tonic-gate 	 * should include both the interrupt threads and the idle thread.
26637c478bd9Sstevel@tonic-gate 	 * This requires some care, since we need to traverse the
26647c478bd9Sstevel@tonic-gate 	 * thread list with the pidlock mutex locked, but thread_free
26657c478bd9Sstevel@tonic-gate 	 * also locks the pidlock mutex.  So, we collect the threads
26667c478bd9Sstevel@tonic-gate 	 * we're going to reap in a list headed by "tlist", then we
26677c478bd9Sstevel@tonic-gate 	 * unlock the pidlock mutex and traverse the tlist list,
26687c478bd9Sstevel@tonic-gate 	 * doing thread_free's on the thread's.	 Simple, n'est pas?
26697c478bd9Sstevel@tonic-gate 	 * Also, this depends on thread_free not mucking with the
26707c478bd9Sstevel@tonic-gate 	 * t_next and t_prev links of the thread.
26717c478bd9Sstevel@tonic-gate 	 */
26727c478bd9Sstevel@tonic-gate 
26737c478bd9Sstevel@tonic-gate 	if ((t = curthread) != NULL) {
26747c478bd9Sstevel@tonic-gate 
26757c478bd9Sstevel@tonic-gate 		tlist = NULL;
26767c478bd9Sstevel@tonic-gate 		mutex_enter(&pidlock);
26777c478bd9Sstevel@tonic-gate 		do {
26787c478bd9Sstevel@tonic-gate 			tnext = t->t_next;
26797c478bd9Sstevel@tonic-gate 			if (t->t_bound_cpu == cp) {
26807c478bd9Sstevel@tonic-gate 
26817c478bd9Sstevel@tonic-gate 				/*
26827c478bd9Sstevel@tonic-gate 				 * We've found a bound thread, carefully unlink
26837c478bd9Sstevel@tonic-gate 				 * it out of the thread list, and add it to
26847c478bd9Sstevel@tonic-gate 				 * our "tlist".	 We "know" we don't have to
26857c478bd9Sstevel@tonic-gate 				 * worry about unlinking curthread (the thread
26867c478bd9Sstevel@tonic-gate 				 * that is executing this code).
26877c478bd9Sstevel@tonic-gate 				 */
26887c478bd9Sstevel@tonic-gate 				t->t_next->t_prev = t->t_prev;
26897c478bd9Sstevel@tonic-gate 				t->t_prev->t_next = t->t_next;
26907c478bd9Sstevel@tonic-gate 				t->t_next = tlist;
26917c478bd9Sstevel@tonic-gate 				tlist = t;
26927c478bd9Sstevel@tonic-gate 				ASSERT(t->t_cid == syscid);
26937c478bd9Sstevel@tonic-gate 				/* wake up anyone blocked in thread_join */
26947c478bd9Sstevel@tonic-gate 				cv_broadcast(&t->t_joincv);
26957c478bd9Sstevel@tonic-gate 				/*
26967c478bd9Sstevel@tonic-gate 				 * t_lwp set by interrupt threads and not
26977c478bd9Sstevel@tonic-gate 				 * cleared.
26987c478bd9Sstevel@tonic-gate 				 */
26997c478bd9Sstevel@tonic-gate 				t->t_lwp = NULL;
27007c478bd9Sstevel@tonic-gate 				/*
27017c478bd9Sstevel@tonic-gate 				 * Pause and idle threads always have
27027c478bd9Sstevel@tonic-gate 				 * t_state set to TS_ONPROC.
27037c478bd9Sstevel@tonic-gate 				 */
27047c478bd9Sstevel@tonic-gate 				t->t_state = TS_FREE;
27057c478bd9Sstevel@tonic-gate 				t->t_prev = NULL;	/* Just in case */
27067c478bd9Sstevel@tonic-gate 			}
27077c478bd9Sstevel@tonic-gate 
27087c478bd9Sstevel@tonic-gate 		} while ((t = tnext) != curthread);
27097c478bd9Sstevel@tonic-gate 
27107c478bd9Sstevel@tonic-gate 		mutex_exit(&pidlock);
27117c478bd9Sstevel@tonic-gate 
27127c478bd9Sstevel@tonic-gate 
27137c478bd9Sstevel@tonic-gate 		for (t = tlist; t != NULL; t = tnext) {
27147c478bd9Sstevel@tonic-gate 			tnext = t->t_next;
27157c478bd9Sstevel@tonic-gate 			thread_free(t);
27167c478bd9Sstevel@tonic-gate 		}
27177c478bd9Sstevel@tonic-gate 	}
27187c478bd9Sstevel@tonic-gate }
27197c478bd9Sstevel@tonic-gate 
27207c478bd9Sstevel@tonic-gate /*
27217c478bd9Sstevel@tonic-gate  * processor_info(2) and p_online(2) status support functions
27227c478bd9Sstevel@tonic-gate  *   The constants returned by the cpu_get_state() and cpu_get_state_str() are
27237c478bd9Sstevel@tonic-gate  *   for use in communicating processor state information to userland.  Kernel
27247c478bd9Sstevel@tonic-gate  *   subsystems should only be using the cpu_flags value directly.  Subsystems
27257c478bd9Sstevel@tonic-gate  *   modifying cpu_flags should record the state change via a call to the
27267c478bd9Sstevel@tonic-gate  *   cpu_set_state().
27277c478bd9Sstevel@tonic-gate  */
27287c478bd9Sstevel@tonic-gate 
27297c478bd9Sstevel@tonic-gate /*
27307c478bd9Sstevel@tonic-gate  * Update the pi_state of this CPU.  This function provides the CPU status for
27317c478bd9Sstevel@tonic-gate  * the information returned by processor_info(2).
27327c478bd9Sstevel@tonic-gate  */
27337c478bd9Sstevel@tonic-gate void
27347c478bd9Sstevel@tonic-gate cpu_set_state(cpu_t *cpu)
27357c478bd9Sstevel@tonic-gate {
27367c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
27377c478bd9Sstevel@tonic-gate 	cpu->cpu_type_info.pi_state = cpu_get_state(cpu);
27387c478bd9Sstevel@tonic-gate 	cpu->cpu_state_begin = gethrestime_sec();
27397c478bd9Sstevel@tonic-gate 	pool_cpu_mod = gethrtime();
27407c478bd9Sstevel@tonic-gate }
27417c478bd9Sstevel@tonic-gate 
27427c478bd9Sstevel@tonic-gate /*
27437c478bd9Sstevel@tonic-gate  * Return offline/online/other status for the indicated CPU.  Use only for
27447c478bd9Sstevel@tonic-gate  * communication with user applications; cpu_flags provides the in-kernel
27457c478bd9Sstevel@tonic-gate  * interface.
27467c478bd9Sstevel@tonic-gate  */
27477c478bd9Sstevel@tonic-gate int
27487c478bd9Sstevel@tonic-gate cpu_get_state(cpu_t *cpu)
27497c478bd9Sstevel@tonic-gate {
27507c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
27517c478bd9Sstevel@tonic-gate 	if (cpu->cpu_flags & CPU_POWEROFF)
27527c478bd9Sstevel@tonic-gate 		return (P_POWEROFF);
27537c478bd9Sstevel@tonic-gate 	else if (cpu->cpu_flags & CPU_FAULTED)
27547c478bd9Sstevel@tonic-gate 		return (P_FAULTED);
27557c478bd9Sstevel@tonic-gate 	else if (cpu->cpu_flags & CPU_SPARE)
27567c478bd9Sstevel@tonic-gate 		return (P_SPARE);
27577c478bd9Sstevel@tonic-gate 	else if ((cpu->cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY)
27587c478bd9Sstevel@tonic-gate 		return (P_OFFLINE);
27597c478bd9Sstevel@tonic-gate 	else if (cpu->cpu_flags & CPU_ENABLE)
27607c478bd9Sstevel@tonic-gate 		return (P_ONLINE);
27617c478bd9Sstevel@tonic-gate 	else
27627c478bd9Sstevel@tonic-gate 		return (P_NOINTR);
27637c478bd9Sstevel@tonic-gate }
27647c478bd9Sstevel@tonic-gate 
27657c478bd9Sstevel@tonic-gate /*
27667c478bd9Sstevel@tonic-gate  * Return processor_info(2) state as a string.
27677c478bd9Sstevel@tonic-gate  */
27687c478bd9Sstevel@tonic-gate const char *
27697c478bd9Sstevel@tonic-gate cpu_get_state_str(cpu_t *cpu)
27707c478bd9Sstevel@tonic-gate {
27717c478bd9Sstevel@tonic-gate 	const char *string;
27727c478bd9Sstevel@tonic-gate 
27737c478bd9Sstevel@tonic-gate 	switch (cpu_get_state(cpu)) {
27747c478bd9Sstevel@tonic-gate 	case P_ONLINE:
27757c478bd9Sstevel@tonic-gate 		string = PS_ONLINE;
27767c478bd9Sstevel@tonic-gate 		break;
27777c478bd9Sstevel@tonic-gate 	case P_POWEROFF:
27787c478bd9Sstevel@tonic-gate 		string = PS_POWEROFF;
27797c478bd9Sstevel@tonic-gate 		break;
27807c478bd9Sstevel@tonic-gate 	case P_NOINTR:
27817c478bd9Sstevel@tonic-gate 		string = PS_NOINTR;
27827c478bd9Sstevel@tonic-gate 		break;
27837c478bd9Sstevel@tonic-gate 	case P_SPARE:
27847c478bd9Sstevel@tonic-gate 		string = PS_SPARE;
27857c478bd9Sstevel@tonic-gate 		break;
27867c478bd9Sstevel@tonic-gate 	case P_FAULTED:
27877c478bd9Sstevel@tonic-gate 		string = PS_FAULTED;
27887c478bd9Sstevel@tonic-gate 		break;
27897c478bd9Sstevel@tonic-gate 	case P_OFFLINE:
27907c478bd9Sstevel@tonic-gate 		string = PS_OFFLINE;
27917c478bd9Sstevel@tonic-gate 		break;
27927c478bd9Sstevel@tonic-gate 	default:
27937c478bd9Sstevel@tonic-gate 		string = "unknown";
27947c478bd9Sstevel@tonic-gate 		break;
27957c478bd9Sstevel@tonic-gate 	}
27967c478bd9Sstevel@tonic-gate 	return (string);
27977c478bd9Sstevel@tonic-gate }
27987c478bd9Sstevel@tonic-gate 
27997c478bd9Sstevel@tonic-gate /*
28007c478bd9Sstevel@tonic-gate  * Export this CPU's statistics (cpu_stat_t and cpu_stats_t) as raw and named
28017c478bd9Sstevel@tonic-gate  * kstats, respectively.  This is done when a CPU is initialized or placed
28027c478bd9Sstevel@tonic-gate  * online via p_online(2).
28037c478bd9Sstevel@tonic-gate  */
28047c478bd9Sstevel@tonic-gate static void
28057c478bd9Sstevel@tonic-gate cpu_stats_kstat_create(cpu_t *cp)
28067c478bd9Sstevel@tonic-gate {
28077c478bd9Sstevel@tonic-gate 	int 	instance = cp->cpu_id;
28087c478bd9Sstevel@tonic-gate 	char 	*module = "cpu";
28097c478bd9Sstevel@tonic-gate 	char 	*class = "misc";
28107c478bd9Sstevel@tonic-gate 	kstat_t	*ksp;
28117c478bd9Sstevel@tonic-gate 	zoneid_t zoneid;
28127c478bd9Sstevel@tonic-gate 
28137c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
28147c478bd9Sstevel@tonic-gate 
28157c478bd9Sstevel@tonic-gate 	if (pool_pset_enabled())
28167c478bd9Sstevel@tonic-gate 		zoneid = GLOBAL_ZONEID;
28177c478bd9Sstevel@tonic-gate 	else
28187c478bd9Sstevel@tonic-gate 		zoneid = ALL_ZONES;
28197c478bd9Sstevel@tonic-gate 	/*
28207c478bd9Sstevel@tonic-gate 	 * Create named kstats
28217c478bd9Sstevel@tonic-gate 	 */
28227c478bd9Sstevel@tonic-gate #define	CPU_STATS_KS_CREATE(name, tsize, update_func)                    \
28237c478bd9Sstevel@tonic-gate 	ksp = kstat_create_zone(module, instance, (name), class,         \
28247c478bd9Sstevel@tonic-gate 	    KSTAT_TYPE_NAMED, (tsize) / sizeof (kstat_named_t), 0,       \
28257c478bd9Sstevel@tonic-gate 	    zoneid);                                                     \
28267c478bd9Sstevel@tonic-gate 	if (ksp != NULL) {                                               \
28277c478bd9Sstevel@tonic-gate 		ksp->ks_private = cp;                                    \
28287c478bd9Sstevel@tonic-gate 		ksp->ks_update = (update_func);                          \
28297c478bd9Sstevel@tonic-gate 		kstat_install(ksp);                                      \
28307c478bd9Sstevel@tonic-gate 	} else                                                           \
28317c478bd9Sstevel@tonic-gate 		cmn_err(CE_WARN, "cpu: unable to create %s:%d:%s kstat", \
28327c478bd9Sstevel@tonic-gate 		    module, instance, (name));
28337c478bd9Sstevel@tonic-gate 
28347c478bd9Sstevel@tonic-gate 	CPU_STATS_KS_CREATE("sys", sizeof (cpu_sys_stats_ks_data_template),
28357c478bd9Sstevel@tonic-gate 	    cpu_sys_stats_ks_update);
28367c478bd9Sstevel@tonic-gate 	CPU_STATS_KS_CREATE("vm", sizeof (cpu_vm_stats_ks_data_template),
28377c478bd9Sstevel@tonic-gate 	    cpu_vm_stats_ks_update);
28387c478bd9Sstevel@tonic-gate 
28397c478bd9Sstevel@tonic-gate 	/*
28407c478bd9Sstevel@tonic-gate 	 * Export the familiar cpu_stat_t KSTAT_TYPE_RAW kstat.
28417c478bd9Sstevel@tonic-gate 	 */
28427c478bd9Sstevel@tonic-gate 	ksp = kstat_create_zone("cpu_stat", cp->cpu_id, NULL,
28437c478bd9Sstevel@tonic-gate 	    "misc", KSTAT_TYPE_RAW, sizeof (cpu_stat_t), 0, zoneid);
28447c478bd9Sstevel@tonic-gate 	if (ksp != NULL) {
28457c478bd9Sstevel@tonic-gate 		ksp->ks_update = cpu_stat_ks_update;
28467c478bd9Sstevel@tonic-gate 		ksp->ks_private = cp;
28477c478bd9Sstevel@tonic-gate 		kstat_install(ksp);
28487c478bd9Sstevel@tonic-gate 	}
28497c478bd9Sstevel@tonic-gate }
28507c478bd9Sstevel@tonic-gate 
28517c478bd9Sstevel@tonic-gate static void
28527c478bd9Sstevel@tonic-gate cpu_stats_kstat_destroy(cpu_t *cp)
28537c478bd9Sstevel@tonic-gate {
28547c478bd9Sstevel@tonic-gate 	char ks_name[KSTAT_STRLEN];
28557c478bd9Sstevel@tonic-gate 
28567c478bd9Sstevel@tonic-gate 	(void) sprintf(ks_name, "cpu_stat%d", cp->cpu_id);
28577c478bd9Sstevel@tonic-gate 	kstat_delete_byname("cpu_stat", cp->cpu_id, ks_name);
28587c478bd9Sstevel@tonic-gate 
28597c478bd9Sstevel@tonic-gate 	kstat_delete_byname("cpu", cp->cpu_id, "sys");
28607c478bd9Sstevel@tonic-gate 	kstat_delete_byname("cpu", cp->cpu_id, "vm");
28617c478bd9Sstevel@tonic-gate }
28627c478bd9Sstevel@tonic-gate 
28637c478bd9Sstevel@tonic-gate static int
28647c478bd9Sstevel@tonic-gate cpu_sys_stats_ks_update(kstat_t *ksp, int rw)
28657c478bd9Sstevel@tonic-gate {
28667c478bd9Sstevel@tonic-gate 	cpu_t *cp = (cpu_t *)ksp->ks_private;
28677c478bd9Sstevel@tonic-gate 	struct cpu_sys_stats_ks_data *csskd;
28687c478bd9Sstevel@tonic-gate 	cpu_sys_stats_t *css;
2869eda89462Sesolom 	hrtime_t msnsecs[NCMSTATES];
28707c478bd9Sstevel@tonic-gate 	int	i;
28717c478bd9Sstevel@tonic-gate 
28727c478bd9Sstevel@tonic-gate 	if (rw == KSTAT_WRITE)
28737c478bd9Sstevel@tonic-gate 		return (EACCES);
28747c478bd9Sstevel@tonic-gate 
28757c478bd9Sstevel@tonic-gate 	csskd = ksp->ks_data;
28767c478bd9Sstevel@tonic-gate 	css = &cp->cpu_stats.sys;
28777c478bd9Sstevel@tonic-gate 
2878eda89462Sesolom 	/*
2879eda89462Sesolom 	 * Read CPU mstate, but compare with the last values we
2880eda89462Sesolom 	 * received to make sure that the returned kstats never
2881eda89462Sesolom 	 * decrease.
2882eda89462Sesolom 	 */
2883eda89462Sesolom 
2884eda89462Sesolom 	get_cpu_mstate(cp, msnsecs);
2885eda89462Sesolom 	if (csskd->cpu_nsec_idle.value.ui64 > msnsecs[CMS_IDLE])
2886eda89462Sesolom 		msnsecs[CMS_IDLE] = csskd->cpu_nsec_idle.value.ui64;
2887eda89462Sesolom 	if (csskd->cpu_nsec_user.value.ui64 > msnsecs[CMS_USER])
2888eda89462Sesolom 		msnsecs[CMS_USER] = csskd->cpu_nsec_user.value.ui64;
2889eda89462Sesolom 	if (csskd->cpu_nsec_kernel.value.ui64 > msnsecs[CMS_SYSTEM])
2890eda89462Sesolom 		msnsecs[CMS_SYSTEM] = csskd->cpu_nsec_kernel.value.ui64;
2891eda89462Sesolom 
28927c478bd9Sstevel@tonic-gate 	bcopy(&cpu_sys_stats_ks_data_template, ksp->ks_data,
28937c478bd9Sstevel@tonic-gate 	    sizeof (cpu_sys_stats_ks_data_template));
2894eda89462Sesolom 
28957c478bd9Sstevel@tonic-gate 	csskd->cpu_ticks_wait.value.ui64 = 0;
28967c478bd9Sstevel@tonic-gate 	csskd->wait_ticks_io.value.ui64 = 0;
28977c478bd9Sstevel@tonic-gate 
2898eda89462Sesolom 	csskd->cpu_nsec_idle.value.ui64 = msnsecs[CMS_IDLE];
2899eda89462Sesolom 	csskd->cpu_nsec_user.value.ui64 = msnsecs[CMS_USER];
2900eda89462Sesolom 	csskd->cpu_nsec_kernel.value.ui64 = msnsecs[CMS_SYSTEM];
29017c478bd9Sstevel@tonic-gate 	csskd->cpu_ticks_idle.value.ui64 =
29027c478bd9Sstevel@tonic-gate 	    NSEC_TO_TICK(csskd->cpu_nsec_idle.value.ui64);
29037c478bd9Sstevel@tonic-gate 	csskd->cpu_ticks_user.value.ui64 =
29047c478bd9Sstevel@tonic-gate 	    NSEC_TO_TICK(csskd->cpu_nsec_user.value.ui64);
29057c478bd9Sstevel@tonic-gate 	csskd->cpu_ticks_kernel.value.ui64 =
29067c478bd9Sstevel@tonic-gate 	    NSEC_TO_TICK(csskd->cpu_nsec_kernel.value.ui64);
29077c478bd9Sstevel@tonic-gate 	csskd->bread.value.ui64 = css->bread;
29087c478bd9Sstevel@tonic-gate 	csskd->bwrite.value.ui64 = css->bwrite;
29097c478bd9Sstevel@tonic-gate 	csskd->lread.value.ui64 = css->lread;
29107c478bd9Sstevel@tonic-gate 	csskd->lwrite.value.ui64 = css->lwrite;
29117c478bd9Sstevel@tonic-gate 	csskd->phread.value.ui64 = css->phread;
29127c478bd9Sstevel@tonic-gate 	csskd->phwrite.value.ui64 = css->phwrite;
29137c478bd9Sstevel@tonic-gate 	csskd->pswitch.value.ui64 = css->pswitch;
29147c478bd9Sstevel@tonic-gate 	csskd->trap.value.ui64 = css->trap;
29157c478bd9Sstevel@tonic-gate 	csskd->intr.value.ui64 = 0;
29167c478bd9Sstevel@tonic-gate 	for (i = 0; i < PIL_MAX; i++)
29177c478bd9Sstevel@tonic-gate 		csskd->intr.value.ui64 += css->intr[i];
29187c478bd9Sstevel@tonic-gate 	csskd->syscall.value.ui64 = css->syscall;
29197c478bd9Sstevel@tonic-gate 	csskd->sysread.value.ui64 = css->sysread;
29207c478bd9Sstevel@tonic-gate 	csskd->syswrite.value.ui64 = css->syswrite;
29217c478bd9Sstevel@tonic-gate 	csskd->sysfork.value.ui64 = css->sysfork;
29227c478bd9Sstevel@tonic-gate 	csskd->sysvfork.value.ui64 = css->sysvfork;
29237c478bd9Sstevel@tonic-gate 	csskd->sysexec.value.ui64 = css->sysexec;
29247c478bd9Sstevel@tonic-gate 	csskd->readch.value.ui64 = css->readch;
29257c478bd9Sstevel@tonic-gate 	csskd->writech.value.ui64 = css->writech;
29267c478bd9Sstevel@tonic-gate 	csskd->rcvint.value.ui64 = css->rcvint;
29277c478bd9Sstevel@tonic-gate 	csskd->xmtint.value.ui64 = css->xmtint;
29287c478bd9Sstevel@tonic-gate 	csskd->mdmint.value.ui64 = css->mdmint;
29297c478bd9Sstevel@tonic-gate 	csskd->rawch.value.ui64 = css->rawch;
29307c478bd9Sstevel@tonic-gate 	csskd->canch.value.ui64 = css->canch;
29317c478bd9Sstevel@tonic-gate 	csskd->outch.value.ui64 = css->outch;
29327c478bd9Sstevel@tonic-gate 	csskd->msg.value.ui64 = css->msg;
29337c478bd9Sstevel@tonic-gate 	csskd->sema.value.ui64 = css->sema;
29347c478bd9Sstevel@tonic-gate 	csskd->namei.value.ui64 = css->namei;
29357c478bd9Sstevel@tonic-gate 	csskd->ufsiget.value.ui64 = css->ufsiget;
29367c478bd9Sstevel@tonic-gate 	csskd->ufsdirblk.value.ui64 = css->ufsdirblk;
29377c478bd9Sstevel@tonic-gate 	csskd->ufsipage.value.ui64 = css->ufsipage;
29387c478bd9Sstevel@tonic-gate 	csskd->ufsinopage.value.ui64 = css->ufsinopage;
29397c478bd9Sstevel@tonic-gate 	csskd->procovf.value.ui64 = css->procovf;
29407c478bd9Sstevel@tonic-gate 	csskd->intrthread.value.ui64 = 0;
29417c478bd9Sstevel@tonic-gate 	for (i = 0; i < LOCK_LEVEL; i++)
29427c478bd9Sstevel@tonic-gate 		csskd->intrthread.value.ui64 += css->intr[i];
29437c478bd9Sstevel@tonic-gate 	csskd->intrblk.value.ui64 = css->intrblk;
29447c478bd9Sstevel@tonic-gate 	csskd->intrunpin.value.ui64 = css->intrunpin;
29457c478bd9Sstevel@tonic-gate 	csskd->idlethread.value.ui64 = css->idlethread;
29467c478bd9Sstevel@tonic-gate 	csskd->inv_swtch.value.ui64 = css->inv_swtch;
29477c478bd9Sstevel@tonic-gate 	csskd->nthreads.value.ui64 = css->nthreads;
29487c478bd9Sstevel@tonic-gate 	csskd->cpumigrate.value.ui64 = css->cpumigrate;
29497c478bd9Sstevel@tonic-gate 	csskd->xcalls.value.ui64 = css->xcalls;
29507c478bd9Sstevel@tonic-gate 	csskd->mutex_adenters.value.ui64 = css->mutex_adenters;
29517c478bd9Sstevel@tonic-gate 	csskd->rw_rdfails.value.ui64 = css->rw_rdfails;
29527c478bd9Sstevel@tonic-gate 	csskd->rw_wrfails.value.ui64 = css->rw_wrfails;
29537c478bd9Sstevel@tonic-gate 	csskd->modload.value.ui64 = css->modload;
29547c478bd9Sstevel@tonic-gate 	csskd->modunload.value.ui64 = css->modunload;
29557c478bd9Sstevel@tonic-gate 	csskd->bawrite.value.ui64 = css->bawrite;
29567c478bd9Sstevel@tonic-gate 	csskd->iowait.value.ui64 = 0;
29577c478bd9Sstevel@tonic-gate 
29587c478bd9Sstevel@tonic-gate 	return (0);
29597c478bd9Sstevel@tonic-gate }
29607c478bd9Sstevel@tonic-gate 
29617c478bd9Sstevel@tonic-gate static int
29627c478bd9Sstevel@tonic-gate cpu_vm_stats_ks_update(kstat_t *ksp, int rw)
29637c478bd9Sstevel@tonic-gate {
29647c478bd9Sstevel@tonic-gate 	cpu_t *cp = (cpu_t *)ksp->ks_private;
29657c478bd9Sstevel@tonic-gate 	struct cpu_vm_stats_ks_data *cvskd;
29667c478bd9Sstevel@tonic-gate 	cpu_vm_stats_t *cvs;
29677c478bd9Sstevel@tonic-gate 
29687c478bd9Sstevel@tonic-gate 	if (rw == KSTAT_WRITE)
29697c478bd9Sstevel@tonic-gate 		return (EACCES);
29707c478bd9Sstevel@tonic-gate 
29717c478bd9Sstevel@tonic-gate 	cvs = &cp->cpu_stats.vm;
29727c478bd9Sstevel@tonic-gate 	cvskd = ksp->ks_data;
29737c478bd9Sstevel@tonic-gate 
29747c478bd9Sstevel@tonic-gate 	bcopy(&cpu_vm_stats_ks_data_template, ksp->ks_data,
29757c478bd9Sstevel@tonic-gate 	    sizeof (cpu_vm_stats_ks_data_template));
29767c478bd9Sstevel@tonic-gate 	cvskd->pgrec.value.ui64 = cvs->pgrec;
29777c478bd9Sstevel@tonic-gate 	cvskd->pgfrec.value.ui64 = cvs->pgfrec;
29787c478bd9Sstevel@tonic-gate 	cvskd->pgin.value.ui64 = cvs->pgin;
29797c478bd9Sstevel@tonic-gate 	cvskd->pgpgin.value.ui64 = cvs->pgpgin;
29807c478bd9Sstevel@tonic-gate 	cvskd->pgout.value.ui64 = cvs->pgout;
29817c478bd9Sstevel@tonic-gate 	cvskd->pgpgout.value.ui64 = cvs->pgpgout;
29827c478bd9Sstevel@tonic-gate 	cvskd->swapin.value.ui64 = cvs->swapin;
29837c478bd9Sstevel@tonic-gate 	cvskd->pgswapin.value.ui64 = cvs->pgswapin;
29847c478bd9Sstevel@tonic-gate 	cvskd->swapout.value.ui64 = cvs->swapout;
29857c478bd9Sstevel@tonic-gate 	cvskd->pgswapout.value.ui64 = cvs->pgswapout;
29867c478bd9Sstevel@tonic-gate 	cvskd->zfod.value.ui64 = cvs->zfod;
29877c478bd9Sstevel@tonic-gate 	cvskd->dfree.value.ui64 = cvs->dfree;
29887c478bd9Sstevel@tonic-gate 	cvskd->scan.value.ui64 = cvs->scan;
29897c478bd9Sstevel@tonic-gate 	cvskd->rev.value.ui64 = cvs->rev;
29907c478bd9Sstevel@tonic-gate 	cvskd->hat_fault.value.ui64 = cvs->hat_fault;
29917c478bd9Sstevel@tonic-gate 	cvskd->as_fault.value.ui64 = cvs->as_fault;
29927c478bd9Sstevel@tonic-gate 	cvskd->maj_fault.value.ui64 = cvs->maj_fault;
29937c478bd9Sstevel@tonic-gate 	cvskd->cow_fault.value.ui64 = cvs->cow_fault;
29947c478bd9Sstevel@tonic-gate 	cvskd->prot_fault.value.ui64 = cvs->prot_fault;
29957c478bd9Sstevel@tonic-gate 	cvskd->softlock.value.ui64 = cvs->softlock;
29967c478bd9Sstevel@tonic-gate 	cvskd->kernel_asflt.value.ui64 = cvs->kernel_asflt;
29977c478bd9Sstevel@tonic-gate 	cvskd->pgrrun.value.ui64 = cvs->pgrrun;
29987c478bd9Sstevel@tonic-gate 	cvskd->execpgin.value.ui64 = cvs->execpgin;
29997c478bd9Sstevel@tonic-gate 	cvskd->execpgout.value.ui64 = cvs->execpgout;
30007c478bd9Sstevel@tonic-gate 	cvskd->execfree.value.ui64 = cvs->execfree;
30017c478bd9Sstevel@tonic-gate 	cvskd->anonpgin.value.ui64 = cvs->anonpgin;
30027c478bd9Sstevel@tonic-gate 	cvskd->anonpgout.value.ui64 = cvs->anonpgout;
30037c478bd9Sstevel@tonic-gate 	cvskd->anonfree.value.ui64 = cvs->anonfree;
30047c478bd9Sstevel@tonic-gate 	cvskd->fspgin.value.ui64 = cvs->fspgin;
30057c478bd9Sstevel@tonic-gate 	cvskd->fspgout.value.ui64 = cvs->fspgout;
30067c478bd9Sstevel@tonic-gate 	cvskd->fsfree.value.ui64 = cvs->fsfree;
30077c478bd9Sstevel@tonic-gate 
30087c478bd9Sstevel@tonic-gate 	return (0);
30097c478bd9Sstevel@tonic-gate }
30107c478bd9Sstevel@tonic-gate 
30117c478bd9Sstevel@tonic-gate static int
30127c478bd9Sstevel@tonic-gate cpu_stat_ks_update(kstat_t *ksp, int rw)
30137c478bd9Sstevel@tonic-gate {
30147c478bd9Sstevel@tonic-gate 	cpu_stat_t *cso;
30157c478bd9Sstevel@tonic-gate 	cpu_t *cp;
30167c478bd9Sstevel@tonic-gate 	int i;
3017eda89462Sesolom 	hrtime_t msnsecs[NCMSTATES];
30187c478bd9Sstevel@tonic-gate 
30197c478bd9Sstevel@tonic-gate 	cso = (cpu_stat_t *)ksp->ks_data;
30207c478bd9Sstevel@tonic-gate 	cp = (cpu_t *)ksp->ks_private;
30217c478bd9Sstevel@tonic-gate 
30227c478bd9Sstevel@tonic-gate 	if (rw == KSTAT_WRITE)
30237c478bd9Sstevel@tonic-gate 		return (EACCES);
30247c478bd9Sstevel@tonic-gate 
30257c478bd9Sstevel@tonic-gate 	/*
3026eda89462Sesolom 	 * Read CPU mstate, but compare with the last values we
3027eda89462Sesolom 	 * received to make sure that the returned kstats never
3028eda89462Sesolom 	 * decrease.
30297c478bd9Sstevel@tonic-gate 	 */
30307c478bd9Sstevel@tonic-gate 
3031eda89462Sesolom 	get_cpu_mstate(cp, msnsecs);
3032eda89462Sesolom 	msnsecs[CMS_IDLE] = NSEC_TO_TICK(msnsecs[CMS_IDLE]);
3033eda89462Sesolom 	msnsecs[CMS_USER] = NSEC_TO_TICK(msnsecs[CMS_USER]);
3034eda89462Sesolom 	msnsecs[CMS_SYSTEM] = NSEC_TO_TICK(msnsecs[CMS_SYSTEM]);
3035eda89462Sesolom 	if (cso->cpu_sysinfo.cpu[CPU_IDLE] < msnsecs[CMS_IDLE])
3036eda89462Sesolom 		cso->cpu_sysinfo.cpu[CPU_IDLE] = msnsecs[CMS_IDLE];
3037eda89462Sesolom 	if (cso->cpu_sysinfo.cpu[CPU_USER] < msnsecs[CMS_USER])
3038eda89462Sesolom 		cso->cpu_sysinfo.cpu[CPU_USER] = msnsecs[CMS_USER];
3039eda89462Sesolom 	if (cso->cpu_sysinfo.cpu[CPU_KERNEL] < msnsecs[CMS_SYSTEM])
3040eda89462Sesolom 		cso->cpu_sysinfo.cpu[CPU_KERNEL] = msnsecs[CMS_SYSTEM];
30417c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.cpu[CPU_WAIT] 	= 0;
30427c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.wait[W_IO] 	= 0;
30437c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.wait[W_SWAP]	= 0;
30447c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.wait[W_PIO]	= 0;
30457c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.bread 		= CPU_STATS(cp, sys.bread);
30467c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.bwrite 	= CPU_STATS(cp, sys.bwrite);
30477c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.lread 		= CPU_STATS(cp, sys.lread);
30487c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.lwrite 	= CPU_STATS(cp, sys.lwrite);
30497c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.phread 	= CPU_STATS(cp, sys.phread);
30507c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.phwrite 	= CPU_STATS(cp, sys.phwrite);
30517c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.pswitch 	= CPU_STATS(cp, sys.pswitch);
30527c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.trap 		= CPU_STATS(cp, sys.trap);
30537c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.intr		= 0;
30547c478bd9Sstevel@tonic-gate 	for (i = 0; i < PIL_MAX; i++)
30557c478bd9Sstevel@tonic-gate 		cso->cpu_sysinfo.intr += CPU_STATS(cp, sys.intr[i]);
30567c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.syscall	= CPU_STATS(cp, sys.syscall);
30577c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.sysread	= CPU_STATS(cp, sys.sysread);
30587c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.syswrite	= CPU_STATS(cp, sys.syswrite);
30597c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.sysfork	= CPU_STATS(cp, sys.sysfork);
30607c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.sysvfork	= CPU_STATS(cp, sys.sysvfork);
30617c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.sysexec	= CPU_STATS(cp, sys.sysexec);
30627c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.readch		= CPU_STATS(cp, sys.readch);
30637c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.writech	= CPU_STATS(cp, sys.writech);
30647c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.rcvint		= CPU_STATS(cp, sys.rcvint);
30657c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.xmtint		= CPU_STATS(cp, sys.xmtint);
30667c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.mdmint		= CPU_STATS(cp, sys.mdmint);
30677c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.rawch		= CPU_STATS(cp, sys.rawch);
30687c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.canch		= CPU_STATS(cp, sys.canch);
30697c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.outch		= CPU_STATS(cp, sys.outch);
30707c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.msg		= CPU_STATS(cp, sys.msg);
30717c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.sema		= CPU_STATS(cp, sys.sema);
30727c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.namei		= CPU_STATS(cp, sys.namei);
30737c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.ufsiget	= CPU_STATS(cp, sys.ufsiget);
30747c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.ufsdirblk	= CPU_STATS(cp, sys.ufsdirblk);
30757c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.ufsipage	= CPU_STATS(cp, sys.ufsipage);
30767c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.ufsinopage	= CPU_STATS(cp, sys.ufsinopage);
30777c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.inodeovf	= 0;
30787c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.fileovf	= 0;
30797c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.procovf	= CPU_STATS(cp, sys.procovf);
30807c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.intrthread	= 0;
30817c478bd9Sstevel@tonic-gate 	for (i = 0; i < LOCK_LEVEL; i++)
30827c478bd9Sstevel@tonic-gate 		cso->cpu_sysinfo.intrthread += CPU_STATS(cp, sys.intr[i]);
30837c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.intrblk	= CPU_STATS(cp, sys.intrblk);
30847c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.idlethread	= CPU_STATS(cp, sys.idlethread);
30857c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.inv_swtch	= CPU_STATS(cp, sys.inv_swtch);
30867c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.nthreads	= CPU_STATS(cp, sys.nthreads);
30877c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.cpumigrate	= CPU_STATS(cp, sys.cpumigrate);
30887c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.xcalls		= CPU_STATS(cp, sys.xcalls);
30897c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.mutex_adenters	= CPU_STATS(cp, sys.mutex_adenters);
30907c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.rw_rdfails	= CPU_STATS(cp, sys.rw_rdfails);
30917c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.rw_wrfails	= CPU_STATS(cp, sys.rw_wrfails);
30927c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.modload	= CPU_STATS(cp, sys.modload);
30937c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.modunload	= CPU_STATS(cp, sys.modunload);
30947c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.bawrite	= CPU_STATS(cp, sys.bawrite);
30957c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.rw_enters	= 0;
30967c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.win_uo_cnt	= 0;
30977c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.win_uu_cnt	= 0;
30987c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.win_so_cnt	= 0;
30997c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.win_su_cnt	= 0;
31007c478bd9Sstevel@tonic-gate 	cso->cpu_sysinfo.win_suo_cnt	= 0;
31017c478bd9Sstevel@tonic-gate 
31027c478bd9Sstevel@tonic-gate 	cso->cpu_syswait.iowait		= 0;
31037c478bd9Sstevel@tonic-gate 	cso->cpu_syswait.swap		= 0;
31047c478bd9Sstevel@tonic-gate 	cso->cpu_syswait.physio		= 0;
31057c478bd9Sstevel@tonic-gate 
31067c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.pgrec		= CPU_STATS(cp, vm.pgrec);
31077c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.pgfrec		= CPU_STATS(cp, vm.pgfrec);
31087c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.pgin		= CPU_STATS(cp, vm.pgin);
31097c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.pgpgin		= CPU_STATS(cp, vm.pgpgin);
31107c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.pgout		= CPU_STATS(cp, vm.pgout);
31117c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.pgpgout		= CPU_STATS(cp, vm.pgpgout);
31127c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.swapin		= CPU_STATS(cp, vm.swapin);
31137c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.pgswapin	= CPU_STATS(cp, vm.pgswapin);
31147c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.swapout		= CPU_STATS(cp, vm.swapout);
31157c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.pgswapout	= CPU_STATS(cp, vm.pgswapout);
31167c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.zfod		= CPU_STATS(cp, vm.zfod);
31177c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.dfree		= CPU_STATS(cp, vm.dfree);
31187c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.scan		= CPU_STATS(cp, vm.scan);
31197c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.rev		= CPU_STATS(cp, vm.rev);
31207c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.hat_fault	= CPU_STATS(cp, vm.hat_fault);
31217c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.as_fault	= CPU_STATS(cp, vm.as_fault);
31227c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.maj_fault	= CPU_STATS(cp, vm.maj_fault);
31237c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.cow_fault	= CPU_STATS(cp, vm.cow_fault);
31247c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.prot_fault	= CPU_STATS(cp, vm.prot_fault);
31257c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.softlock	= CPU_STATS(cp, vm.softlock);
31267c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.kernel_asflt	= CPU_STATS(cp, vm.kernel_asflt);
31277c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.pgrrun		= CPU_STATS(cp, vm.pgrrun);
31287c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.execpgin	= CPU_STATS(cp, vm.execpgin);
31297c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.execpgout	= CPU_STATS(cp, vm.execpgout);
31307c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.execfree	= CPU_STATS(cp, vm.execfree);
31317c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.anonpgin	= CPU_STATS(cp, vm.anonpgin);
31327c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.anonpgout	= CPU_STATS(cp, vm.anonpgout);
31337c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.anonfree	= CPU_STATS(cp, vm.anonfree);
31347c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.fspgin		= CPU_STATS(cp, vm.fspgin);
31357c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.fspgout		= CPU_STATS(cp, vm.fspgout);
31367c478bd9Sstevel@tonic-gate 	cso->cpu_vminfo.fsfree		= CPU_STATS(cp, vm.fsfree);
31377c478bd9Sstevel@tonic-gate 
31387c478bd9Sstevel@tonic-gate 	return (0);
31397c478bd9Sstevel@tonic-gate }
3140