/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * Architecture-independent CPU control functions.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/var.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/cpu_event.h>
#include <sys/kstat.h>
#include <sys/uadmin.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/procset.h>
#include <sys/processor.h>
#include <sys/debug.h>
#include <sys/cpupart.h>
#include <sys/lgrp.h>
#include <sys/pset.h>
#include <sys/pghw.h>
#include <sys/kmem.h>
#include <sys/kmem_impl.h>	/* to set per-cpu kmem_cache offset */
#include <sys/atomic.h>
#include <sys/callb.h>
#include <sys/vtrace.h>
#include <sys/cyclic.h>
#include <sys/bitmap.h>
#include <sys/nvpair.h>
#include <sys/pool_pset.h>
#include <sys/msacct.h>
#include <sys/time.h>
#include <sys/archsystm.h>
#include <sys/sdt.h>
#if defined(__x86) || defined(__amd64)
#include <sys/x86_archext.h>
#endif
#include <sys/callo.h>

extern int	mp_cpu_start(cpu_t *);
extern int	mp_cpu_stop(cpu_t *);
extern int	mp_cpu_poweron(cpu_t *);
extern int	mp_cpu_poweroff(cpu_t *);
extern int	mp_cpu_configure(int);
extern int	mp_cpu_unconfigure(int);
extern void	mp_cpu_faulted_enter(cpu_t *);
extern void	mp_cpu_faulted_exit(cpu_t *);

extern int cmp_cpu_to_chip(processorid_t cpuid);
#ifdef __sparcv9
extern char *cpu_fru_fmri(cpu_t *cp);
#endif

static void cpu_add_active_internal(cpu_t *cp);
static void cpu_remove_active(cpu_t *cp);
static void cpu_info_kstat_create(cpu_t *cp);
static void cpu_info_kstat_destroy(cpu_t *cp);
static void cpu_stats_kstat_create(cpu_t *cp);
static void cpu_stats_kstat_destroy(cpu_t *cp);

static int cpu_sys_stats_ks_update(kstat_t *ksp, int rw);
static int cpu_vm_stats_ks_update(kstat_t *ksp, int rw);
static int cpu_stat_ks_update(kstat_t *ksp, int rw);
static int cpu_state_change_hooks(int, cpu_setup_t, cpu_setup_t);

/*
 * cpu_lock protects ncpus, ncpus_online, cpu_flag, cpu_list, cpu_active,
 * max_cpu_seqid_ever, and dispatch queue reallocations. The lock ordering with
 * respect to related locks is:
 *
 *	cpu_lock --> thread_free_lock  --->  p_lock  --->  thread_lock()
 *
 * Warning: Certain sections of code do not use the cpu_lock when
 * traversing the cpu_list (e.g. mutex_vector_enter(), clock()).  Since
 * all cpus are paused during modifications to this list, a solution
 * to protect the list is to either disable kernel preemption while
 * walking the list, *or* recheck the cpu_next pointer at each
 * iteration in the loop.  Note that in no case can any cached
 * copies of the cpu pointers be kept as they may become invalid.
 */
kmutex_t	cpu_lock;
cpu_t		*cpu_list;		/* list of all CPUs */
cpu_t		*clock_cpu_list;	/* used by clock to walk CPUs */
cpu_t		*cpu_active;		/* list of active CPUs */
static cpuset_t	cpu_available;		/* set of available CPUs */
cpuset_t	cpu_seqid_inuse;	/* which cpu_seqids are in use */

cpu_t		**cpu_seq;		/* ptrs to CPUs, indexed by seq_id */

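/*
 * Illustrative sketch only (not built, and not part of the original
 * interfaces): one way to walk cpu_list without holding cpu_lock, per the
 * warning above, is to disable kernel preemption for the duration of the
 * walk and to re-read cpu_next on every iteration rather than caching CPU
 * pointers.  The function name example_walk_cpu_list() is hypothetical.
 */
#if 0
static void
example_walk_cpu_list(void (*visit)(cpu_t *))
{
	cpu_t *cp;

	kpreempt_disable();	/* CPUs cannot be paused out from under us */
	cp = cpu_list;
	do {
		visit(cp);
		cp = cp->cpu_next;	/* re-read each pass; never cache */
	} while (cp != cpu_list);
	kpreempt_enable();
}
#endif
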
/*
 * max_ncpus keeps the max cpus the system can have. Initially
 * it's NCPU, but since most archs scan the devtree for cpus
 * fairly early on during boot, the real max can be known before
 * ncpus is set (useful for early NCPU based allocations).
 */
int max_ncpus = NCPU;
/*
 * Platforms that set max_ncpus to the maximum number of cpus that can be
 * dynamically added will set boot_max_ncpus to the number of cpus found
 * at device tree scan time during boot.
 */
int boot_max_ncpus = -1;
int boot_ncpus = -1;
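
/*
 * Illustrative sketch only (not built): max_ncpus is handy for sizing
 * per-CPU tables before ncpus is finalized.  The table name and element
 * type below are hypothetical.
 */
#if 0
typedef struct example_entry { int ee_count; } example_entry_t;
static example_entry_t *example_table;

static void
example_early_alloc(void)
{
	/* Sized by max_ncpus so the table is valid before ncpus settles. */
	example_table = kmem_zalloc(max_ncpus * sizeof (example_entry_t),
	    KM_SLEEP);
}
#endif
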
/*
 * Maximum possible CPU id.  This can never be >= NCPU since NCPU is
 * used to size arrays that are indexed by CPU id.
 */
processorid_t max_cpuid = NCPU - 1;

/*
 * Maximum cpu_seqid ever given out.  This number can only grow and never
 * shrink.  It can be used to optimize NCPU loops to avoid going through
 * CPUs which were never on-line.
 */
processorid_t max_cpu_seqid_ever = 0;

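/*
 * Illustrative sketch only (not built): using max_cpu_seqid_ever to bound a
 * scan of cpu_seq[] so that sequence ids that were never handed out are
 * skipped.  Assumes the caller holds cpu_lock (or otherwise keeps the
 * entries stable); example_visit_seq() is hypothetical.
 */
#if 0
static void
example_visit_seq(void (*visit)(cpu_t *))
{
	processorid_t seqid;

	ASSERT(MUTEX_HELD(&cpu_lock));
	for (seqid = 0; seqid <= max_cpu_seqid_ever; seqid++) {
		if (cpu_seq[seqid] != NULL)
			visit(cpu_seq[seqid]);
	}
}
#endif
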
int ncpus = 1;
int ncpus_online = 1;

/*
 * CPU that we're trying to offline.  Protected by cpu_lock.
 */
cpu_t *cpu_inmotion;

/*
 * Can be raised to suppress further weakbindings, which are instead
 * satisfied by disabling preemption.  Must be raised/lowered under cpu_lock,
 * while individual thread weakbinding synchronization is done under thread
 * lock.
 */
int weakbindingbarrier;

/*
 * Variables used in pause_cpus().
 */
static volatile char safe_list[NCPU];

static struct _cpu_pause_info {
	int		cp_spl;		/* spl saved in pause_cpus() */
	volatile int	cp_go;		/* Go signal sent after all ready */
	int		cp_count;	/* # of CPUs to pause */
	ksema_t		cp_sem;		/* synch pause_cpus & cpu_pause */
	kthread_id_t	cp_paused;
	void		*(*cp_func)(void *);
} cpu_pause_info;

static kmutex_t pause_free_mutex;
static kcondvar_t pause_free_cv;


static struct cpu_sys_stats_ks_data {
	kstat_named_t cpu_ticks_idle;
	kstat_named_t cpu_ticks_user;
	kstat_named_t cpu_ticks_kernel;
	kstat_named_t cpu_ticks_wait;
	kstat_named_t cpu_nsec_idle;
	kstat_named_t cpu_nsec_user;
	kstat_named_t cpu_nsec_kernel;
	kstat_named_t cpu_nsec_dtrace;
	kstat_named_t cpu_nsec_intr;
	kstat_named_t cpu_load_intr;
	kstat_named_t wait_ticks_io;
	kstat_named_t dtrace_probes;
	kstat_named_t bread;
	kstat_named_t bwrite;
	kstat_named_t lread;
	kstat_named_t lwrite;
	kstat_named_t phread;
	kstat_named_t phwrite;
	kstat_named_t pswitch;
	kstat_named_t trap;
	kstat_named_t intr;
	kstat_named_t syscall;
	kstat_named_t sysread;
	kstat_named_t syswrite;
	kstat_named_t sysfork;
	kstat_named_t sysvfork;
	kstat_named_t sysexec;
	kstat_named_t readch;
	kstat_named_t writech;
	kstat_named_t rcvint;
	kstat_named_t xmtint;
	kstat_named_t mdmint;
	kstat_named_t rawch;
	kstat_named_t canch;
	kstat_named_t outch;
	kstat_named_t msg;
	kstat_named_t sema;
	kstat_named_t namei;
	kstat_named_t ufsiget;
	kstat_named_t ufsdirblk;
	kstat_named_t ufsipage;
	kstat_named_t ufsinopage;
	kstat_named_t procovf;
	kstat_named_t intrthread;
	kstat_named_t intrblk;
	kstat_named_t intrunpin;
	kstat_named_t idlethread;
	kstat_named_t inv_swtch;
	kstat_named_t nthreads;
	kstat_named_t cpumigrate;
	kstat_named_t xcalls;
	kstat_named_t mutex_adenters;
	kstat_named_t rw_rdfails;
	kstat_named_t rw_wrfails;
	kstat_named_t modload;
	kstat_named_t modunload;
	kstat_named_t bawrite;
	kstat_named_t iowait;
} cpu_sys_stats_ks_data_template = {
	{ "cpu_ticks_idle", KSTAT_DATA_UINT64 },
	{ "cpu_ticks_user", KSTAT_DATA_UINT64 },
	{ "cpu_ticks_kernel", KSTAT_DATA_UINT64 },
	{ "cpu_ticks_wait", KSTAT_DATA_UINT64 },
	{ "cpu_nsec_idle", KSTAT_DATA_UINT64 },
	{ "cpu_nsec_user", KSTAT_DATA_UINT64 },
	{ "cpu_nsec_kernel", KSTAT_DATA_UINT64 },
	{ "cpu_nsec_dtrace", KSTAT_DATA_UINT64 },
	{ "cpu_nsec_intr", KSTAT_DATA_UINT64 },
	{ "cpu_load_intr", KSTAT_DATA_UINT64 },
	{ "wait_ticks_io", KSTAT_DATA_UINT64 },
	{ "dtrace_probes", KSTAT_DATA_UINT64 },
	{ "bread", KSTAT_DATA_UINT64 },
	{ "bwrite", KSTAT_DATA_UINT64 },
	{ "lread", KSTAT_DATA_UINT64 },
	{ "lwrite", KSTAT_DATA_UINT64 },
	{ "phread", KSTAT_DATA_UINT64 },
	{ "phwrite", KSTAT_DATA_UINT64 },
	{ "pswitch", KSTAT_DATA_UINT64 },
	{ "trap", KSTAT_DATA_UINT64 },
	{ "intr", KSTAT_DATA_UINT64 },
	{ "syscall", KSTAT_DATA_UINT64 },
	{ "sysread", KSTAT_DATA_UINT64 },
	{ "syswrite", KSTAT_DATA_UINT64 },
	{ "sysfork", KSTAT_DATA_UINT64 },
	{ "sysvfork", KSTAT_DATA_UINT64 },
	{ "sysexec", KSTAT_DATA_UINT64 },
	{ "readch", KSTAT_DATA_UINT64 },
	{ "writech", KSTAT_DATA_UINT64 },
	{ "rcvint", KSTAT_DATA_UINT64 },
	{ "xmtint", KSTAT_DATA_UINT64 },
	{ "mdmint", KSTAT_DATA_UINT64 },
	{ "rawch", KSTAT_DATA_UINT64 },
	{ "canch", KSTAT_DATA_UINT64 },
	{ "outch", KSTAT_DATA_UINT64 },
	{ "msg", KSTAT_DATA_UINT64 },
	{ "sema", KSTAT_DATA_UINT64 },
	{ "namei", KSTAT_DATA_UINT64 },
	{ "ufsiget", KSTAT_DATA_UINT64 },
	{ "ufsdirblk", KSTAT_DATA_UINT64 },
	{ "ufsipage", KSTAT_DATA_UINT64 },
	{ "ufsinopage", KSTAT_DATA_UINT64 },
	{ "procovf", KSTAT_DATA_UINT64 },
	{ "intrthread", KSTAT_DATA_UINT64 },
	{ "intrblk", KSTAT_DATA_UINT64 },
	{ "intrunpin", KSTAT_DATA_UINT64 },
	{ "idlethread", KSTAT_DATA_UINT64 },
	{ "inv_swtch", KSTAT_DATA_UINT64 },
	{ "nthreads", KSTAT_DATA_UINT64 },
	{ "cpumigrate", KSTAT_DATA_UINT64 },
	{ "xcalls", KSTAT_DATA_UINT64 },
	{ "mutex_adenters", KSTAT_DATA_UINT64 },
	{ "rw_rdfails", KSTAT_DATA_UINT64 },
	{ "rw_wrfails", KSTAT_DATA_UINT64 },
	{ "modload", KSTAT_DATA_UINT64 },
	{ "modunload", KSTAT_DATA_UINT64 },
	{ "bawrite", KSTAT_DATA_UINT64 },
	{ "iowait", KSTAT_DATA_UINT64 },
};

static struct cpu_vm_stats_ks_data {
	kstat_named_t pgrec;
	kstat_named_t pgfrec;
	kstat_named_t pgin;
	kstat_named_t pgpgin;
	kstat_named_t pgout;
	kstat_named_t pgpgout;
	kstat_named_t swapin;
	kstat_named_t pgswapin;
	kstat_named_t swapout;
	kstat_named_t pgswapout;
	kstat_named_t zfod;
	kstat_named_t dfree;
	kstat_named_t scan;
	kstat_named_t rev;
	kstat_named_t hat_fault;
	kstat_named_t as_fault;
	kstat_named_t maj_fault;
	kstat_named_t cow_fault;
	kstat_named_t prot_fault;
	kstat_named_t softlock;
	kstat_named_t kernel_asflt;
	kstat_named_t pgrrun;
	kstat_named_t execpgin;
	kstat_named_t execpgout;
	kstat_named_t execfree;
	kstat_named_t anonpgin;
	kstat_named_t anonpgout;
	kstat_named_t anonfree;
	kstat_named_t fspgin;
	kstat_named_t fspgout;
	kstat_named_t fsfree;
} cpu_vm_stats_ks_data_template = {
	{ "pgrec", KSTAT_DATA_UINT64 },
	{ "pgfrec", KSTAT_DATA_UINT64 },
	{ "pgin", KSTAT_DATA_UINT64 },
	{ "pgpgin", KSTAT_DATA_UINT64 },
	{ "pgout", KSTAT_DATA_UINT64 },
	{ "pgpgout", KSTAT_DATA_UINT64 },
	{ "swapin", KSTAT_DATA_UINT64 },
	{ "pgswapin", KSTAT_DATA_UINT64 },
	{ "swapout", KSTAT_DATA_UINT64 },
	{ "pgswapout", KSTAT_DATA_UINT64 },
	{ "zfod", KSTAT_DATA_UINT64 },
	{ "dfree", KSTAT_DATA_UINT64 },
	{ "scan", KSTAT_DATA_UINT64 },
	{ "rev", KSTAT_DATA_UINT64 },
	{ "hat_fault", KSTAT_DATA_UINT64 },
	{ "as_fault", KSTAT_DATA_UINT64 },
	{ "maj_fault", KSTAT_DATA_UINT64 },
	{ "cow_fault", KSTAT_DATA_UINT64 },
	{ "prot_fault", KSTAT_DATA_UINT64 },
	{ "softlock", KSTAT_DATA_UINT64 },
	{ "kernel_asflt", KSTAT_DATA_UINT64 },
	{ "pgrrun", KSTAT_DATA_UINT64 },
	{ "execpgin", KSTAT_DATA_UINT64 },
	{ "execpgout", KSTAT_DATA_UINT64 },
	{ "execfree", KSTAT_DATA_UINT64 },
	{ "anonpgin", KSTAT_DATA_UINT64 },
	{ "anonpgout", KSTAT_DATA_UINT64 },
	{ "anonfree", KSTAT_DATA_UINT64 },
	{ "fspgin", KSTAT_DATA_UINT64 },
	{ "fspgout", KSTAT_DATA_UINT64 },
	{ "fsfree", KSTAT_DATA_UINT64 },
};
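
/*
 * Illustrative sketch only (not built): templates like the ones above are
 * typically copied into a freshly created named kstat whose ks_update
 * callback fills in live values on each read.  The module/instance/class
 * strings and the example function below are illustrative; they are not the
 * file's actual cpu_stats_kstat_create() implementation.
 */
#if 0
static kstat_t *
example_sys_kstat_create(cpu_t *cp)
{
	kstat_t *ksp;

	ksp = kstat_create("cpu", cp->cpu_id, "sys", "misc",
	    KSTAT_TYPE_NAMED,
	    sizeof (cpu_sys_stats_ks_data_template) / sizeof (kstat_named_t),
	    0);
	if (ksp != NULL) {
		/* Seed the names/types, then let ks_update supply values. */
		bcopy(&cpu_sys_stats_ks_data_template, ksp->ks_data,
		    sizeof (cpu_sys_stats_ks_data_template));
		ksp->ks_update = cpu_sys_stats_ks_update;
		ksp->ks_private = cp;
		kstat_install(ksp);
	}
	return (ksp);
}
#endif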

/*
 * Force the specified thread to migrate to the appropriate processor.
 * Called with thread lock held, returns with it dropped.
 */
static void
force_thread_migrate(kthread_id_t tp)
{
	ASSERT(THREAD_LOCK_HELD(tp));
	if (tp == curthread) {
		THREAD_TRANSITION(tp);
		CL_SETRUN(tp);
		thread_unlock_nopreempt(tp);
		swtch();
	} else {
		if (tp->t_state == TS_ONPROC) {
			cpu_surrender(tp);
		} else if (tp->t_state == TS_RUN) {
			(void) dispdeq(tp);
			setbackdq(tp);
		}
		thread_unlock(tp);
	}
}

/*
 * Set affinity for a specified CPU.
 * A reference count is incremented and the affinity is held until the
 * reference count is decremented to zero by thread_affinity_clear().
 * This is so regions of code requiring affinity can be nested.
 * Caller needs to ensure that cpu_id remains valid, which can be
 * done by holding cpu_lock across this call, unless the caller
 * specifies CPU_CURRENT in which case the cpu_lock will be acquired
 * by thread_affinity_set and CPU->cpu_id will be the target CPU.
 */
void
thread_affinity_set(kthread_id_t t, int cpu_id)
{
	cpu_t *cp;
	int c;

	ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));

	if ((c = cpu_id) == CPU_CURRENT) {
		mutex_enter(&cpu_lock);
		cpu_id = CPU->cpu_id;
	}
	/*
	 * We should be asserting that cpu_lock is held here, but
	 * the NCA code doesn't acquire it.  The following assert
	 * should be uncommented when the NCA code is fixed.
	 *
	 * ASSERT(MUTEX_HELD(&cpu_lock));
	 */
	ASSERT((cpu_id >= 0) && (cpu_id < NCPU));
	cp = cpu[cpu_id];
	ASSERT(cp != NULL);	/* user must provide a good cpu_id */
	/*
	 * If there is already a hard affinity requested, and this affinity
	 * conflicts with that, panic.
	 */
	thread_lock(t);
	if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
		panic("affinity_set: setting %p but already bound to %p",
		    (void *)cp, (void *)t->t_bound_cpu);
	}
	t->t_affinitycnt++;
	t->t_bound_cpu = cp;

	/*
	 * Make sure we're running on the right CPU.
	 */
	if (cp != t->t_cpu || t != curthread) {
		force_thread_migrate(t);	/* drops thread lock */
	} else {
		thread_unlock(t);
	}

	if (c == CPU_CURRENT)
		mutex_exit(&cpu_lock);
}
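
/*
 * Illustrative sketch only (not built): thread_affinity_set() calls nest, so
 * a caller can temporarily pin itself to the CPU it is currently running on,
 * do some per-CPU work, and then release the reference.
 * example_on_this_cpu() is a hypothetical caller.
 */
#if 0
static void
example_on_this_cpu(void)
{
	thread_affinity_set(curthread, CPU_CURRENT);
	/* ... per-CPU work; nested set/clear pairs are permitted ... */
	thread_affinity_clear(curthread);
}
#endif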

/*
 * Wrapper for backward compatibility.
 */
void
affinity_set(int cpu_id)
{
	thread_affinity_set(curthread, cpu_id);
}

/*
 * Decrement the affinity reservation count and if it becomes zero,
 * clear the CPU affinity for the current thread, or set it to the user's
 * software binding request.
 */
void
thread_affinity_clear(kthread_id_t t)
{
	register processorid_t binding;

	thread_lock(t);
	if (--t->t_affinitycnt == 0) {
		if ((binding = t->t_bind_cpu) == PBIND_NONE) {
			/*
			 * Adjust disp_max_unbound_pri if necessary.
			 */
			disp_adjust_unbound_pri(t);
			t->t_bound_cpu = NULL;
			if (t->t_cpu->cpu_part != t->t_cpupart) {
				force_thread_migrate(t);
				return;
			}
		} else {
			t->t_bound_cpu = cpu[binding];
			/*
			 * Make sure the thread is running on the bound CPU.
			 */
			if (t->t_cpu != t->t_bound_cpu) {
				force_thread_migrate(t);
				return;		/* already dropped lock */
			}
		}
	}
	thread_unlock(t);
}

/*
 * Wrapper for backward compatibility.
 */
void
affinity_clear(void)
{
	thread_affinity_clear(curthread);
}

/*
 * Weak cpu affinity.  Bind to the "current" cpu for short periods
 * of time during which the thread must not block (but may be preempted).
 * Use this instead of kpreempt_disable() when it is only "no migration"
 * rather than "no preemption" semantics that are required - disabling
 * preemption holds higher priority threads off of cpu and if the
 * operation that is protected is more than momentary this is not good
 * for realtime etc.
 *
 * Weakly bound threads will not prevent a cpu from being offlined -
 * we'll only run them on the cpu to which they are weakly bound but
 * (because they do not block) we'll always be able to move them on to
 * another cpu at offline time if we give them just a short moment to
 * run during which they will unbind.  To give a cpu a chance of offlining,
 * however, we require a barrier to weak bindings that may be raised for a
 * given cpu (offline/move code may set this and then wait a short time for
 * existing weak bindings to drop); the cpu_inmotion pointer is that barrier.
 *
 * There are few restrictions on the calling context of thread_nomigrate.
 * The caller must not hold the thread lock.  Calls may be nested.
 *
 * After weakbinding a thread must not perform actions that may block.
 * In particular it must not call thread_affinity_set; calling that when
 * already weakbound is nonsensical anyway.
 *
 * If curthread is prevented from migrating for other reasons
 * (kernel preemption disabled; high pil; strongly bound; interrupt thread)
 * then the weak binding will succeed even if this cpu is the target of an
 * offline/move request.
 */
void
thread_nomigrate(void)
{
	cpu_t *cp;
	kthread_id_t t = curthread;

again:
	kpreempt_disable();
	cp = CPU;

	/*
	 * A highlevel interrupt must not modify t_nomigrate or
	 * t_weakbound_cpu of the thread it has interrupted.  A lowlevel
	 * interrupt thread cannot migrate and we can avoid the
	 * thread_lock call below by short-circuiting here.  In either
	 * case we can just return since no migration is possible and
	 * the condition will persist (ie, when we test for these again
	 * in thread_allowmigrate they can't have changed).  Migration
	 * is also impossible if we're at or above DISP_LEVEL pil.
	 */
	if (CPU_ON_INTR(cp) || t->t_flag & T_INTR_THREAD ||
	    getpil() >= DISP_LEVEL) {
		kpreempt_enable();
		return;
	}

	/*
	 * We must be consistent with existing weak bindings.  Since we
	 * may be interrupted between the increment of t_nomigrate and
	 * the store to t_weakbound_cpu below we cannot assume that
	 * t_weakbound_cpu will be set if t_nomigrate is.  Note that we
	 * cannot assert t_weakbound_cpu == t_bind_cpu since that is not
	 * always the case.
	 */
	if (t->t_nomigrate && t->t_weakbound_cpu && t->t_weakbound_cpu != cp) {
		if (!panicstr)
			panic("thread_nomigrate: binding to %p but already "
			    "bound to %p", (void *)cp,
			    (void *)t->t_weakbound_cpu);
	}

	/*
	 * At this point we have preemption disabled and we don't yet hold
	 * the thread lock.  So it's possible that somebody else could
	 * set t_bind_cpu here and not be able to force us across to the
	 * new cpu (since we have preemption disabled).
	 */
	thread_lock(curthread);

	/*
	 * If further weak bindings are being (temporarily) suppressed then
	 * we'll settle for disabling kernel preemption (which assures
	 * no migration provided the thread does not block which it is
	 * not allowed to if using thread_nomigrate).  We must remember
	 * this disposition so we can take appropriate action in
	 * thread_allowmigrate.  If this is a nested call and the
	 * thread is already weakbound then fall through as normal.
	 * We remember the decision to settle for kpreempt_disable through
	 * negative nesting counting in t_nomigrate.  Once a thread has had one
	 * weakbinding request satisfied in this way any further (nested)
	 * requests will continue to be satisfied in the same way,
	 * even if weak bindings have recommenced.
	 */
	if (t->t_nomigrate < 0 || weakbindingbarrier && t->t_nomigrate == 0) {
		--t->t_nomigrate;
		thread_unlock(curthread);
		return;		/* with kpreempt_disable still active */
	}

	/*
	 * We hold thread_lock so t_bind_cpu cannot change.  We could,
	 * however, be running on a different cpu to which we are t_bound_cpu
	 * to (as explained above).  If we grant the weak binding request
	 * in that case then the dispatcher must favour our weak binding
	 * over our strong (in which case, just as when preemption is
	 * disabled, we can continue to run on a cpu other than the one to
	 * which we are strongbound; the difference in this case is that
	 * this thread can be preempted and so can appear on the dispatch
	 * queues of a cpu other than the one it is strongbound to).
	 *
	 * If the cpu we are running on does not appear to be a current
	 * offline target (we check cpu_inmotion to determine this - since
	 * we don't hold cpu_lock we may not see a recent store to that,
	 * so it's possible that we at times can grant a weak binding to a
	 * cpu that is an offline target, but that one request will not
	 * prevent the offline from succeeding) then we will always grant
	 * the weak binding request.  This includes the case above where
	 * we grant a weakbinding not commensurate with our strong binding.
	 *
	 * If our cpu does appear to be an offline target then we're inclined
	 * not to grant the weakbinding request just yet - we'd prefer to
	 * migrate to another cpu and grant the request there.  The
	 * exceptions are those cases where going through preemption code
	 * will not result in us changing cpu:
	 *
	 *	. interrupts have already bypassed this case (see above)
	 *	. we are already weakbound to this cpu (dispatcher code will
	 *	  always return us to the weakbound cpu)
	 *	. preemption was disabled even before we disabled it above
	 *	. we are strongbound to this cpu (if we're strongbound to
	 *	  another and not yet running there the trip through the
	 *	  dispatcher will move us to the strongbound cpu and we
	 *	  will grant the weak binding there)
	 */
	if (cp != cpu_inmotion || t->t_nomigrate > 0 || t->t_preempt > 1 ||
	    t->t_bound_cpu == cp) {
		/*
		 * Don't be tempted to store to t_weakbound_cpu only on
		 * the first nested bind request - if we're interrupted
		 * after the increment of t_nomigrate and before the
		 * store to t_weakbound_cpu and the interrupt calls
		 * thread_nomigrate then the assertion in thread_allowmigrate
		 * would fail.
		 */
		t->t_nomigrate++;
		t->t_weakbound_cpu = cp;
		membar_producer();
		thread_unlock(curthread);
		/*
		 * Now that we have dropped the thread_lock another thread
		 * can set our t_weakbound_cpu, and will try to migrate us
		 * to the strongbound cpu (which will not be prevented by
		 * preemption being disabled since we're about to enable
		 * preemption).  We have granted the weakbinding to the current
		 * cpu, so again we are in the position that it is possible
		 * that our weak and strong bindings differ.  Again this
		 * is catered for by dispatcher code which will favour our
		 * weak binding.
		 */
		kpreempt_enable();
	} else {
		/*
		 * Move to another cpu before granting the request by
		 * forcing this thread through preemption code.  When we
		 * get to set{front,back}dq called from CL_PREEMPT()
		 * cpu_choose() will be used to select a cpu to queue
		 * us on - that will see cpu_inmotion and take
		 * steps to avoid returning us to this cpu.
		 */
		cp->cpu_kprunrun = 1;
		thread_unlock(curthread);
		kpreempt_enable();	/* will call preempt() */
		goto again;
	}
}

void
thread_allowmigrate(void)
{
	kthread_id_t t = curthread;

	ASSERT(t->t_weakbound_cpu == CPU ||
	    (t->t_nomigrate < 0 && t->t_preempt > 0) ||
	    CPU_ON_INTR(CPU) || t->t_flag & T_INTR_THREAD ||
	    getpil() >= DISP_LEVEL);

	if (CPU_ON_INTR(CPU) || (t->t_flag & T_INTR_THREAD) ||
	    getpil() >= DISP_LEVEL)
		return;

	if (t->t_nomigrate < 0) {
		/*
		 * This thread was granted "weak binding" in the
		 * stronger form of kernel preemption disabling.
		 * Undo a level of nesting for both t_nomigrate
		 * and t_preempt.
		 */
		++t->t_nomigrate;
		kpreempt_enable();
	} else if (--t->t_nomigrate == 0) {
		/*
		 * Time to drop the weak binding.  We need to cater
		 * for the case where we're weakbound to a different
		 * cpu than that to which we're strongbound (a very
		 * temporary arrangement that must only persist until
		 * weak binding drops).  We don't acquire thread_lock
		 * here so even as this code executes t_bound_cpu
		 * may be changing.  So we disable preemption and
		 * a) in the case that t_bound_cpu changes while we
		 * have preemption disabled kprunrun will be set
		 * asynchronously, and b) if before disabling
		 * preemption we were already on a different cpu to
		 * our t_bound_cpu then we set kprunrun ourselves
		 * to force a trip through the dispatcher when
		 * preemption is enabled.
		 */
		kpreempt_disable();
		if (t->t_bound_cpu &&
		    t->t_weakbound_cpu != t->t_bound_cpu)
			CPU->cpu_kprunrun = 1;
		t->t_weakbound_cpu = NULL;
		membar_producer();
		kpreempt_enable();
	}
}
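
/*
 * Illustrative sketch only (not built): a typical weak-binding window pins
 * the calling thread to its current CPU for a short, non-blocking stretch of
 * work and then releases the binding.  example_percpu_touch() is
 * hypothetical.
 */
#if 0
static void
example_percpu_touch(void)
{
	thread_nomigrate();	/* no migration; preemption still allowed */
	/* ... short, non-blocking work on the current CPU ... */
	thread_allowmigrate();	/* must not have blocked in between */
}
#endif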

/*
 * weakbinding_stop can be used to temporarily cause weakbindings made
 * with thread_nomigrate to be satisfied through the stronger action of
 * kpreempt_disable.  weakbinding_start recommences normal weakbinding.
 */

void
weakbinding_stop(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	weakbindingbarrier = 1;
	membar_producer();	/* make visible before subsequent thread_lock */
}

void
weakbinding_start(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	weakbindingbarrier = 0;
}
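
/*
 * Illustrative sketch only (not built): offline/move code can raise the weak
 * binding barrier under cpu_lock, give existing weak bindings a moment to
 * drain, and then restore normal weakbinding.  example_offline_prep() and
 * the delay(1) pause are illustrative.
 */
#if 0
static void
example_offline_prep(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	weakbinding_stop();	/* new requests fall back to kpreempt_disable */
	delay(1);		/* let existing weak bindings unbind */
	/* ... take the CPU offline or move its threads here ... */
	weakbinding_start();
}
#endif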

void
null_xcall(void)
{
}

/*
 * This routine is called to place the CPUs in a safe place so that
 * one of them can be taken off line or placed on line.  What we are
 * trying to do here is prevent a thread from traversing the list
 * of active CPUs while we are changing it or from getting placed on
 * the run queue of a CPU that has just gone off line.  We do this by
 * creating a thread with the highest possible prio for each CPU and
 * having it call this routine.  The advantage of this method is that
 * we can eliminate all checks for CPU_ACTIVE in the disp routines.
 * This makes disp faster at the expense of making p_online() slower
 * which is a good trade off.
 */
static void
cpu_pause(int index)
{
	int s;
	struct _cpu_pause_info *cpi = &cpu_pause_info;
	volatile char *safe = &safe_list[index];
	long lindex = index;

	ASSERT((curthread->t_bound_cpu != NULL) || (*safe == PAUSE_DIE));

	while (*safe != PAUSE_DIE) {
		*safe = PAUSE_READY;
		membar_enter();		/* make sure stores are flushed */
		sema_v(&cpi->cp_sem);	/* signal requesting thread */

		/*
		 * Wait here until all pause threads are running.  That
		 * indicates that it's safe to do the spl.  Until
		 * cpu_pause_info.cp_go is set, we don't want to spl
		 * because that might block clock interrupts needed
		 * to preempt threads on other CPUs.
		 */
		while (cpi->cp_go == 0)
			;
		/*
		 * Even though we are at the highest disp prio, we need
		 * to block out all interrupts below LOCK_LEVEL so that
		 * an intr doesn't come in, wake up a thread, and call
		 * setbackdq/setfrontdq.
		 */
		s = splhigh();
		/*
		 * If cp_func has been set then call it using index as the
		 * argument; this is currently only used by
		 * cpr_suspend_cpus().  The function is the code that runs
		 * on the "paused" CPUs when a machine comes out of a sleep
		 * state after the CPUs were powered off (it could also be
		 * used for hotplugging CPUs).
		 */
		if (cpi->cp_func != NULL)
			(*cpi->cp_func)((void *)lindex);

		mach_cpu_pause(safe);

		splx(s);
		/*
		 * Waiting is at an end.  Switch out of cpu_pause
		 * loop and resume useful work.
		 */
		swtch();
	}

	mutex_enter(&pause_free_mutex);
	*safe = PAUSE_DEAD;
	cv_broadcast(&pause_free_cv);
	mutex_exit(&pause_free_mutex);
}

/*
 * Allow the cpus to start running again.
 */
void
start_cpus()
{
	int i;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_pause_info.cp_paused);
	cpu_pause_info.cp_paused = NULL;
	for (i = 0; i < NCPU; i++)
		safe_list[i] = PAUSE_IDLE;
	membar_enter();		/* make sure stores are flushed */
	affinity_clear();
	splx(cpu_pause_info.cp_spl);
	kpreempt_enable();
}
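
/*
 * Illustrative sketch only (not built): the usual pattern brackets a change
 * to shared CPU state with pause_cpus()/start_cpus() while holding cpu_lock.
 * Per the restrictions documented at pause_cpus() below, the bracketed code
 * must not block or take adaptive locks.  example_update_cpu_state() is
 * hypothetical.
 */
#if 0
static void
example_update_cpu_state(void)
{
	mutex_enter(&cpu_lock);
	pause_cpus(NULL, NULL);		/* no CPU going offline, no cp_func */
	/* ... modify cpu_list or other shared CPU state here ... */
	start_cpus();
	mutex_exit(&cpu_lock);
}
#endif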
8387c478bd9Sstevel@tonic-gate
8397c478bd9Sstevel@tonic-gate /*
8407c478bd9Sstevel@tonic-gate * Allocate a pause thread for a CPU.
8417c478bd9Sstevel@tonic-gate */
8427c478bd9Sstevel@tonic-gate static void
cpu_pause_alloc(cpu_t * cp)8437c478bd9Sstevel@tonic-gate cpu_pause_alloc(cpu_t *cp)
8447c478bd9Sstevel@tonic-gate {
8457c478bd9Sstevel@tonic-gate kthread_id_t t;
8462df1fe9cSrandyf long cpun = cp->cpu_id;
8477c478bd9Sstevel@tonic-gate
8487c478bd9Sstevel@tonic-gate /*
8497c478bd9Sstevel@tonic-gate * Note, v.v_nglobpris will not change value as long as I hold
8507c478bd9Sstevel@tonic-gate * cpu_lock.
8517c478bd9Sstevel@tonic-gate */
8522df1fe9cSrandyf t = thread_create(NULL, 0, cpu_pause, (void *)cpun,
8537c478bd9Sstevel@tonic-gate 0, &p0, TS_STOPPED, v.v_nglobpris - 1);
8547c478bd9Sstevel@tonic-gate thread_lock(t);
8557c478bd9Sstevel@tonic-gate t->t_bound_cpu = cp;
8567c478bd9Sstevel@tonic-gate t->t_disp_queue = cp->cpu_disp;
8577c478bd9Sstevel@tonic-gate t->t_affinitycnt = 1;
8587c478bd9Sstevel@tonic-gate t->t_preempt = 1;
8597c478bd9Sstevel@tonic-gate thread_unlock(t);
8607c478bd9Sstevel@tonic-gate cp->cpu_pause_thread = t;
8617c478bd9Sstevel@tonic-gate /*
8627c478bd9Sstevel@tonic-gate * Registering a thread in the callback table is usually done
8637c478bd9Sstevel@tonic-gate * in the initialization code of the thread. In this
8647c478bd9Sstevel@tonic-gate * case, we do it right after thread creation because the
8657c478bd9Sstevel@tonic-gate * thread itself may never run, and we need to register the
8667c478bd9Sstevel@tonic-gate * fact that it is safe for cpr suspend.
8677c478bd9Sstevel@tonic-gate */
8687c478bd9Sstevel@tonic-gate CALLB_CPR_INIT_SAFE(t, "cpu_pause");
8697c478bd9Sstevel@tonic-gate }
8707c478bd9Sstevel@tonic-gate
8717c478bd9Sstevel@tonic-gate /*
8727c478bd9Sstevel@tonic-gate * Free a pause thread for a CPU.
8737c478bd9Sstevel@tonic-gate */
8747c478bd9Sstevel@tonic-gate static void
cpu_pause_free(cpu_t * cp)8757c478bd9Sstevel@tonic-gate cpu_pause_free(cpu_t *cp)
8767c478bd9Sstevel@tonic-gate {
8777c478bd9Sstevel@tonic-gate kthread_id_t t;
8787c478bd9Sstevel@tonic-gate int cpun = cp->cpu_id;
8797c478bd9Sstevel@tonic-gate
8807c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
8817c478bd9Sstevel@tonic-gate /*
8827c478bd9Sstevel@tonic-gate * We have to get the thread and tell him to die.
8837c478bd9Sstevel@tonic-gate */
8847c478bd9Sstevel@tonic-gate if ((t = cp->cpu_pause_thread) == NULL) {
8857c478bd9Sstevel@tonic-gate ASSERT(safe_list[cpun] == PAUSE_IDLE);
8867c478bd9Sstevel@tonic-gate return;
8877c478bd9Sstevel@tonic-gate }
8887c478bd9Sstevel@tonic-gate thread_lock(t);
8897c478bd9Sstevel@tonic-gate t->t_cpu = CPU; /* disp gets upset if last cpu is quiesced. */
8907c478bd9Sstevel@tonic-gate t->t_bound_cpu = NULL; /* Must un-bind; cpu may not be running. */
8917c478bd9Sstevel@tonic-gate t->t_pri = v.v_nglobpris - 1;
8927c478bd9Sstevel@tonic-gate ASSERT(safe_list[cpun] == PAUSE_IDLE);
8937c478bd9Sstevel@tonic-gate safe_list[cpun] = PAUSE_DIE;
8947c478bd9Sstevel@tonic-gate THREAD_TRANSITION(t);
8957c478bd9Sstevel@tonic-gate setbackdq(t);
8967c478bd9Sstevel@tonic-gate thread_unlock_nopreempt(t);
8977c478bd9Sstevel@tonic-gate
8987c478bd9Sstevel@tonic-gate /*
8997c478bd9Sstevel@tonic-gate * If we don't wait for the thread to actually die, it may try to
9007c478bd9Sstevel@tonic-gate * run on the wrong cpu as part of an actual call to pause_cpus().
9017c478bd9Sstevel@tonic-gate */
9027c478bd9Sstevel@tonic-gate mutex_enter(&pause_free_mutex);
9037c478bd9Sstevel@tonic-gate while (safe_list[cpun] != PAUSE_DEAD) {
9047c478bd9Sstevel@tonic-gate cv_wait(&pause_free_cv, &pause_free_mutex);
9057c478bd9Sstevel@tonic-gate }
9067c478bd9Sstevel@tonic-gate mutex_exit(&pause_free_mutex);
9077c478bd9Sstevel@tonic-gate safe_list[cpun] = PAUSE_IDLE;
9087c478bd9Sstevel@tonic-gate
9097c478bd9Sstevel@tonic-gate cp->cpu_pause_thread = NULL;
9107c478bd9Sstevel@tonic-gate }
9117c478bd9Sstevel@tonic-gate
9127c478bd9Sstevel@tonic-gate /*
9137c478bd9Sstevel@tonic-gate * Initialize basic structures for pausing CPUs.
9147c478bd9Sstevel@tonic-gate */
9157c478bd9Sstevel@tonic-gate void
cpu_pause_init()9167c478bd9Sstevel@tonic-gate cpu_pause_init()
9177c478bd9Sstevel@tonic-gate {
9187c478bd9Sstevel@tonic-gate sema_init(&cpu_pause_info.cp_sem, 0, NULL, SEMA_DEFAULT, NULL);
9197c478bd9Sstevel@tonic-gate /*
9207c478bd9Sstevel@tonic-gate * Create initial CPU pause thread.
9217c478bd9Sstevel@tonic-gate */
9227c478bd9Sstevel@tonic-gate cpu_pause_alloc(CPU);
9237c478bd9Sstevel@tonic-gate }
9247c478bd9Sstevel@tonic-gate
9257c478bd9Sstevel@tonic-gate /*
9267c478bd9Sstevel@tonic-gate * Start the threads used to pause another CPU.
9277c478bd9Sstevel@tonic-gate */
9287c478bd9Sstevel@tonic-gate static int
cpu_pause_start(processorid_t cpu_id)9297c478bd9Sstevel@tonic-gate cpu_pause_start(processorid_t cpu_id)
9307c478bd9Sstevel@tonic-gate {
9317c478bd9Sstevel@tonic-gate int i;
9327c478bd9Sstevel@tonic-gate int cpu_count = 0;
9337c478bd9Sstevel@tonic-gate
9347c478bd9Sstevel@tonic-gate for (i = 0; i < NCPU; i++) {
9357c478bd9Sstevel@tonic-gate cpu_t *cp;
9367c478bd9Sstevel@tonic-gate kthread_id_t t;
9377c478bd9Sstevel@tonic-gate
9387c478bd9Sstevel@tonic-gate cp = cpu[i];
9397c478bd9Sstevel@tonic-gate if (!CPU_IN_SET(cpu_available, i) || (i == cpu_id)) {
9407c478bd9Sstevel@tonic-gate safe_list[i] = PAUSE_WAIT;
9417c478bd9Sstevel@tonic-gate continue;
9427c478bd9Sstevel@tonic-gate }
9437c478bd9Sstevel@tonic-gate
9447c478bd9Sstevel@tonic-gate /*
9457c478bd9Sstevel@tonic-gate * Skip CPU if it is quiesced or not yet started.
9467c478bd9Sstevel@tonic-gate */
9477c478bd9Sstevel@tonic-gate if ((cp->cpu_flags & (CPU_QUIESCED | CPU_READY)) != CPU_READY) {
9487c478bd9Sstevel@tonic-gate safe_list[i] = PAUSE_WAIT;
9497c478bd9Sstevel@tonic-gate continue;
9507c478bd9Sstevel@tonic-gate }
9517c478bd9Sstevel@tonic-gate
9527c478bd9Sstevel@tonic-gate /*
9537c478bd9Sstevel@tonic-gate * Start this CPU's pause thread.
9547c478bd9Sstevel@tonic-gate */
9557c478bd9Sstevel@tonic-gate t = cp->cpu_pause_thread;
9567c478bd9Sstevel@tonic-gate thread_lock(t);
9577c478bd9Sstevel@tonic-gate /*
9587c478bd9Sstevel@tonic-gate * Reset the priority, since nglobpris may have
9597c478bd9Sstevel@tonic-gate * changed since the thread was created, if someone
9607c478bd9Sstevel@tonic-gate * has loaded the RT (or some other) scheduling
9617c478bd9Sstevel@tonic-gate * class.
9627c478bd9Sstevel@tonic-gate */
9637c478bd9Sstevel@tonic-gate t->t_pri = v.v_nglobpris - 1;
9647c478bd9Sstevel@tonic-gate THREAD_TRANSITION(t);
9657c478bd9Sstevel@tonic-gate setbackdq(t);
9667c478bd9Sstevel@tonic-gate thread_unlock_nopreempt(t);
9677c478bd9Sstevel@tonic-gate ++cpu_count;
9687c478bd9Sstevel@tonic-gate }
9697c478bd9Sstevel@tonic-gate return (cpu_count);
9707c478bd9Sstevel@tonic-gate }
9717c478bd9Sstevel@tonic-gate
9727c478bd9Sstevel@tonic-gate
9737c478bd9Sstevel@tonic-gate /*
9747c478bd9Sstevel@tonic-gate * Pause all of the CPUs except the one we are on by creating a high
9757c478bd9Sstevel@tonic-gate * priority thread bound to those CPUs.
9767c478bd9Sstevel@tonic-gate *
9777c478bd9Sstevel@tonic-gate * Note that one must be extremely careful regarding code
9787c478bd9Sstevel@tonic-gate * executed while CPUs are paused. Since a CPU may be paused
9797c478bd9Sstevel@tonic-gate * while a thread scheduling on that CPU is holding an adaptive
9807c478bd9Sstevel@tonic-gate * lock, code executed with CPUs paused must not acquire adaptive
9817c478bd9Sstevel@tonic-gate * (or low-level spin) locks. Also, such code must not block,
9827c478bd9Sstevel@tonic-gate * since the thread that is supposed to initiate the wakeup may
9837c478bd9Sstevel@tonic-gate * never run.
9847c478bd9Sstevel@tonic-gate *
9857c478bd9Sstevel@tonic-gate * With a few exceptions, the restrictions on code executed with CPUs
9867c478bd9Sstevel@tonic-gate * paused match those for code executed at high-level interrupt
9877c478bd9Sstevel@tonic-gate  * context.  (A typical caller is sketched below, after this function.)
9887c478bd9Sstevel@tonic-gate */
9897c478bd9Sstevel@tonic-gate void
990*bce835f2SJosef 'Jeff' Sipek pause_cpus(cpu_t *off_cp, void *(*func)(void *))
9917c478bd9Sstevel@tonic-gate {
9927c478bd9Sstevel@tonic-gate processorid_t cpu_id;
9937c478bd9Sstevel@tonic-gate int i;
9947c478bd9Sstevel@tonic-gate struct _cpu_pause_info *cpi = &cpu_pause_info;
9957c478bd9Sstevel@tonic-gate
9967c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
9977c478bd9Sstevel@tonic-gate ASSERT(cpi->cp_paused == NULL);
9987c478bd9Sstevel@tonic-gate cpi->cp_count = 0;
9997c478bd9Sstevel@tonic-gate cpi->cp_go = 0;
10007c478bd9Sstevel@tonic-gate for (i = 0; i < NCPU; i++)
10017c478bd9Sstevel@tonic-gate safe_list[i] = PAUSE_IDLE;
10027c478bd9Sstevel@tonic-gate kpreempt_disable();
10037c478bd9Sstevel@tonic-gate
1004*bce835f2SJosef 'Jeff' Sipek cpi->cp_func = func;
1005*bce835f2SJosef 'Jeff' Sipek
10067c478bd9Sstevel@tonic-gate /*
10077c478bd9Sstevel@tonic-gate * If running on the cpu that is going offline, get off it.
10087c478bd9Sstevel@tonic-gate * This is so that it won't be necessary to rechoose a CPU
10097c478bd9Sstevel@tonic-gate * when done.
10107c478bd9Sstevel@tonic-gate */
10117c478bd9Sstevel@tonic-gate if (CPU == off_cp)
10127c478bd9Sstevel@tonic-gate cpu_id = off_cp->cpu_next_part->cpu_id;
10137c478bd9Sstevel@tonic-gate else
10147c478bd9Sstevel@tonic-gate cpu_id = CPU->cpu_id;
10157c478bd9Sstevel@tonic-gate affinity_set(cpu_id);
10167c478bd9Sstevel@tonic-gate
10177c478bd9Sstevel@tonic-gate /*
10187c478bd9Sstevel@tonic-gate * Start the pause threads and record how many were started
10197c478bd9Sstevel@tonic-gate */
10207c478bd9Sstevel@tonic-gate cpi->cp_count = cpu_pause_start(cpu_id);
10217c478bd9Sstevel@tonic-gate
10227c478bd9Sstevel@tonic-gate /*
10237c478bd9Sstevel@tonic-gate * Now wait for all CPUs to be running the pause thread.
10247c478bd9Sstevel@tonic-gate */
10257c478bd9Sstevel@tonic-gate while (cpi->cp_count > 0) {
10267c478bd9Sstevel@tonic-gate /*
10277c478bd9Sstevel@tonic-gate * Spin reading the count without grabbing the disp
10287c478bd9Sstevel@tonic-gate * lock to make sure we don't prevent the pause
10297c478bd9Sstevel@tonic-gate * threads from getting the lock.
10307c478bd9Sstevel@tonic-gate */
10317c478bd9Sstevel@tonic-gate while (sema_held(&cpi->cp_sem))
10327c478bd9Sstevel@tonic-gate ;
10337c478bd9Sstevel@tonic-gate if (sema_tryp(&cpi->cp_sem))
10347c478bd9Sstevel@tonic-gate --cpi->cp_count;
10357c478bd9Sstevel@tonic-gate }
10367c478bd9Sstevel@tonic-gate cpi->cp_go = 1; /* all have reached cpu_pause */
10377c478bd9Sstevel@tonic-gate
10387c478bd9Sstevel@tonic-gate /*
10397c478bd9Sstevel@tonic-gate * Now wait for all CPUs to spl. (Transition from PAUSE_READY
10407c478bd9Sstevel@tonic-gate * to PAUSE_WAIT.)
10417c478bd9Sstevel@tonic-gate */
10427c478bd9Sstevel@tonic-gate for (i = 0; i < NCPU; i++) {
10437c478bd9Sstevel@tonic-gate while (safe_list[i] != PAUSE_WAIT)
10447c478bd9Sstevel@tonic-gate ;
10457c478bd9Sstevel@tonic-gate }
10467c478bd9Sstevel@tonic-gate cpi->cp_spl = splhigh(); /* block dispatcher on this CPU */
10477c478bd9Sstevel@tonic-gate cpi->cp_paused = curthread;
10487c478bd9Sstevel@tonic-gate }
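/*
 * Illustrative sketch of a typical pause_cpus() caller, following the
 * restrictions described above: cpu_lock is held across the whole
 * sequence, nothing between pause_cpus() and start_cpus() acquires an
 * adaptive lock or blocks, and example_generation is a made-up
 * placeholder for whatever state must change while every other CPU is
 * known to be spinning in its pause thread.
 */
#if 0	/* illustrative only */
static volatile int example_generation;

static void
example_pause_and_update(void)
{
	mutex_enter(&cpu_lock);		/* serialize CPU state changes */
	pause_cpus(NULL, NULL);		/* spin all other CPUs */

	example_generation++;		/* no adaptive locks, no blocking */

	start_cpus();			/* release the paused CPUs */
	mutex_exit(&cpu_lock);
}
#endif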
10497c478bd9Sstevel@tonic-gate
10507c478bd9Sstevel@tonic-gate /*
10517c478bd9Sstevel@tonic-gate * Check whether the current thread has CPUs paused
10527c478bd9Sstevel@tonic-gate */
10537c478bd9Sstevel@tonic-gate int
10547c478bd9Sstevel@tonic-gate cpus_paused(void)
10557c478bd9Sstevel@tonic-gate {
10567c478bd9Sstevel@tonic-gate if (cpu_pause_info.cp_paused != NULL) {
10577c478bd9Sstevel@tonic-gate ASSERT(cpu_pause_info.cp_paused == curthread);
10587c478bd9Sstevel@tonic-gate return (1);
10597c478bd9Sstevel@tonic-gate }
10607c478bd9Sstevel@tonic-gate return (0);
10617c478bd9Sstevel@tonic-gate }
10627c478bd9Sstevel@tonic-gate
10637c478bd9Sstevel@tonic-gate static cpu_t *
10647c478bd9Sstevel@tonic-gate cpu_get_all(processorid_t cpun)
10657c478bd9Sstevel@tonic-gate {
10667c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
10677c478bd9Sstevel@tonic-gate
10687c478bd9Sstevel@tonic-gate if (cpun >= NCPU || cpun < 0 || !CPU_IN_SET(cpu_available, cpun))
10697c478bd9Sstevel@tonic-gate return (NULL);
10707c478bd9Sstevel@tonic-gate return (cpu[cpun]);
10717c478bd9Sstevel@tonic-gate }
10727c478bd9Sstevel@tonic-gate
10737c478bd9Sstevel@tonic-gate /*
10747c478bd9Sstevel@tonic-gate * Check whether cpun is a valid processor id and whether it should be
10757c478bd9Sstevel@tonic-gate * visible from the current zone. If it is, return a pointer to the
10767c478bd9Sstevel@tonic-gate * associated CPU structure.
10777c478bd9Sstevel@tonic-gate */
10787c478bd9Sstevel@tonic-gate cpu_t *
10797c478bd9Sstevel@tonic-gate cpu_get(processorid_t cpun)
10807c478bd9Sstevel@tonic-gate {
10817c478bd9Sstevel@tonic-gate cpu_t *c;
10827c478bd9Sstevel@tonic-gate
10837c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
10847c478bd9Sstevel@tonic-gate c = cpu_get_all(cpun);
10857c478bd9Sstevel@tonic-gate if (c != NULL && !INGLOBALZONE(curproc) && pool_pset_enabled() &&
10867c478bd9Sstevel@tonic-gate zone_pset_get(curproc->p_zone) != cpupart_query_cpu(c))
10877c478bd9Sstevel@tonic-gate return (NULL);
10887c478bd9Sstevel@tonic-gate return (c);
10897c478bd9Sstevel@tonic-gate }
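/*
 * Illustrative sketch of a hypothetical cpu_get() caller: the lookup is
 * only valid with cpu_lock held, and a NULL return means the id is out
 * of range, not in cpu_available, or not visible from the caller's
 * zone.
 */
#if 0	/* illustrative only */
static int
example_cpu_is_up(processorid_t id)
{
	cpu_t	*cp;
	int	up = 0;

	mutex_enter(&cpu_lock);
	if ((cp = cpu_get(id)) != NULL)
		up = cpu_is_online(cp);
	mutex_exit(&cpu_lock);

	return (up);
}
#endif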
10907c478bd9Sstevel@tonic-gate
10917c478bd9Sstevel@tonic-gate /*
10927c478bd9Sstevel@tonic-gate * The following functions should be used to check CPU states in the kernel.
10937c478bd9Sstevel@tonic-gate * They should be invoked with cpu_lock held. Kernel subsystems interested
10947c478bd9Sstevel@tonic-gate * in CPU states should *not* use cpu_get_state() and various P_ONLINE/etc
10957c478bd9Sstevel@tonic-gate * states. Those are for user-land (and system call) use only.
10967c478bd9Sstevel@tonic-gate */
10977c478bd9Sstevel@tonic-gate
10987c478bd9Sstevel@tonic-gate /*
10997c478bd9Sstevel@tonic-gate * Determine whether the CPU is online and handling interrupts.
11007c478bd9Sstevel@tonic-gate */
11017c478bd9Sstevel@tonic-gate int
11027c478bd9Sstevel@tonic-gate cpu_is_online(cpu_t *cpu)
11037c478bd9Sstevel@tonic-gate {
11047c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
11057c478bd9Sstevel@tonic-gate return (cpu_flagged_online(cpu->cpu_flags));
11067c478bd9Sstevel@tonic-gate }
11077c478bd9Sstevel@tonic-gate
11087c478bd9Sstevel@tonic-gate /*
11097c478bd9Sstevel@tonic-gate * Determine whether the CPU is offline (this includes spare and faulted).
11107c478bd9Sstevel@tonic-gate */
11117c478bd9Sstevel@tonic-gate int
11127c478bd9Sstevel@tonic-gate cpu_is_offline(cpu_t *cpu)
11137c478bd9Sstevel@tonic-gate {
11147c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
11157c478bd9Sstevel@tonic-gate return (cpu_flagged_offline(cpu->cpu_flags));
11167c478bd9Sstevel@tonic-gate }
11177c478bd9Sstevel@tonic-gate
11187c478bd9Sstevel@tonic-gate /*
11197c478bd9Sstevel@tonic-gate * Determine whether the CPU is powered off.
11207c478bd9Sstevel@tonic-gate */
11217c478bd9Sstevel@tonic-gate int
11227c478bd9Sstevel@tonic-gate cpu_is_poweredoff(cpu_t *cpu)
11237c478bd9Sstevel@tonic-gate {
11247c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
11257c478bd9Sstevel@tonic-gate return (cpu_flagged_poweredoff(cpu->cpu_flags));
11267c478bd9Sstevel@tonic-gate }
11277c478bd9Sstevel@tonic-gate
11287c478bd9Sstevel@tonic-gate /*
11297c478bd9Sstevel@tonic-gate  * Determine whether the CPU is active but not handling interrupts.
11307c478bd9Sstevel@tonic-gate */
11317c478bd9Sstevel@tonic-gate int
11327c478bd9Sstevel@tonic-gate cpu_is_nointr(cpu_t *cpu)
11337c478bd9Sstevel@tonic-gate {
11347c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
11357c478bd9Sstevel@tonic-gate return (cpu_flagged_nointr(cpu->cpu_flags));
11367c478bd9Sstevel@tonic-gate }
11377c478bd9Sstevel@tonic-gate
11387c478bd9Sstevel@tonic-gate /*
11397c478bd9Sstevel@tonic-gate * Determine whether the CPU is active (scheduling threads).
11407c478bd9Sstevel@tonic-gate */
11417c478bd9Sstevel@tonic-gate int
11427c478bd9Sstevel@tonic-gate cpu_is_active(cpu_t *cpu)
11437c478bd9Sstevel@tonic-gate {
11447c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
11457c478bd9Sstevel@tonic-gate return (cpu_flagged_active(cpu->cpu_flags));
11467c478bd9Sstevel@tonic-gate }
11477c478bd9Sstevel@tonic-gate
11487c478bd9Sstevel@tonic-gate /*
11497c478bd9Sstevel@tonic-gate * Same as above, but these require cpu_flags instead of cpu_t pointers.
11507c478bd9Sstevel@tonic-gate */
11517c478bd9Sstevel@tonic-gate int
11527c478bd9Sstevel@tonic-gate cpu_flagged_online(cpu_flag_t cpu_flags)
11537c478bd9Sstevel@tonic-gate {
11547c478bd9Sstevel@tonic-gate return (cpu_flagged_active(cpu_flags) &&
11557c478bd9Sstevel@tonic-gate (cpu_flags & CPU_ENABLE));
11567c478bd9Sstevel@tonic-gate }
11577c478bd9Sstevel@tonic-gate
11587c478bd9Sstevel@tonic-gate int
11597c478bd9Sstevel@tonic-gate cpu_flagged_offline(cpu_flag_t cpu_flags)
11607c478bd9Sstevel@tonic-gate {
11617c478bd9Sstevel@tonic-gate return (((cpu_flags & CPU_POWEROFF) == 0) &&
11627c478bd9Sstevel@tonic-gate ((cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY));
11637c478bd9Sstevel@tonic-gate }
11647c478bd9Sstevel@tonic-gate
11657c478bd9Sstevel@tonic-gate int
11667c478bd9Sstevel@tonic-gate cpu_flagged_poweredoff(cpu_flag_t cpu_flags)
11677c478bd9Sstevel@tonic-gate {
11687c478bd9Sstevel@tonic-gate return ((cpu_flags & CPU_POWEROFF) == CPU_POWEROFF);
11697c478bd9Sstevel@tonic-gate }
11707c478bd9Sstevel@tonic-gate
11717c478bd9Sstevel@tonic-gate int
11727c478bd9Sstevel@tonic-gate cpu_flagged_nointr(cpu_flag_t cpu_flags)
11737c478bd9Sstevel@tonic-gate {
11747c478bd9Sstevel@tonic-gate return (cpu_flagged_active(cpu_flags) &&
11757c478bd9Sstevel@tonic-gate (cpu_flags & CPU_ENABLE) == 0);
11767c478bd9Sstevel@tonic-gate }
11777c478bd9Sstevel@tonic-gate
11787c478bd9Sstevel@tonic-gate int
11797c478bd9Sstevel@tonic-gate cpu_flagged_active(cpu_flag_t cpu_flags)
11807c478bd9Sstevel@tonic-gate {
11817c478bd9Sstevel@tonic-gate return (((cpu_flags & (CPU_POWEROFF | CPU_FAULTED | CPU_SPARE)) == 0) &&
11827c478bd9Sstevel@tonic-gate ((cpu_flags & (CPU_READY | CPU_OFFLINE)) == CPU_READY));
11837c478bd9Sstevel@tonic-gate }
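/*
 * Illustrative sketch: the cpu_flagged_*() variants let a hypothetical
 * caller classify a cpu_flags snapshot captured earlier (under
 * cpu_lock), without needing the cpu_t pointer at classification time.
 */
#if 0	/* illustrative only */
static const char *
example_state_name(cpu_flag_t flags)
{
	if (cpu_flagged_poweredoff(flags))
		return ("powered-off");
	if (cpu_flagged_offline(flags))
		return ("offline");
	if (cpu_flagged_nointr(flags))
		return ("no-intr");
	if (cpu_flagged_online(flags))
		return ("online");
	return ("unknown");
}
#endif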
11847c478bd9Sstevel@tonic-gate
11857c478bd9Sstevel@tonic-gate /*
11867c478bd9Sstevel@tonic-gate * Bring the indicated CPU online.
11877c478bd9Sstevel@tonic-gate */
11887c478bd9Sstevel@tonic-gate int
11897c478bd9Sstevel@tonic-gate cpu_online(cpu_t *cp)
11907c478bd9Sstevel@tonic-gate {
11917c478bd9Sstevel@tonic-gate int error = 0;
11927c478bd9Sstevel@tonic-gate
11937c478bd9Sstevel@tonic-gate /*
11947c478bd9Sstevel@tonic-gate * Handle on-line request.
11957c478bd9Sstevel@tonic-gate * This code must put the new CPU on the active list before
11967c478bd9Sstevel@tonic-gate * starting it because it will not be paused, and will start
11977c478bd9Sstevel@tonic-gate * using the active list immediately. The real start occurs
11987c478bd9Sstevel@tonic-gate * when the CPU_QUIESCED flag is turned off.
11997c478bd9Sstevel@tonic-gate */
12007c478bd9Sstevel@tonic-gate
12017c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
12027c478bd9Sstevel@tonic-gate
12037c478bd9Sstevel@tonic-gate /*
12047c478bd9Sstevel@tonic-gate * Put all the cpus into a known safe place.
12057c478bd9Sstevel@tonic-gate * No mutexes can be entered while CPUs are paused.
12067c478bd9Sstevel@tonic-gate */
12077c478bd9Sstevel@tonic-gate error = mp_cpu_start(cp); /* arch-dep hook */
12087c478bd9Sstevel@tonic-gate if (error == 0) {
1209e196c24bSesaxe pg_cpupart_in(cp, cp->cpu_part);
1210*bce835f2SJosef 'Jeff' Sipek pause_cpus(NULL, NULL);
12117c478bd9Sstevel@tonic-gate cpu_add_active_internal(cp);
12127c478bd9Sstevel@tonic-gate if (cp->cpu_flags & CPU_FAULTED) {
12137c478bd9Sstevel@tonic-gate cp->cpu_flags &= ~CPU_FAULTED;
12147c478bd9Sstevel@tonic-gate mp_cpu_faulted_exit(cp);
12157c478bd9Sstevel@tonic-gate }
12167c478bd9Sstevel@tonic-gate cp->cpu_flags &= ~(CPU_QUIESCED | CPU_OFFLINE | CPU_FROZEN |
12177c478bd9Sstevel@tonic-gate CPU_SPARE);
1218b885580bSAlexander Kolbasov CPU_NEW_GENERATION(cp);
12197c478bd9Sstevel@tonic-gate start_cpus();
12207c478bd9Sstevel@tonic-gate cpu_stats_kstat_create(cp);
12217c478bd9Sstevel@tonic-gate cpu_create_intrstat(cp);
12227c478bd9Sstevel@tonic-gate lgrp_kstat_create(cp);
12237c478bd9Sstevel@tonic-gate cpu_state_change_notify(cp->cpu_id, CPU_ON);
12247c478bd9Sstevel@tonic-gate cpu_intr_enable(cp); /* arch-dep hook */
1225b885580bSAlexander Kolbasov cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
1226ae115bc7Smrj cpu_set_state(cp);
12277c478bd9Sstevel@tonic-gate cyclic_online(cp);
122887a18d3fSMadhavan Venkataraman /*
122987a18d3fSMadhavan Venkataraman * This has to be called only after cyclic_online(). This
123087a18d3fSMadhavan Venkataraman * function uses cyclics.
123187a18d3fSMadhavan Venkataraman */
123287a18d3fSMadhavan Venkataraman callout_cpu_online(cp);
12337c478bd9Sstevel@tonic-gate poke_cpu(cp->cpu_id);
12347c478bd9Sstevel@tonic-gate }
12357c478bd9Sstevel@tonic-gate
12367c478bd9Sstevel@tonic-gate return (error);
12377c478bd9Sstevel@tonic-gate }
12387c478bd9Sstevel@tonic-gate
12397c478bd9Sstevel@tonic-gate /*
12407c478bd9Sstevel@tonic-gate * Take the indicated CPU offline.
12417c478bd9Sstevel@tonic-gate */
12427c478bd9Sstevel@tonic-gate int
12437c478bd9Sstevel@tonic-gate cpu_offline(cpu_t *cp, int flags)
12447c478bd9Sstevel@tonic-gate {
12457c478bd9Sstevel@tonic-gate cpupart_t *pp;
12467c478bd9Sstevel@tonic-gate int error = 0;
12477c478bd9Sstevel@tonic-gate cpu_t *ncp;
12487c478bd9Sstevel@tonic-gate int intr_enable;
12497c478bd9Sstevel@tonic-gate int cyclic_off = 0;
1250454ab202SMadhavan Venkataraman int callout_off = 0;
12517c478bd9Sstevel@tonic-gate int loop_count;
12527c478bd9Sstevel@tonic-gate int no_quiesce = 0;
12537c478bd9Sstevel@tonic-gate int (*bound_func)(struct cpu *, int);
12547c478bd9Sstevel@tonic-gate kthread_t *t;
12557c478bd9Sstevel@tonic-gate lpl_t *cpu_lpl;
12567c478bd9Sstevel@tonic-gate proc_t *p;
12577c478bd9Sstevel@tonic-gate int lgrp_diff_lpl;
12580b70c467Sakolb boolean_t unbind_all_threads = (flags & CPU_FORCED) != 0;
12597c478bd9Sstevel@tonic-gate
12607c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
12617c478bd9Sstevel@tonic-gate
12627c478bd9Sstevel@tonic-gate /*
12637c478bd9Sstevel@tonic-gate * If we're going from faulted or spare to offline, just
12647c478bd9Sstevel@tonic-gate * clear these flags and update CPU state.
12657c478bd9Sstevel@tonic-gate */
12667c478bd9Sstevel@tonic-gate if (cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) {
12677c478bd9Sstevel@tonic-gate if (cp->cpu_flags & CPU_FAULTED) {
12687c478bd9Sstevel@tonic-gate cp->cpu_flags &= ~CPU_FAULTED;
12697c478bd9Sstevel@tonic-gate mp_cpu_faulted_exit(cp);
12707c478bd9Sstevel@tonic-gate }
12717c478bd9Sstevel@tonic-gate cp->cpu_flags &= ~CPU_SPARE;
12727c478bd9Sstevel@tonic-gate cpu_set_state(cp);
12737c478bd9Sstevel@tonic-gate return (0);
12747c478bd9Sstevel@tonic-gate }
12757c478bd9Sstevel@tonic-gate
12767c478bd9Sstevel@tonic-gate /*
12777c478bd9Sstevel@tonic-gate * Handle off-line request.
12787c478bd9Sstevel@tonic-gate */
12797c478bd9Sstevel@tonic-gate pp = cp->cpu_part;
12807c478bd9Sstevel@tonic-gate /*
12817c478bd9Sstevel@tonic-gate  * Don't offline the last online CPU in the system or partition, or the only CPU servicing interrupts
12827c478bd9Sstevel@tonic-gate */
12837c478bd9Sstevel@tonic-gate if (ncpus_online <= 1 || pp->cp_ncpus <= 1 || cpu_intr_count(cp) < 2)
12847c478bd9Sstevel@tonic-gate return (EBUSY);
12857c478bd9Sstevel@tonic-gate /*
12860b70c467Sakolb * Unbind all soft-bound threads bound to our CPU and hard bound threads
12870b70c467Sakolb * if we were asked to.
12887c478bd9Sstevel@tonic-gate */
12890b70c467Sakolb error = cpu_unbind(cp->cpu_id, unbind_all_threads);
12900b70c467Sakolb if (error != 0)
12917c478bd9Sstevel@tonic-gate return (error);
12927c478bd9Sstevel@tonic-gate /*
12937c478bd9Sstevel@tonic-gate * We shouldn't be bound to this CPU ourselves.
12947c478bd9Sstevel@tonic-gate */
12957c478bd9Sstevel@tonic-gate if (curthread->t_bound_cpu == cp)
12967c478bd9Sstevel@tonic-gate return (EBUSY);
12977c478bd9Sstevel@tonic-gate
12987c478bd9Sstevel@tonic-gate /*
12997c478bd9Sstevel@tonic-gate * Tell interested parties that this CPU is going offline.
13007c478bd9Sstevel@tonic-gate */
1301b885580bSAlexander Kolbasov CPU_NEW_GENERATION(cp);
13027c478bd9Sstevel@tonic-gate cpu_state_change_notify(cp->cpu_id, CPU_OFF);
13037c478bd9Sstevel@tonic-gate
13047c478bd9Sstevel@tonic-gate /*
1305fb2f18f8Sesaxe * Tell the PG subsystem that the CPU is leaving the partition
1306fb2f18f8Sesaxe */
1307fb2f18f8Sesaxe pg_cpupart_out(cp, pp);
1308fb2f18f8Sesaxe
1309fb2f18f8Sesaxe /*
13107c478bd9Sstevel@tonic-gate * Take the CPU out of interrupt participation so we won't find
13117c478bd9Sstevel@tonic-gate * bound kernel threads. If the architecture cannot completely
13127c478bd9Sstevel@tonic-gate * shut off interrupts on the CPU, don't quiesce it, but don't
13137c478bd9Sstevel@tonic-gate * run anything but interrupt thread... this is indicated by
13147c478bd9Sstevel@tonic-gate  * the CPU_OFFLINE flag being on but the CPU_QUIESCED flag being
13157c478bd9Sstevel@tonic-gate * off.
13167c478bd9Sstevel@tonic-gate */
13177c478bd9Sstevel@tonic-gate intr_enable = cp->cpu_flags & CPU_ENABLE;
13187c478bd9Sstevel@tonic-gate if (intr_enable)
13197c478bd9Sstevel@tonic-gate no_quiesce = cpu_intr_disable(cp);
13207c478bd9Sstevel@tonic-gate
13217c478bd9Sstevel@tonic-gate /*
13227c478bd9Sstevel@tonic-gate * Record that we are aiming to offline this cpu. This acts as
13237c478bd9Sstevel@tonic-gate * a barrier to further weak binding requests in thread_nomigrate
13247c478bd9Sstevel@tonic-gate * and also causes cpu_choose, disp_lowpri_cpu and setfrontdq to
13257c478bd9Sstevel@tonic-gate * lean away from this cpu. Further strong bindings are already
13267c478bd9Sstevel@tonic-gate * avoided since we hold cpu_lock. Since threads that are set
13277c478bd9Sstevel@tonic-gate * runnable around now and others coming off the target cpu are
13287c478bd9Sstevel@tonic-gate * directed away from the target, existing strong and weak bindings
13297c478bd9Sstevel@tonic-gate * (especially the latter) to the target cpu stand maximum chance of
13307c478bd9Sstevel@tonic-gate * being able to unbind during the short delay loop below (if other
13317c478bd9Sstevel@tonic-gate * unbound threads compete they may not see cpu in time to unbind
13327c478bd9Sstevel@tonic-gate  * even if they would do so immediately).
13337c478bd9Sstevel@tonic-gate */
13347c478bd9Sstevel@tonic-gate cpu_inmotion = cp;
13357c478bd9Sstevel@tonic-gate membar_enter();
13367c478bd9Sstevel@tonic-gate
13377c478bd9Sstevel@tonic-gate /*
13387c478bd9Sstevel@tonic-gate * Check for kernel threads (strong or weak) bound to that CPU.
13397c478bd9Sstevel@tonic-gate * Strongly bound threads may not unbind, and we'll have to return
13407c478bd9Sstevel@tonic-gate * EBUSY. Weakly bound threads should always disappear - we've
13417c478bd9Sstevel@tonic-gate * stopped more weak binding with cpu_inmotion and existing
13427c478bd9Sstevel@tonic-gate * bindings will drain imminently (they may not block). Nonetheless
13437c478bd9Sstevel@tonic-gate * we will wait for a fixed period for all bound threads to disappear.
13447c478bd9Sstevel@tonic-gate * Inactive interrupt threads are OK (they'll be in TS_FREE
13457c478bd9Sstevel@tonic-gate * state). If test finds some bound threads, wait a few ticks
13467c478bd9Sstevel@tonic-gate * to give short-lived threads (such as interrupts) chance to
13477c478bd9Sstevel@tonic-gate * complete. Note that if no_quiesce is set, i.e. this cpu
13487c478bd9Sstevel@tonic-gate * is required to service interrupts, then we take the route
13497c478bd9Sstevel@tonic-gate * that permits interrupt threads to be active (or bypassed).
13507c478bd9Sstevel@tonic-gate */
13517c478bd9Sstevel@tonic-gate bound_func = no_quiesce ? disp_bound_threads : disp_bound_anythreads;
13527c478bd9Sstevel@tonic-gate
13537c478bd9Sstevel@tonic-gate again: for (loop_count = 0; (*bound_func)(cp, 0); loop_count++) {
13547c478bd9Sstevel@tonic-gate if (loop_count >= 5) {
13557c478bd9Sstevel@tonic-gate error = EBUSY; /* some threads still bound */
13567c478bd9Sstevel@tonic-gate break;
13577c478bd9Sstevel@tonic-gate }
13587c478bd9Sstevel@tonic-gate
13597c478bd9Sstevel@tonic-gate /*
13607c478bd9Sstevel@tonic-gate * If some threads were assigned, give them
13617c478bd9Sstevel@tonic-gate * a chance to complete or move.
13627c478bd9Sstevel@tonic-gate *
13637c478bd9Sstevel@tonic-gate * This assumes that the clock_thread is not bound
13647c478bd9Sstevel@tonic-gate * to any CPU, because the clock_thread is needed to
13657c478bd9Sstevel@tonic-gate * do the delay(hz/100).
13667c478bd9Sstevel@tonic-gate *
13677c478bd9Sstevel@tonic-gate * Note: we still hold the cpu_lock while waiting for
13687c478bd9Sstevel@tonic-gate * the next clock tick. This is OK since it isn't
13697c478bd9Sstevel@tonic-gate * needed for anything else except processor_bind(2),
13707c478bd9Sstevel@tonic-gate * and system initialization. If we drop the lock,
13717c478bd9Sstevel@tonic-gate * we would risk another p_online disabling the last
13727c478bd9Sstevel@tonic-gate * processor.
13737c478bd9Sstevel@tonic-gate */
13747c478bd9Sstevel@tonic-gate delay(hz/100);
13757c478bd9Sstevel@tonic-gate }
13767c478bd9Sstevel@tonic-gate
1377454ab202SMadhavan Venkataraman if (error == 0 && callout_off == 0) {
1378454ab202SMadhavan Venkataraman callout_cpu_offline(cp);
1379454ab202SMadhavan Venkataraman callout_off = 1;
1380454ab202SMadhavan Venkataraman }
1381454ab202SMadhavan Venkataraman
13827c478bd9Sstevel@tonic-gate if (error == 0 && cyclic_off == 0) {
13837c478bd9Sstevel@tonic-gate if (!cyclic_offline(cp)) {
13847c478bd9Sstevel@tonic-gate /*
13857c478bd9Sstevel@tonic-gate * We must have bound cyclics...
13867c478bd9Sstevel@tonic-gate */
13877c478bd9Sstevel@tonic-gate error = EBUSY;
13887c478bd9Sstevel@tonic-gate goto out;
13897c478bd9Sstevel@tonic-gate }
13907c478bd9Sstevel@tonic-gate cyclic_off = 1;
13917c478bd9Sstevel@tonic-gate }
13927c478bd9Sstevel@tonic-gate
13937c478bd9Sstevel@tonic-gate /*
13947c478bd9Sstevel@tonic-gate * Call mp_cpu_stop() to perform any special operations
13957c478bd9Sstevel@tonic-gate * needed for this machine architecture to offline a CPU.
13967c478bd9Sstevel@tonic-gate */
13977c478bd9Sstevel@tonic-gate if (error == 0)
13987c478bd9Sstevel@tonic-gate error = mp_cpu_stop(cp); /* arch-dep hook */
13997c478bd9Sstevel@tonic-gate
14007c478bd9Sstevel@tonic-gate /*
14017c478bd9Sstevel@tonic-gate * If that all worked, take the CPU offline and decrement
14027c478bd9Sstevel@tonic-gate * ncpus_online.
14037c478bd9Sstevel@tonic-gate */
14047c478bd9Sstevel@tonic-gate if (error == 0) {
14057c478bd9Sstevel@tonic-gate /*
14067c478bd9Sstevel@tonic-gate * Put all the cpus into a known safe place.
14077c478bd9Sstevel@tonic-gate * No mutexes can be entered while CPUs are paused.
14087c478bd9Sstevel@tonic-gate */
1409*bce835f2SJosef 'Jeff' Sipek pause_cpus(cp, NULL);
14107c478bd9Sstevel@tonic-gate /*
14117c478bd9Sstevel@tonic-gate * Repeat the operation, if necessary, to make sure that
14127c478bd9Sstevel@tonic-gate * all outstanding low-level interrupts run to completion
14137c478bd9Sstevel@tonic-gate * before we set the CPU_QUIESCED flag. It's also possible
14147c478bd9Sstevel@tonic-gate * that a thread has weak bound to the cpu despite our raising
14157c478bd9Sstevel@tonic-gate * cpu_inmotion above since it may have loaded that
14167c478bd9Sstevel@tonic-gate * value before the barrier became visible (this would have
14177c478bd9Sstevel@tonic-gate * to be the thread that was on the target cpu at the time
14187c478bd9Sstevel@tonic-gate * we raised the barrier).
14197c478bd9Sstevel@tonic-gate */
14207c478bd9Sstevel@tonic-gate if ((!no_quiesce && cp->cpu_intr_actv != 0) ||
14217c478bd9Sstevel@tonic-gate (*bound_func)(cp, 1)) {
14227c478bd9Sstevel@tonic-gate start_cpus();
14237c478bd9Sstevel@tonic-gate (void) mp_cpu_start(cp);
14247c478bd9Sstevel@tonic-gate goto again;
14257c478bd9Sstevel@tonic-gate }
14267c478bd9Sstevel@tonic-gate ncp = cp->cpu_next_part;
14277c478bd9Sstevel@tonic-gate cpu_lpl = cp->cpu_lpl;
14287c478bd9Sstevel@tonic-gate ASSERT(cpu_lpl != NULL);
14297c478bd9Sstevel@tonic-gate
14307c478bd9Sstevel@tonic-gate /*
14317c478bd9Sstevel@tonic-gate * Remove the CPU from the list of active CPUs.
14327c478bd9Sstevel@tonic-gate */
14337c478bd9Sstevel@tonic-gate cpu_remove_active(cp);
14347c478bd9Sstevel@tonic-gate
14357c478bd9Sstevel@tonic-gate /*
14367c478bd9Sstevel@tonic-gate * Walk the active process list and look for threads
14377c478bd9Sstevel@tonic-gate * whose home lgroup needs to be updated, or
14387c478bd9Sstevel@tonic-gate * the last CPU they run on is the one being offlined now.
14397c478bd9Sstevel@tonic-gate */
14407c478bd9Sstevel@tonic-gate
14417c478bd9Sstevel@tonic-gate ASSERT(curthread->t_cpu != cp);
14427c478bd9Sstevel@tonic-gate for (p = practive; p != NULL; p = p->p_next) {
14437c478bd9Sstevel@tonic-gate
14447c478bd9Sstevel@tonic-gate t = p->p_tlist;
14457c478bd9Sstevel@tonic-gate
14467c478bd9Sstevel@tonic-gate if (t == NULL)
14477c478bd9Sstevel@tonic-gate continue;
14487c478bd9Sstevel@tonic-gate
14497c478bd9Sstevel@tonic-gate lgrp_diff_lpl = 0;
14507c478bd9Sstevel@tonic-gate
14517c478bd9Sstevel@tonic-gate do {
14527c478bd9Sstevel@tonic-gate ASSERT(t->t_lpl != NULL);
14537c478bd9Sstevel@tonic-gate /*
14547c478bd9Sstevel@tonic-gate * Taking last CPU in lpl offline
14557c478bd9Sstevel@tonic-gate * Rehome thread if it is in this lpl
14567c478bd9Sstevel@tonic-gate * Otherwise, update the count of how many
14577c478bd9Sstevel@tonic-gate * threads are in this CPU's lgroup but have
14587c478bd9Sstevel@tonic-gate * a different lpl.
14597c478bd9Sstevel@tonic-gate */
14607c478bd9Sstevel@tonic-gate
14617c478bd9Sstevel@tonic-gate if (cpu_lpl->lpl_ncpu == 0) {
14627c478bd9Sstevel@tonic-gate if (t->t_lpl == cpu_lpl)
14637c478bd9Sstevel@tonic-gate lgrp_move_thread(t,
14647c478bd9Sstevel@tonic-gate lgrp_choose(t,
14657c478bd9Sstevel@tonic-gate t->t_cpupart), 0);
14667c478bd9Sstevel@tonic-gate else if (t->t_lpl->lpl_lgrpid ==
14677c478bd9Sstevel@tonic-gate cpu_lpl->lpl_lgrpid)
14687c478bd9Sstevel@tonic-gate lgrp_diff_lpl++;
14697c478bd9Sstevel@tonic-gate }
14707c478bd9Sstevel@tonic-gate ASSERT(t->t_lpl->lpl_ncpu > 0);
14717c478bd9Sstevel@tonic-gate
14727c478bd9Sstevel@tonic-gate /*
14737c478bd9Sstevel@tonic-gate * Update CPU last ran on if it was this CPU
14747c478bd9Sstevel@tonic-gate */
14757c478bd9Sstevel@tonic-gate if (t->t_cpu == cp && t->t_bound_cpu != cp)
1476cf74e62bSmh27603 t->t_cpu = disp_lowpri_cpu(ncp,
1477cf74e62bSmh27603 t->t_lpl, t->t_pri, NULL);
14787c478bd9Sstevel@tonic-gate ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
14797c478bd9Sstevel@tonic-gate t->t_weakbound_cpu == cp);
14807c478bd9Sstevel@tonic-gate
14817c478bd9Sstevel@tonic-gate t = t->t_forw;
14827c478bd9Sstevel@tonic-gate } while (t != p->p_tlist);
14837c478bd9Sstevel@tonic-gate
14847c478bd9Sstevel@tonic-gate /*
14857c478bd9Sstevel@tonic-gate * Didn't find any threads in the same lgroup as this
14867c478bd9Sstevel@tonic-gate * CPU with a different lpl, so remove the lgroup from
14877c478bd9Sstevel@tonic-gate * the process lgroup bitmask.
14887c478bd9Sstevel@tonic-gate */
14897c478bd9Sstevel@tonic-gate
14907c478bd9Sstevel@tonic-gate if (lgrp_diff_lpl == 0)
14917c478bd9Sstevel@tonic-gate klgrpset_del(p->p_lgrpset, cpu_lpl->lpl_lgrpid);
14927c478bd9Sstevel@tonic-gate }
14937c478bd9Sstevel@tonic-gate
14947c478bd9Sstevel@tonic-gate /*
14957c478bd9Sstevel@tonic-gate * Walk thread list looking for threads that need to be
14967c478bd9Sstevel@tonic-gate * rehomed, since there are some threads that are not in
14977c478bd9Sstevel@tonic-gate * their process's p_tlist.
14987c478bd9Sstevel@tonic-gate */
14997c478bd9Sstevel@tonic-gate
15007c478bd9Sstevel@tonic-gate t = curthread;
15017c478bd9Sstevel@tonic-gate do {
15027c478bd9Sstevel@tonic-gate ASSERT(t != NULL && t->t_lpl != NULL);
15037c478bd9Sstevel@tonic-gate
15047c478bd9Sstevel@tonic-gate /*
15057c478bd9Sstevel@tonic-gate * Rehome threads with same lpl as this CPU when this
15067c478bd9Sstevel@tonic-gate * is the last CPU in the lpl.
15077c478bd9Sstevel@tonic-gate */
15087c478bd9Sstevel@tonic-gate
15097c478bd9Sstevel@tonic-gate if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl))
15107c478bd9Sstevel@tonic-gate lgrp_move_thread(t,
15117c478bd9Sstevel@tonic-gate lgrp_choose(t, t->t_cpupart), 1);
15127c478bd9Sstevel@tonic-gate
15137c478bd9Sstevel@tonic-gate ASSERT(t->t_lpl->lpl_ncpu > 0);
15147c478bd9Sstevel@tonic-gate
15157c478bd9Sstevel@tonic-gate /*
15167c478bd9Sstevel@tonic-gate * Update CPU last ran on if it was this CPU
15177c478bd9Sstevel@tonic-gate */
15187c478bd9Sstevel@tonic-gate
15197c478bd9Sstevel@tonic-gate if (t->t_cpu == cp && t->t_bound_cpu != cp) {
15207c478bd9Sstevel@tonic-gate t->t_cpu = disp_lowpri_cpu(ncp,
15217c478bd9Sstevel@tonic-gate t->t_lpl, t->t_pri, NULL);
15227c478bd9Sstevel@tonic-gate }
15237c478bd9Sstevel@tonic-gate ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
15247c478bd9Sstevel@tonic-gate t->t_weakbound_cpu == cp);
15257c478bd9Sstevel@tonic-gate t = t->t_next;
15267c478bd9Sstevel@tonic-gate
15277c478bd9Sstevel@tonic-gate } while (t != curthread);
15287c478bd9Sstevel@tonic-gate ASSERT((cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) == 0);
15297c478bd9Sstevel@tonic-gate cp->cpu_flags |= CPU_OFFLINE;
15307c478bd9Sstevel@tonic-gate disp_cpu_inactive(cp);
15317c478bd9Sstevel@tonic-gate if (!no_quiesce)
15327c478bd9Sstevel@tonic-gate cp->cpu_flags |= CPU_QUIESCED;
15337c478bd9Sstevel@tonic-gate ncpus_online--;
15347c478bd9Sstevel@tonic-gate cpu_set_state(cp);
15357c478bd9Sstevel@tonic-gate cpu_inmotion = NULL;
15367c478bd9Sstevel@tonic-gate start_cpus();
15377c478bd9Sstevel@tonic-gate cpu_stats_kstat_destroy(cp);
15387c478bd9Sstevel@tonic-gate cpu_delete_intrstat(cp);
15397c478bd9Sstevel@tonic-gate lgrp_kstat_destroy(cp);
15407c478bd9Sstevel@tonic-gate }
15417c478bd9Sstevel@tonic-gate
15427c478bd9Sstevel@tonic-gate out:
15437c478bd9Sstevel@tonic-gate cpu_inmotion = NULL;
15447c478bd9Sstevel@tonic-gate
15457c478bd9Sstevel@tonic-gate /*
15467c478bd9Sstevel@tonic-gate * If we failed, re-enable interrupts.
15477c478bd9Sstevel@tonic-gate * Do this even if cpu_intr_disable returned an error, because
15487c478bd9Sstevel@tonic-gate * it may have partially disabled interrupts.
15497c478bd9Sstevel@tonic-gate */
15507c478bd9Sstevel@tonic-gate if (error && intr_enable)
15517c478bd9Sstevel@tonic-gate cpu_intr_enable(cp);
15527c478bd9Sstevel@tonic-gate
15537c478bd9Sstevel@tonic-gate /*
15547c478bd9Sstevel@tonic-gate * If we failed, but managed to offline the cyclic subsystem on this
15557c478bd9Sstevel@tonic-gate * CPU, bring it back online.
15567c478bd9Sstevel@tonic-gate */
15577c478bd9Sstevel@tonic-gate if (error && cyclic_off)
15587c478bd9Sstevel@tonic-gate cyclic_online(cp);
15597c478bd9Sstevel@tonic-gate
15607c478bd9Sstevel@tonic-gate /*
1561454ab202SMadhavan Venkataraman * If we failed, but managed to offline callouts on this CPU,
1562454ab202SMadhavan Venkataraman * bring it back online.
1563454ab202SMadhavan Venkataraman */
1564454ab202SMadhavan Venkataraman if (error && callout_off)
1565454ab202SMadhavan Venkataraman callout_cpu_online(cp);
1566454ab202SMadhavan Venkataraman
1567454ab202SMadhavan Venkataraman /*
1568fb2f18f8Sesaxe * If we failed, tell the PG subsystem that the CPU is back
1569fb2f18f8Sesaxe */
1570fb2f18f8Sesaxe pg_cpupart_in(cp, pp);
1571fb2f18f8Sesaxe
1572fb2f18f8Sesaxe /*
15737c478bd9Sstevel@tonic-gate * If we failed, we need to notify everyone that this CPU is back on.
15747c478bd9Sstevel@tonic-gate */
1575b885580bSAlexander Kolbasov if (error != 0) {
1576b885580bSAlexander Kolbasov CPU_NEW_GENERATION(cp);
15777c478bd9Sstevel@tonic-gate cpu_state_change_notify(cp->cpu_id, CPU_ON);
1578b885580bSAlexander Kolbasov cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
1579b885580bSAlexander Kolbasov }
15807c478bd9Sstevel@tonic-gate
15817c478bd9Sstevel@tonic-gate return (error);
15827c478bd9Sstevel@tonic-gate }
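/*
 * Illustrative sketch of a hypothetical cpu_offline() caller: a plain
 * offline attempt, optionally retried with CPU_FORCED (which also
 * unbinds hard-bound threads, per the flags handling above) when the
 * first attempt returns EBUSY.  Whether forcing is appropriate is
 * caller policy; this only shows how the flags argument is consumed.
 */
#if 0	/* illustrative only */
static int
example_offline(processorid_t id, boolean_t force)
{
	cpu_t	*cp;
	int	error;

	mutex_enter(&cpu_lock);
	if ((cp = cpu_get(id)) == NULL) {
		mutex_exit(&cpu_lock);
		return (EINVAL);
	}
	error = cpu_offline(cp, 0);
	if (error == EBUSY && force)
		error = cpu_offline(cp, CPU_FORCED);
	mutex_exit(&cpu_lock);

	return (error);
}
#endif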
15837c478bd9Sstevel@tonic-gate
15847c478bd9Sstevel@tonic-gate /*
15857c478bd9Sstevel@tonic-gate * Mark the indicated CPU as faulted, taking it offline.
15867c478bd9Sstevel@tonic-gate */
15877c478bd9Sstevel@tonic-gate int
15887c478bd9Sstevel@tonic-gate cpu_faulted(cpu_t *cp, int flags)
15897c478bd9Sstevel@tonic-gate {
15907c478bd9Sstevel@tonic-gate int error = 0;
15917c478bd9Sstevel@tonic-gate
15927c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
15937c478bd9Sstevel@tonic-gate ASSERT(!cpu_is_poweredoff(cp));
15947c478bd9Sstevel@tonic-gate
15957c478bd9Sstevel@tonic-gate if (cpu_is_offline(cp)) {
15967c478bd9Sstevel@tonic-gate cp->cpu_flags &= ~CPU_SPARE;
15977c478bd9Sstevel@tonic-gate cp->cpu_flags |= CPU_FAULTED;
15987c478bd9Sstevel@tonic-gate mp_cpu_faulted_enter(cp);
15997c478bd9Sstevel@tonic-gate cpu_set_state(cp);
16007c478bd9Sstevel@tonic-gate return (0);
16017c478bd9Sstevel@tonic-gate }
16027c478bd9Sstevel@tonic-gate
16037c478bd9Sstevel@tonic-gate if ((error = cpu_offline(cp, flags)) == 0) {
16047c478bd9Sstevel@tonic-gate cp->cpu_flags |= CPU_FAULTED;
16057c478bd9Sstevel@tonic-gate mp_cpu_faulted_enter(cp);
16067c478bd9Sstevel@tonic-gate cpu_set_state(cp);
16077c478bd9Sstevel@tonic-gate }
16087c478bd9Sstevel@tonic-gate
16097c478bd9Sstevel@tonic-gate return (error);
16107c478bd9Sstevel@tonic-gate }
16117c478bd9Sstevel@tonic-gate
16127c478bd9Sstevel@tonic-gate /*
16137c478bd9Sstevel@tonic-gate * Mark the indicated CPU as a spare, taking it offline.
16147c478bd9Sstevel@tonic-gate */
16157c478bd9Sstevel@tonic-gate int
16167c478bd9Sstevel@tonic-gate cpu_spare(cpu_t *cp, int flags)
16177c478bd9Sstevel@tonic-gate {
16187c478bd9Sstevel@tonic-gate int error = 0;
16197c478bd9Sstevel@tonic-gate
16207c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
16217c478bd9Sstevel@tonic-gate ASSERT(!cpu_is_poweredoff(cp));
16227c478bd9Sstevel@tonic-gate
16237c478bd9Sstevel@tonic-gate if (cpu_is_offline(cp)) {
16247c478bd9Sstevel@tonic-gate if (cp->cpu_flags & CPU_FAULTED) {
16257c478bd9Sstevel@tonic-gate cp->cpu_flags &= ~CPU_FAULTED;
16267c478bd9Sstevel@tonic-gate mp_cpu_faulted_exit(cp);
16277c478bd9Sstevel@tonic-gate }
16287c478bd9Sstevel@tonic-gate cp->cpu_flags |= CPU_SPARE;
16297c478bd9Sstevel@tonic-gate cpu_set_state(cp);
16307c478bd9Sstevel@tonic-gate return (0);
16317c478bd9Sstevel@tonic-gate }
16327c478bd9Sstevel@tonic-gate
16337c478bd9Sstevel@tonic-gate if ((error = cpu_offline(cp, flags)) == 0) {
16347c478bd9Sstevel@tonic-gate cp->cpu_flags |= CPU_SPARE;
16357c478bd9Sstevel@tonic-gate cpu_set_state(cp);
16367c478bd9Sstevel@tonic-gate }
16377c478bd9Sstevel@tonic-gate
16387c478bd9Sstevel@tonic-gate return (error);
16397c478bd9Sstevel@tonic-gate }
16407c478bd9Sstevel@tonic-gate
16417c478bd9Sstevel@tonic-gate /*
16427c478bd9Sstevel@tonic-gate * Take the indicated CPU from poweroff to offline.
16437c478bd9Sstevel@tonic-gate */
16447c478bd9Sstevel@tonic-gate int
16457c478bd9Sstevel@tonic-gate cpu_poweron(cpu_t *cp)
16467c478bd9Sstevel@tonic-gate {
16477c478bd9Sstevel@tonic-gate int error = ENOTSUP;
16487c478bd9Sstevel@tonic-gate
16497c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
16507c478bd9Sstevel@tonic-gate ASSERT(cpu_is_poweredoff(cp));
16517c478bd9Sstevel@tonic-gate
16527c478bd9Sstevel@tonic-gate error = mp_cpu_poweron(cp); /* arch-dep hook */
16537c478bd9Sstevel@tonic-gate if (error == 0)
16547c478bd9Sstevel@tonic-gate cpu_set_state(cp);
16557c478bd9Sstevel@tonic-gate
16567c478bd9Sstevel@tonic-gate return (error);
16577c478bd9Sstevel@tonic-gate }
16587c478bd9Sstevel@tonic-gate
16597c478bd9Sstevel@tonic-gate /*
16607c478bd9Sstevel@tonic-gate * Take the indicated CPU from any inactive state to powered off.
16617c478bd9Sstevel@tonic-gate */
16627c478bd9Sstevel@tonic-gate int
16637c478bd9Sstevel@tonic-gate cpu_poweroff(cpu_t *cp)
16647c478bd9Sstevel@tonic-gate {
16657c478bd9Sstevel@tonic-gate int error = ENOTSUP;
16667c478bd9Sstevel@tonic-gate
16677c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
16687c478bd9Sstevel@tonic-gate ASSERT(cpu_is_offline(cp));
16697c478bd9Sstevel@tonic-gate
16707c478bd9Sstevel@tonic-gate if (!(cp->cpu_flags & CPU_QUIESCED))
16717c478bd9Sstevel@tonic-gate return (EBUSY); /* not completely idle */
16727c478bd9Sstevel@tonic-gate
16737c478bd9Sstevel@tonic-gate error = mp_cpu_poweroff(cp); /* arch-dep hook */
16747c478bd9Sstevel@tonic-gate if (error == 0)
16757c478bd9Sstevel@tonic-gate cpu_set_state(cp);
16767c478bd9Sstevel@tonic-gate
16777c478bd9Sstevel@tonic-gate return (error);
16787c478bd9Sstevel@tonic-gate }
16797c478bd9Sstevel@tonic-gate
16807c478bd9Sstevel@tonic-gate /*
16816890d023SEric Saxe * Initialize the Sequential CPU id lookup table
16826890d023SEric Saxe */
16836890d023SEric Saxe void
16846890d023SEric Saxe cpu_seq_tbl_init()
16856890d023SEric Saxe {
16866890d023SEric Saxe cpu_t **tbl;
16876890d023SEric Saxe
16886890d023SEric Saxe tbl = kmem_zalloc(sizeof (struct cpu *) * max_ncpus, KM_SLEEP);
16896890d023SEric Saxe tbl[0] = CPU;
16906890d023SEric Saxe
16916890d023SEric Saxe cpu_seq = tbl;
16926890d023SEric Saxe }
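/*
 * Illustrative sketch: iterating CPUs densely by sequential id through
 * cpu_seq[].  Entries can become NULL after cpu_del_unit(), so this
 * hypothetical walk bounds itself by max_cpu_seqid_ever and skips
 * holes; cpu_lock keeps the table stable for the duration.
 */
#if 0	/* illustrative only */
static void
example_walk_cpu_seq(void (*cb)(cpu_t *))
{
	int seqid;

	ASSERT(MUTEX_HELD(&cpu_lock));
	for (seqid = 0; seqid <= max_cpu_seqid_ever; seqid++) {
		if (cpu_seq[seqid] != NULL)
			cb(cpu_seq[seqid]);
	}
}
#endif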
16936890d023SEric Saxe
16946890d023SEric Saxe /*
16957c478bd9Sstevel@tonic-gate * Initialize the CPU lists for the first CPU.
16967c478bd9Sstevel@tonic-gate */
16977c478bd9Sstevel@tonic-gate void
16987c478bd9Sstevel@tonic-gate cpu_list_init(cpu_t *cp)
16997c478bd9Sstevel@tonic-gate {
17007c478bd9Sstevel@tonic-gate cp->cpu_next = cp;
17017c478bd9Sstevel@tonic-gate cp->cpu_prev = cp;
17027c478bd9Sstevel@tonic-gate cpu_list = cp;
1703c97ad5cdSakolb clock_cpu_list = cp;
17047c478bd9Sstevel@tonic-gate
17057c478bd9Sstevel@tonic-gate cp->cpu_next_onln = cp;
17067c478bd9Sstevel@tonic-gate cp->cpu_prev_onln = cp;
17077c478bd9Sstevel@tonic-gate cpu_active = cp;
17087c478bd9Sstevel@tonic-gate
17097c478bd9Sstevel@tonic-gate cp->cpu_seqid = 0;
17107c478bd9Sstevel@tonic-gate CPUSET_ADD(cpu_seqid_inuse, 0);
17116890d023SEric Saxe
17126890d023SEric Saxe /*
17136890d023SEric Saxe * Bootstrap cpu_seq using cpu_list
17146890d023SEric Saxe * The cpu_seq[] table will be dynamically allocated
17156890d023SEric Saxe * when kmem later becomes available (but before going MP)
17166890d023SEric Saxe */
17176890d023SEric Saxe cpu_seq = &cpu_list;
17186890d023SEric Saxe
17192af6eb52SMichael Corcoran cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
17207c478bd9Sstevel@tonic-gate cp_default.cp_cpulist = cp;
17217c478bd9Sstevel@tonic-gate cp_default.cp_ncpus = 1;
17227c478bd9Sstevel@tonic-gate cp->cpu_next_part = cp;
17237c478bd9Sstevel@tonic-gate cp->cpu_prev_part = cp;
17247c478bd9Sstevel@tonic-gate cp->cpu_part = &cp_default;
17257c478bd9Sstevel@tonic-gate
17267c478bd9Sstevel@tonic-gate CPUSET_ADD(cpu_available, cp->cpu_id);
17277c478bd9Sstevel@tonic-gate }
17287c478bd9Sstevel@tonic-gate
17297c478bd9Sstevel@tonic-gate /*
17307c478bd9Sstevel@tonic-gate * Insert a CPU into the list of available CPUs.
17317c478bd9Sstevel@tonic-gate */
17327c478bd9Sstevel@tonic-gate void
17337c478bd9Sstevel@tonic-gate cpu_add_unit(cpu_t *cp)
17347c478bd9Sstevel@tonic-gate {
17357c478bd9Sstevel@tonic-gate int seqid;
17367c478bd9Sstevel@tonic-gate
17377c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
17387c478bd9Sstevel@tonic-gate ASSERT(cpu_list != NULL); /* list started in cpu_list_init */
17397c478bd9Sstevel@tonic-gate
17407c478bd9Sstevel@tonic-gate lgrp_config(LGRP_CONFIG_CPU_ADD, (uintptr_t)cp, 0);
17417c478bd9Sstevel@tonic-gate
17427c478bd9Sstevel@tonic-gate /*
17437c478bd9Sstevel@tonic-gate * Note: most users of the cpu_list will grab the
17447c478bd9Sstevel@tonic-gate  * cpu_lock to ensure that it isn't modified. However,
17457c478bd9Sstevel@tonic-gate * certain users can't or won't do that. To allow this
17467c478bd9Sstevel@tonic-gate * we pause the other cpus. Users who walk the list
17477c478bd9Sstevel@tonic-gate  * without cpu_lock must disable kernel preemption
17487c478bd9Sstevel@tonic-gate  * to ensure that the list isn't modified underneath
17497c478bd9Sstevel@tonic-gate * them. Also, any cached pointers to cpu structures
17507c478bd9Sstevel@tonic-gate  * must be revalidated by checking that the
17517c478bd9Sstevel@tonic-gate  * cpu_next pointer is not NULL. This check must
17527c478bd9Sstevel@tonic-gate * be done with the cpu_lock held or kernel preemption
17537c478bd9Sstevel@tonic-gate * disabled. This check relies upon the fact that
17547c478bd9Sstevel@tonic-gate  * old cpu structures are not freed or cleared after they are
17557c478bd9Sstevel@tonic-gate  * removed from the cpu_list (a walk is sketched after this function).
17567c478bd9Sstevel@tonic-gate *
17577c478bd9Sstevel@tonic-gate * Note that the clock code walks the cpu list dereferencing
17587c478bd9Sstevel@tonic-gate * the cpu_part pointer, so we need to initialize it before
17597c478bd9Sstevel@tonic-gate * adding the cpu to the list.
17607c478bd9Sstevel@tonic-gate */
17617c478bd9Sstevel@tonic-gate cp->cpu_part = &cp_default;
1762*bce835f2SJosef 'Jeff' Sipek pause_cpus(NULL, NULL);
17637c478bd9Sstevel@tonic-gate cp->cpu_next = cpu_list;
17647c478bd9Sstevel@tonic-gate cp->cpu_prev = cpu_list->cpu_prev;
17657c478bd9Sstevel@tonic-gate cpu_list->cpu_prev->cpu_next = cp;
17667c478bd9Sstevel@tonic-gate cpu_list->cpu_prev = cp;
17677c478bd9Sstevel@tonic-gate start_cpus();
17687c478bd9Sstevel@tonic-gate
17697c478bd9Sstevel@tonic-gate for (seqid = 0; CPU_IN_SET(cpu_seqid_inuse, seqid); seqid++)
17707c478bd9Sstevel@tonic-gate continue;
17717c478bd9Sstevel@tonic-gate CPUSET_ADD(cpu_seqid_inuse, seqid);
17727c478bd9Sstevel@tonic-gate cp->cpu_seqid = seqid;
1773b52a336eSPavel Tatashin
1774b52a336eSPavel Tatashin if (seqid > max_cpu_seqid_ever)
1775b52a336eSPavel Tatashin max_cpu_seqid_ever = seqid;
1776b52a336eSPavel Tatashin
17777c478bd9Sstevel@tonic-gate ASSERT(ncpus < max_ncpus);
17787c478bd9Sstevel@tonic-gate ncpus++;
17792af6eb52SMichael Corcoran cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
17807c478bd9Sstevel@tonic-gate cpu[cp->cpu_id] = cp;
17817c478bd9Sstevel@tonic-gate CPUSET_ADD(cpu_available, cp->cpu_id);
17826890d023SEric Saxe cpu_seq[cp->cpu_seqid] = cp;
17837c478bd9Sstevel@tonic-gate
17847c478bd9Sstevel@tonic-gate /*
17857c478bd9Sstevel@tonic-gate * allocate a pause thread for this CPU.
17867c478bd9Sstevel@tonic-gate */
17877c478bd9Sstevel@tonic-gate cpu_pause_alloc(cp);
17887c478bd9Sstevel@tonic-gate
17897c478bd9Sstevel@tonic-gate /*
17907c478bd9Sstevel@tonic-gate * So that new CPUs won't have NULL prev_onln and next_onln pointers,
17917c478bd9Sstevel@tonic-gate * link them into a list of just that CPU.
17927c478bd9Sstevel@tonic-gate * This is so that disp_lowpri_cpu will work for thread_create in
17937c478bd9Sstevel@tonic-gate * pause_cpus() when called from the startup thread in a new CPU.
17947c478bd9Sstevel@tonic-gate */
17957c478bd9Sstevel@tonic-gate cp->cpu_next_onln = cp;
17967c478bd9Sstevel@tonic-gate cp->cpu_prev_onln = cp;
17977c478bd9Sstevel@tonic-gate cpu_info_kstat_create(cp);
17987c478bd9Sstevel@tonic-gate cp->cpu_next_part = cp;
17997c478bd9Sstevel@tonic-gate cp->cpu_prev_part = cp;
18007c478bd9Sstevel@tonic-gate
18017c478bd9Sstevel@tonic-gate init_cpu_mstate(cp, CMS_SYSTEM);
18027c478bd9Sstevel@tonic-gate
18037c478bd9Sstevel@tonic-gate pool_pset_mod = gethrtime();
18047c478bd9Sstevel@tonic-gate }
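/*
 * Illustrative sketch of a lock-free walk of the cpu_list ring, per the
 * rules described in cpu_add_unit() above: kernel preemption is
 * disabled so the list cannot be modified out from under us (a
 * concurrent pause_cpus() cannot complete) while the hypothetical
 * callback is applied to each cpu.
 */
#if 0	/* illustrative only */
static void
example_walk_cpu_list(void (*cb)(cpu_t *))
{
	cpu_t	*start, *cp;

	kpreempt_disable();		/* holds the list stable */
	start = cp = cpu_list;
	do {
		cb(cp);
		cp = cp->cpu_next;
	} while (cp != start);
	kpreempt_enable();
}
#endif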
18057c478bd9Sstevel@tonic-gate
18067c478bd9Sstevel@tonic-gate /*
18077c478bd9Sstevel@tonic-gate * Do the opposite of cpu_add_unit().
18087c478bd9Sstevel@tonic-gate */
18097c478bd9Sstevel@tonic-gate void
18107c478bd9Sstevel@tonic-gate cpu_del_unit(int cpuid)
18117c478bd9Sstevel@tonic-gate {
18127c478bd9Sstevel@tonic-gate struct cpu *cp, *cpnext;
18137c478bd9Sstevel@tonic-gate
18147c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
18157c478bd9Sstevel@tonic-gate cp = cpu[cpuid];
18167c478bd9Sstevel@tonic-gate ASSERT(cp != NULL);
18177c478bd9Sstevel@tonic-gate
18187c478bd9Sstevel@tonic-gate ASSERT(cp->cpu_next_onln == cp);
18197c478bd9Sstevel@tonic-gate ASSERT(cp->cpu_prev_onln == cp);
18207c478bd9Sstevel@tonic-gate ASSERT(cp->cpu_next_part == cp);
18217c478bd9Sstevel@tonic-gate ASSERT(cp->cpu_prev_part == cp);
18227c478bd9Sstevel@tonic-gate
1823fb2f18f8Sesaxe /*
1824fb2f18f8Sesaxe * Tear down the CPU's physical ID cache, and update any
1825fb2f18f8Sesaxe * processor groups
1826fb2f18f8Sesaxe */
1827023e71deSHaik Aftandilian pg_cpu_fini(cp, NULL);
1828fb2f18f8Sesaxe pghw_physid_destroy(cp);
18297c478bd9Sstevel@tonic-gate
18307c478bd9Sstevel@tonic-gate /*
18317c478bd9Sstevel@tonic-gate * Destroy kstat stuff.
18327c478bd9Sstevel@tonic-gate */
18337c478bd9Sstevel@tonic-gate cpu_info_kstat_destroy(cp);
18347c478bd9Sstevel@tonic-gate term_cpu_mstate(cp);
18357c478bd9Sstevel@tonic-gate /*
18367c478bd9Sstevel@tonic-gate * Free up pause thread.
18377c478bd9Sstevel@tonic-gate */
18387c478bd9Sstevel@tonic-gate cpu_pause_free(cp);
18397c478bd9Sstevel@tonic-gate CPUSET_DEL(cpu_available, cp->cpu_id);
18407c478bd9Sstevel@tonic-gate cpu[cp->cpu_id] = NULL;
18416890d023SEric Saxe cpu_seq[cp->cpu_seqid] = NULL;
18426890d023SEric Saxe
18437c478bd9Sstevel@tonic-gate /*
18447c478bd9Sstevel@tonic-gate * The clock thread and mutex_vector_enter cannot hold the
18457c478bd9Sstevel@tonic-gate * cpu_lock while traversing the cpu list, therefore we pause
18467c478bd9Sstevel@tonic-gate * all other threads by pausing the other cpus. These, and any
18477c478bd9Sstevel@tonic-gate * other routines holding cpu pointers while possibly sleeping
18487c478bd9Sstevel@tonic-gate * must be sure to call kpreempt_disable before processing the
18497c478bd9Sstevel@tonic-gate * list and be sure to check that the cpu has not been deleted
18507c478bd9Sstevel@tonic-gate * after any sleeps (check cp->cpu_next != NULL). We guarantee
18517c478bd9Sstevel@tonic-gate * to keep the deleted cpu structure around.
18527c478bd9Sstevel@tonic-gate *
18537c478bd9Sstevel@tonic-gate * Note that this MUST be done AFTER cpu_available
18547c478bd9Sstevel@tonic-gate * has been updated so that we don't waste time
18557c478bd9Sstevel@tonic-gate * trying to pause the cpu we're trying to delete.
18567c478bd9Sstevel@tonic-gate */
1857*bce835f2SJosef 'Jeff' Sipek pause_cpus(NULL, NULL);
18587c478bd9Sstevel@tonic-gate
18597c478bd9Sstevel@tonic-gate cpnext = cp->cpu_next;
18607c478bd9Sstevel@tonic-gate cp->cpu_prev->cpu_next = cp->cpu_next;
18617c478bd9Sstevel@tonic-gate cp->cpu_next->cpu_prev = cp->cpu_prev;
18627c478bd9Sstevel@tonic-gate if (cp == cpu_list)
18637c478bd9Sstevel@tonic-gate cpu_list = cpnext;
18647c478bd9Sstevel@tonic-gate
18657c478bd9Sstevel@tonic-gate /*
18667c478bd9Sstevel@tonic-gate * Signals that the cpu has been deleted (see above).
18677c478bd9Sstevel@tonic-gate */
18687c478bd9Sstevel@tonic-gate cp->cpu_next = NULL;
18697c478bd9Sstevel@tonic-gate cp->cpu_prev = NULL;
18707c478bd9Sstevel@tonic-gate
18717c478bd9Sstevel@tonic-gate start_cpus();
18727c478bd9Sstevel@tonic-gate
18737c478bd9Sstevel@tonic-gate CPUSET_DEL(cpu_seqid_inuse, cp->cpu_seqid);
18747c478bd9Sstevel@tonic-gate ncpus--;
18757c478bd9Sstevel@tonic-gate lgrp_config(LGRP_CONFIG_CPU_DEL, (uintptr_t)cp, 0);
18767c478bd9Sstevel@tonic-gate
18777c478bd9Sstevel@tonic-gate pool_pset_mod = gethrtime();
18787c478bd9Sstevel@tonic-gate }
18797c478bd9Sstevel@tonic-gate
18807c478bd9Sstevel@tonic-gate /*
18817c478bd9Sstevel@tonic-gate * Add a CPU to the list of active CPUs.
18827c478bd9Sstevel@tonic-gate * This routine must not get any locks, because other CPUs are paused.
18837c478bd9Sstevel@tonic-gate */
18847c478bd9Sstevel@tonic-gate static void
18857c478bd9Sstevel@tonic-gate cpu_add_active_internal(cpu_t *cp)
18867c478bd9Sstevel@tonic-gate {
18877c478bd9Sstevel@tonic-gate cpupart_t *pp = cp->cpu_part;
18887c478bd9Sstevel@tonic-gate
18897c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
18907c478bd9Sstevel@tonic-gate ASSERT(cpu_list != NULL); /* list started in cpu_list_init */
18917c478bd9Sstevel@tonic-gate
18927c478bd9Sstevel@tonic-gate ncpus_online++;
18937c478bd9Sstevel@tonic-gate cpu_set_state(cp);
18947c478bd9Sstevel@tonic-gate cp->cpu_next_onln = cpu_active;
18957c478bd9Sstevel@tonic-gate cp->cpu_prev_onln = cpu_active->cpu_prev_onln;
18967c478bd9Sstevel@tonic-gate cpu_active->cpu_prev_onln->cpu_next_onln = cp;
18977c478bd9Sstevel@tonic-gate cpu_active->cpu_prev_onln = cp;
18987c478bd9Sstevel@tonic-gate
18997c478bd9Sstevel@tonic-gate if (pp->cp_cpulist) {
19007c478bd9Sstevel@tonic-gate cp->cpu_next_part = pp->cp_cpulist;
19017c478bd9Sstevel@tonic-gate cp->cpu_prev_part = pp->cp_cpulist->cpu_prev_part;
19027c478bd9Sstevel@tonic-gate pp->cp_cpulist->cpu_prev_part->cpu_next_part = cp;
19037c478bd9Sstevel@tonic-gate pp->cp_cpulist->cpu_prev_part = cp;
19047c478bd9Sstevel@tonic-gate } else {
19057c478bd9Sstevel@tonic-gate ASSERT(pp->cp_ncpus == 0);
19067c478bd9Sstevel@tonic-gate pp->cp_cpulist = cp->cpu_next_part = cp->cpu_prev_part = cp;
19077c478bd9Sstevel@tonic-gate }
19087c478bd9Sstevel@tonic-gate pp->cp_ncpus++;
19097c478bd9Sstevel@tonic-gate if (pp->cp_ncpus == 1) {
19107c478bd9Sstevel@tonic-gate cp_numparts_nonempty++;
19117c478bd9Sstevel@tonic-gate ASSERT(cp_numparts_nonempty != 0);
19127c478bd9Sstevel@tonic-gate }
19137c478bd9Sstevel@tonic-gate
1914fb2f18f8Sesaxe pg_cpu_active(cp);
19157c478bd9Sstevel@tonic-gate lgrp_config(LGRP_CONFIG_CPU_ONLINE, (uintptr_t)cp, 0);
19167c478bd9Sstevel@tonic-gate
19177c478bd9Sstevel@tonic-gate bzero(&cp->cpu_loadavg, sizeof (cp->cpu_loadavg));
19187c478bd9Sstevel@tonic-gate }
19197c478bd9Sstevel@tonic-gate
19207c478bd9Sstevel@tonic-gate /*
19217c478bd9Sstevel@tonic-gate * Add a CPU to the list of active CPUs.
19227c478bd9Sstevel@tonic-gate * This is called from machine-dependent layers when a new CPU is started.
19237c478bd9Sstevel@tonic-gate */
19247c478bd9Sstevel@tonic-gate void
19257c478bd9Sstevel@tonic-gate cpu_add_active(cpu_t *cp)
19267c478bd9Sstevel@tonic-gate {
1927fb2f18f8Sesaxe pg_cpupart_in(cp, cp->cpu_part);
1928fb2f18f8Sesaxe
1929*bce835f2SJosef 'Jeff' Sipek pause_cpus(NULL, NULL);
19307c478bd9Sstevel@tonic-gate cpu_add_active_internal(cp);
19317c478bd9Sstevel@tonic-gate start_cpus();
1932fb2f18f8Sesaxe
19337c478bd9Sstevel@tonic-gate cpu_stats_kstat_create(cp);
19347c478bd9Sstevel@tonic-gate cpu_create_intrstat(cp);
19357c478bd9Sstevel@tonic-gate lgrp_kstat_create(cp);
19367c478bd9Sstevel@tonic-gate cpu_state_change_notify(cp->cpu_id, CPU_INIT);
19377c478bd9Sstevel@tonic-gate }
19387c478bd9Sstevel@tonic-gate
19397c478bd9Sstevel@tonic-gate
19407c478bd9Sstevel@tonic-gate /*
19417c478bd9Sstevel@tonic-gate * Remove a CPU from the list of active CPUs.
19427c478bd9Sstevel@tonic-gate * This routine must not get any locks, because other CPUs are paused.
19437c478bd9Sstevel@tonic-gate */
19447c478bd9Sstevel@tonic-gate /* ARGSUSED */
19457c478bd9Sstevel@tonic-gate static void
19467c478bd9Sstevel@tonic-gate cpu_remove_active(cpu_t *cp)
19477c478bd9Sstevel@tonic-gate {
19487c478bd9Sstevel@tonic-gate cpupart_t *pp = cp->cpu_part;
19497c478bd9Sstevel@tonic-gate
19507c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
19517c478bd9Sstevel@tonic-gate ASSERT(cp->cpu_next_onln != cp); /* not the last one */
19527c478bd9Sstevel@tonic-gate ASSERT(cp->cpu_prev_onln != cp); /* not the last one */
19537c478bd9Sstevel@tonic-gate
1954fb2f18f8Sesaxe pg_cpu_inactive(cp);
19557c478bd9Sstevel@tonic-gate
19567c478bd9Sstevel@tonic-gate lgrp_config(LGRP_CONFIG_CPU_OFFLINE, (uintptr_t)cp, 0);
19577c478bd9Sstevel@tonic-gate
19582850d85bSmv143129 if (cp == clock_cpu_list)
19592850d85bSmv143129 clock_cpu_list = cp->cpu_next_onln;
19602850d85bSmv143129
19617c478bd9Sstevel@tonic-gate cp->cpu_prev_onln->cpu_next_onln = cp->cpu_next_onln;
19627c478bd9Sstevel@tonic-gate cp->cpu_next_onln->cpu_prev_onln = cp->cpu_prev_onln;
19637c478bd9Sstevel@tonic-gate if (cpu_active == cp) {
19647c478bd9Sstevel@tonic-gate cpu_active = cp->cpu_next_onln;
19657c478bd9Sstevel@tonic-gate }
19667c478bd9Sstevel@tonic-gate cp->cpu_next_onln = cp;
19677c478bd9Sstevel@tonic-gate cp->cpu_prev_onln = cp;
19687c478bd9Sstevel@tonic-gate
19697c478bd9Sstevel@tonic-gate cp->cpu_prev_part->cpu_next_part = cp->cpu_next_part;
19707c478bd9Sstevel@tonic-gate cp->cpu_next_part->cpu_prev_part = cp->cpu_prev_part;
19717c478bd9Sstevel@tonic-gate if (pp->cp_cpulist == cp) {
19727c478bd9Sstevel@tonic-gate pp->cp_cpulist = cp->cpu_next_part;
19737c478bd9Sstevel@tonic-gate ASSERT(pp->cp_cpulist != cp);
19747c478bd9Sstevel@tonic-gate }
19757c478bd9Sstevel@tonic-gate cp->cpu_next_part = cp;
19767c478bd9Sstevel@tonic-gate cp->cpu_prev_part = cp;
19777c478bd9Sstevel@tonic-gate pp->cp_ncpus--;
19787c478bd9Sstevel@tonic-gate if (pp->cp_ncpus == 0) {
19797c478bd9Sstevel@tonic-gate cp_numparts_nonempty--;
19807c478bd9Sstevel@tonic-gate ASSERT(cp_numparts_nonempty != 0);
19817c478bd9Sstevel@tonic-gate }
19827c478bd9Sstevel@tonic-gate }
19837c478bd9Sstevel@tonic-gate
19847c478bd9Sstevel@tonic-gate /*
19857c478bd9Sstevel@tonic-gate * Routine used to setup a newly inserted CPU in preparation for starting
19867c478bd9Sstevel@tonic-gate * it running code.
19877c478bd9Sstevel@tonic-gate */
19887c478bd9Sstevel@tonic-gate int
19897c478bd9Sstevel@tonic-gate cpu_configure(int cpuid)
19907c478bd9Sstevel@tonic-gate {
19917c478bd9Sstevel@tonic-gate int retval = 0;
19927c478bd9Sstevel@tonic-gate
19937c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
19947c478bd9Sstevel@tonic-gate
19957c478bd9Sstevel@tonic-gate /*
19967c478bd9Sstevel@tonic-gate * Some structures are statically allocated based upon
19977c478bd9Sstevel@tonic-gate * the maximum number of cpus the system supports. Do not
19987c478bd9Sstevel@tonic-gate * try to add anything beyond this limit.
19997c478bd9Sstevel@tonic-gate */
20007c478bd9Sstevel@tonic-gate if (cpuid < 0 || cpuid >= NCPU) {
20017c478bd9Sstevel@tonic-gate return (EINVAL);
20027c478bd9Sstevel@tonic-gate }
20037c478bd9Sstevel@tonic-gate
20047c478bd9Sstevel@tonic-gate if ((cpu[cpuid] != NULL) && (cpu[cpuid]->cpu_flags != 0)) {
20057c478bd9Sstevel@tonic-gate return (EALREADY);
20067c478bd9Sstevel@tonic-gate }
20077c478bd9Sstevel@tonic-gate
20087c478bd9Sstevel@tonic-gate if ((retval = mp_cpu_configure(cpuid)) != 0) {
20097c478bd9Sstevel@tonic-gate return (retval);
20107c478bd9Sstevel@tonic-gate }
20117c478bd9Sstevel@tonic-gate
20127c478bd9Sstevel@tonic-gate cpu[cpuid]->cpu_flags = CPU_QUIESCED | CPU_OFFLINE | CPU_POWEROFF;
20137c478bd9Sstevel@tonic-gate cpu_set_state(cpu[cpuid]);
20147c478bd9Sstevel@tonic-gate retval = cpu_state_change_hooks(cpuid, CPU_CONFIG, CPU_UNCONFIG);
20157c478bd9Sstevel@tonic-gate if (retval != 0)
20167c478bd9Sstevel@tonic-gate (void) mp_cpu_unconfigure(cpuid);
20177c478bd9Sstevel@tonic-gate
20187c478bd9Sstevel@tonic-gate return (retval);
20197c478bd9Sstevel@tonic-gate }
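
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * dynamic-reconfiguration path would invoke cpu_configure() with cpu_lock
 * held and deal with the errors described above.  "cpuid" here is whatever
 * processor id that caller is inserting.
 *
 *	int error;
 *
 *	mutex_enter(&cpu_lock);
 *	error = cpu_configure(cpuid);
 *	mutex_exit(&cpu_lock);
 *	if (error != 0) {
 *		... EINVAL means cpuid is out of range, EALREADY means the
 *		    CPU is already configured, anything else came from
 *		    mp_cpu_configure() or a cpu_setup callback ...
 *	}
 */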
20207c478bd9Sstevel@tonic-gate
20217c478bd9Sstevel@tonic-gate /*
20227c478bd9Sstevel@tonic-gate * Routine used to cleanup a CPU that has been powered off. This will
20237c478bd9Sstevel@tonic-gate * destroy all per-cpu information related to this cpu.
20247c478bd9Sstevel@tonic-gate */
20257c478bd9Sstevel@tonic-gate int
20267c478bd9Sstevel@tonic-gate cpu_unconfigure(int cpuid)
20277c478bd9Sstevel@tonic-gate {
20287c478bd9Sstevel@tonic-gate int error;
20297c478bd9Sstevel@tonic-gate
20307c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
20317c478bd9Sstevel@tonic-gate
20327c478bd9Sstevel@tonic-gate if (cpu[cpuid] == NULL) {
20337c478bd9Sstevel@tonic-gate return (ENODEV);
20347c478bd9Sstevel@tonic-gate }
20357c478bd9Sstevel@tonic-gate
20367c478bd9Sstevel@tonic-gate if (cpu[cpuid]->cpu_flags == 0) {
20377c478bd9Sstevel@tonic-gate return (EALREADY);
20387c478bd9Sstevel@tonic-gate }
20397c478bd9Sstevel@tonic-gate
20407c478bd9Sstevel@tonic-gate if ((cpu[cpuid]->cpu_flags & CPU_POWEROFF) == 0) {
20417c478bd9Sstevel@tonic-gate return (EBUSY);
20427c478bd9Sstevel@tonic-gate }
20437c478bd9Sstevel@tonic-gate
20447c478bd9Sstevel@tonic-gate if (cpu[cpuid]->cpu_props != NULL) {
20457c478bd9Sstevel@tonic-gate (void) nvlist_free(cpu[cpuid]->cpu_props);
20467c478bd9Sstevel@tonic-gate cpu[cpuid]->cpu_props = NULL;
20477c478bd9Sstevel@tonic-gate }
20487c478bd9Sstevel@tonic-gate
20497c478bd9Sstevel@tonic-gate error = cpu_state_change_hooks(cpuid, CPU_UNCONFIG, CPU_CONFIG);
20507c478bd9Sstevel@tonic-gate
20517c478bd9Sstevel@tonic-gate if (error != 0)
20527c478bd9Sstevel@tonic-gate return (error);
20537c478bd9Sstevel@tonic-gate
20547c478bd9Sstevel@tonic-gate return (mp_cpu_unconfigure(cpuid));
20557c478bd9Sstevel@tonic-gate }
20567c478bd9Sstevel@tonic-gate
20577c478bd9Sstevel@tonic-gate /*
20587c478bd9Sstevel@tonic-gate * Routines for registering and de-registering cpu_setup callback functions.
20597c478bd9Sstevel@tonic-gate *
20607c478bd9Sstevel@tonic-gate * Caller's context
20617c478bd9Sstevel@tonic-gate * These routines must not be called from a driver's attach(9E) or
20627c478bd9Sstevel@tonic-gate * detach(9E) entry point.
20637c478bd9Sstevel@tonic-gate *
20647c478bd9Sstevel@tonic-gate * NOTE: CPU callbacks should not block. They are called with cpu_lock held.
20657c478bd9Sstevel@tonic-gate */
20667c478bd9Sstevel@tonic-gate
20677c478bd9Sstevel@tonic-gate /*
20687c478bd9Sstevel@tonic-gate * Ideally, these would be dynamically allocated and put into a linked
20697c478bd9Sstevel@tonic-gate * list; however that is not feasible because the registration routine
20707c478bd9Sstevel@tonic-gate * has to be available before the kmem allocator is working (in fact,
20717c478bd9Sstevel@tonic-gate * it is called by the kmem allocator init code). In any case, there
20727c478bd9Sstevel@tonic-gate * are quite a few extra entries for future users.
20737c478bd9Sstevel@tonic-gate */
20741aa15ad6Sjkennedy #define NCPU_SETUPS 20
20757c478bd9Sstevel@tonic-gate
20767c478bd9Sstevel@tonic-gate struct cpu_setup {
20777c478bd9Sstevel@tonic-gate cpu_setup_func_t *func;
20787c478bd9Sstevel@tonic-gate void *arg;
20797c478bd9Sstevel@tonic-gate } cpu_setups[NCPU_SETUPS];
20807c478bd9Sstevel@tonic-gate
20817c478bd9Sstevel@tonic-gate void
20827c478bd9Sstevel@tonic-gate register_cpu_setup_func(cpu_setup_func_t *func, void *arg)
20837c478bd9Sstevel@tonic-gate {
20847c478bd9Sstevel@tonic-gate int i;
20857c478bd9Sstevel@tonic-gate
20867c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
20877c478bd9Sstevel@tonic-gate
20887c478bd9Sstevel@tonic-gate for (i = 0; i < NCPU_SETUPS; i++)
20897c478bd9Sstevel@tonic-gate if (cpu_setups[i].func == NULL)
20907c478bd9Sstevel@tonic-gate break;
20917c478bd9Sstevel@tonic-gate if (i >= NCPU_SETUPS)
20927c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "Ran out of cpu_setup callback entries");
20937c478bd9Sstevel@tonic-gate
20947c478bd9Sstevel@tonic-gate cpu_setups[i].func = func;
20957c478bd9Sstevel@tonic-gate cpu_setups[i].arg = arg;
20967c478bd9Sstevel@tonic-gate }
20977c478bd9Sstevel@tonic-gate
20987c478bd9Sstevel@tonic-gate void
20997c478bd9Sstevel@tonic-gate unregister_cpu_setup_func(cpu_setup_func_t *func, void *arg)
21007c478bd9Sstevel@tonic-gate {
21017c478bd9Sstevel@tonic-gate int i;
21027c478bd9Sstevel@tonic-gate
21037c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
21047c478bd9Sstevel@tonic-gate
21057c478bd9Sstevel@tonic-gate for (i = 0; i < NCPU_SETUPS; i++)
21067c478bd9Sstevel@tonic-gate if ((cpu_setups[i].func == func) &&
21077c478bd9Sstevel@tonic-gate (cpu_setups[i].arg == arg))
21087c478bd9Sstevel@tonic-gate break;
21097c478bd9Sstevel@tonic-gate if (i >= NCPU_SETUPS)
21107c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "Could not find cpu_setup callback to "
21117c478bd9Sstevel@tonic-gate "deregister");
21127c478bd9Sstevel@tonic-gate
21137c478bd9Sstevel@tonic-gate cpu_setups[i].func = NULL;
21147c478bd9Sstevel@tonic-gate cpu_setups[i].arg = 0;
21157c478bd9Sstevel@tonic-gate }
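
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * subsystem "foo" might track CPU state transitions as shown below.  The
 * callback is invoked with cpu_lock held and must not block; foo_cpu_setup
 * and its per-CPU bookkeeping are assumptions made for this example only.
 *
 *	static int
 *	foo_cpu_setup(cpu_setup_t what, int id, void *arg)
 *	{
 *		switch (what) {
 *		case CPU_CONFIG:
 *		case CPU_ON:
 *			... set up or enable foo's state for cpu[id] ...
 *			break;
 *		case CPU_OFF:
 *		case CPU_UNCONFIG:
 *			... quiesce or tear down foo's state for cpu[id] ...
 *			break;
 *		default:
 *			break;
 *		}
 *		return (0);
 *	}
 *
 *	mutex_enter(&cpu_lock);
 *	register_cpu_setup_func(foo_cpu_setup, NULL);
 *	mutex_exit(&cpu_lock);
 */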
21167c478bd9Sstevel@tonic-gate
21177c478bd9Sstevel@tonic-gate /*
21187c478bd9Sstevel@tonic-gate * Call any state change hooks for this CPU, ignoring any errors.
21197c478bd9Sstevel@tonic-gate */
21207c478bd9Sstevel@tonic-gate void
21217c478bd9Sstevel@tonic-gate cpu_state_change_notify(int id, cpu_setup_t what)
21227c478bd9Sstevel@tonic-gate {
21237c478bd9Sstevel@tonic-gate int i;
21247c478bd9Sstevel@tonic-gate
21257c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
21267c478bd9Sstevel@tonic-gate
21277c478bd9Sstevel@tonic-gate for (i = 0; i < NCPU_SETUPS; i++) {
21287c478bd9Sstevel@tonic-gate if (cpu_setups[i].func != NULL) {
21297c478bd9Sstevel@tonic-gate cpu_setups[i].func(what, id, cpu_setups[i].arg);
21307c478bd9Sstevel@tonic-gate }
21317c478bd9Sstevel@tonic-gate }
21327c478bd9Sstevel@tonic-gate }
21337c478bd9Sstevel@tonic-gate
21347c478bd9Sstevel@tonic-gate /*
21357c478bd9Sstevel@tonic-gate * Call any state change hooks for this CPU, undoing them if an error is found.
21367c478bd9Sstevel@tonic-gate */
21377c478bd9Sstevel@tonic-gate static int
21387c478bd9Sstevel@tonic-gate cpu_state_change_hooks(int id, cpu_setup_t what, cpu_setup_t undo)
21397c478bd9Sstevel@tonic-gate {
21407c478bd9Sstevel@tonic-gate int i;
21417c478bd9Sstevel@tonic-gate int retval = 0;
21427c478bd9Sstevel@tonic-gate
21437c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
21447c478bd9Sstevel@tonic-gate
21457c478bd9Sstevel@tonic-gate for (i = 0; i < NCPU_SETUPS; i++) {
21467c478bd9Sstevel@tonic-gate if (cpu_setups[i].func != NULL) {
21477c478bd9Sstevel@tonic-gate retval = cpu_setups[i].func(what, id,
21487c478bd9Sstevel@tonic-gate cpu_setups[i].arg);
21497c478bd9Sstevel@tonic-gate if (retval) {
21507c478bd9Sstevel@tonic-gate for (i--; i >= 0; i--) {
21517c478bd9Sstevel@tonic-gate if (cpu_setups[i].func != NULL)
21527c478bd9Sstevel@tonic-gate cpu_setups[i].func(undo,
21537c478bd9Sstevel@tonic-gate id, cpu_setups[i].arg);
21547c478bd9Sstevel@tonic-gate }
21557c478bd9Sstevel@tonic-gate break;
21567c478bd9Sstevel@tonic-gate }
21577c478bd9Sstevel@tonic-gate }
21587c478bd9Sstevel@tonic-gate }
21597c478bd9Sstevel@tonic-gate return (retval);
21607c478bd9Sstevel@tonic-gate }
21617c478bd9Sstevel@tonic-gate
21627c478bd9Sstevel@tonic-gate /*
21637c478bd9Sstevel@tonic-gate * Export information about this CPU via the kstat mechanism.
21647c478bd9Sstevel@tonic-gate */
21657c478bd9Sstevel@tonic-gate static struct {
21667c478bd9Sstevel@tonic-gate kstat_named_t ci_state;
21677c478bd9Sstevel@tonic-gate kstat_named_t ci_state_begin;
21687c478bd9Sstevel@tonic-gate kstat_named_t ci_cpu_type;
21697c478bd9Sstevel@tonic-gate kstat_named_t ci_fpu_type;
21707c478bd9Sstevel@tonic-gate kstat_named_t ci_clock_MHz;
21717c478bd9Sstevel@tonic-gate kstat_named_t ci_chip_id;
21727c478bd9Sstevel@tonic-gate kstat_named_t ci_implementation;
21737aec1d6eScindi kstat_named_t ci_brandstr;
21747aec1d6eScindi kstat_named_t ci_core_id;
21755cff7825Smh27603 kstat_named_t ci_curr_clock_Hz;
21765cff7825Smh27603 kstat_named_t ci_supp_freq_Hz;
2177b885580bSAlexander Kolbasov kstat_named_t ci_pg_id;
21787aec1d6eScindi #if defined(__sparcv9)
21797c478bd9Sstevel@tonic-gate kstat_named_t ci_device_ID;
21807c478bd9Sstevel@tonic-gate kstat_named_t ci_cpu_fru;
21817c478bd9Sstevel@tonic-gate #endif
2182ae115bc7Smrj #if defined(__x86)
21837aec1d6eScindi kstat_named_t ci_vendorstr;
21847aec1d6eScindi kstat_named_t ci_family;
21857aec1d6eScindi kstat_named_t ci_model;
21867aec1d6eScindi kstat_named_t ci_step;
21877aec1d6eScindi kstat_named_t ci_clogid;
218810569901Sgavinm kstat_named_t ci_pkg_core_id;
218920c794b3Sgavinm kstat_named_t ci_ncpuperchip;
219020c794b3Sgavinm kstat_named_t ci_ncoreperchip;
21910e751525SEric Saxe kstat_named_t ci_max_cstates;
21920e751525SEric Saxe kstat_named_t ci_curr_cstate;
2193b885580bSAlexander Kolbasov kstat_named_t ci_cacheid;
219489e921d5SKuriakose Kuruvilla kstat_named_t ci_sktstr;
21957aec1d6eScindi #endif
21967c478bd9Sstevel@tonic-gate } cpu_info_template = {
21977c478bd9Sstevel@tonic-gate { "state", KSTAT_DATA_CHAR },
21987c478bd9Sstevel@tonic-gate { "state_begin", KSTAT_DATA_LONG },
21997c478bd9Sstevel@tonic-gate { "cpu_type", KSTAT_DATA_CHAR },
22007c478bd9Sstevel@tonic-gate { "fpu_type", KSTAT_DATA_CHAR },
22017c478bd9Sstevel@tonic-gate { "clock_MHz", KSTAT_DATA_LONG },
22027c478bd9Sstevel@tonic-gate { "chip_id", KSTAT_DATA_LONG },
22037c478bd9Sstevel@tonic-gate { "implementation", KSTAT_DATA_STRING },
22047aec1d6eScindi { "brand", KSTAT_DATA_STRING },
22057aec1d6eScindi { "core_id", KSTAT_DATA_LONG },
22065cff7825Smh27603 { "current_clock_Hz", KSTAT_DATA_UINT64 },
22075cff7825Smh27603 { "supported_frequencies_Hz", KSTAT_DATA_STRING },
2208b885580bSAlexander Kolbasov { "pg_id", KSTAT_DATA_LONG },
22097aec1d6eScindi #if defined(__sparcv9)
22107c478bd9Sstevel@tonic-gate { "device_ID", KSTAT_DATA_UINT64 },
22117c478bd9Sstevel@tonic-gate { "cpu_fru", KSTAT_DATA_STRING },
22127c478bd9Sstevel@tonic-gate #endif
2213ae115bc7Smrj #if defined(__x86)
22147aec1d6eScindi { "vendor_id", KSTAT_DATA_STRING },
22157aec1d6eScindi { "family", KSTAT_DATA_INT32 },
22167aec1d6eScindi { "model", KSTAT_DATA_INT32 },
22177aec1d6eScindi { "stepping", KSTAT_DATA_INT32 },
22187aec1d6eScindi { "clog_id", KSTAT_DATA_INT32 },
221910569901Sgavinm { "pkg_core_id", KSTAT_DATA_LONG },
222020c794b3Sgavinm { "ncpu_per_chip", KSTAT_DATA_INT32 },
222120c794b3Sgavinm { "ncore_per_chip", KSTAT_DATA_INT32 },
22220e751525SEric Saxe { "supported_max_cstates", KSTAT_DATA_INT32 },
22230e751525SEric Saxe { "current_cstate", KSTAT_DATA_INT32 },
2224b885580bSAlexander Kolbasov { "cache_id", KSTAT_DATA_INT32 },
222589e921d5SKuriakose Kuruvilla { "socket_type", KSTAT_DATA_STRING },
22267aec1d6eScindi #endif
22277c478bd9Sstevel@tonic-gate };
22287c478bd9Sstevel@tonic-gate
22297c478bd9Sstevel@tonic-gate static kmutex_t cpu_info_template_lock;
22307c478bd9Sstevel@tonic-gate
22317c478bd9Sstevel@tonic-gate static int
22327c478bd9Sstevel@tonic-gate cpu_info_kstat_update(kstat_t *ksp, int rw)
22337c478bd9Sstevel@tonic-gate {
22347c478bd9Sstevel@tonic-gate cpu_t *cp = ksp->ks_private;
22357c478bd9Sstevel@tonic-gate const char *pi_state;
22367c478bd9Sstevel@tonic-gate
22377c478bd9Sstevel@tonic-gate if (rw == KSTAT_WRITE)
22387c478bd9Sstevel@tonic-gate return (EACCES);
22397c478bd9Sstevel@tonic-gate
22407c208b9dSSurya Prakki #if defined(__x86)
22417c208b9dSSurya Prakki /* Is the cpu still initialising itself? */
22427c208b9dSSurya Prakki if (cpuid_checkpass(cp, 1) == 0)
22437c208b9dSSurya Prakki return (ENXIO);
22447c208b9dSSurya Prakki #endif
22457c478bd9Sstevel@tonic-gate switch (cp->cpu_type_info.pi_state) {
22467c478bd9Sstevel@tonic-gate case P_ONLINE:
22477c478bd9Sstevel@tonic-gate pi_state = PS_ONLINE;
22487c478bd9Sstevel@tonic-gate break;
22497c478bd9Sstevel@tonic-gate case P_POWEROFF:
22507c478bd9Sstevel@tonic-gate pi_state = PS_POWEROFF;
22517c478bd9Sstevel@tonic-gate break;
22527c478bd9Sstevel@tonic-gate case P_NOINTR:
22537c478bd9Sstevel@tonic-gate pi_state = PS_NOINTR;
22547c478bd9Sstevel@tonic-gate break;
22557c478bd9Sstevel@tonic-gate case P_FAULTED:
22567c478bd9Sstevel@tonic-gate pi_state = PS_FAULTED;
22577c478bd9Sstevel@tonic-gate break;
22587c478bd9Sstevel@tonic-gate case P_SPARE:
22597c478bd9Sstevel@tonic-gate pi_state = PS_SPARE;
22607c478bd9Sstevel@tonic-gate break;
22617c478bd9Sstevel@tonic-gate case P_OFFLINE:
22627c478bd9Sstevel@tonic-gate pi_state = PS_OFFLINE;
22637c478bd9Sstevel@tonic-gate break;
22647c478bd9Sstevel@tonic-gate default:
22657c478bd9Sstevel@tonic-gate pi_state = "unknown";
22667c478bd9Sstevel@tonic-gate }
22677c478bd9Sstevel@tonic-gate (void) strcpy(cpu_info_template.ci_state.value.c, pi_state);
22687c478bd9Sstevel@tonic-gate cpu_info_template.ci_state_begin.value.l = cp->cpu_state_begin;
22697c478bd9Sstevel@tonic-gate (void) strncpy(cpu_info_template.ci_cpu_type.value.c,
22707c478bd9Sstevel@tonic-gate cp->cpu_type_info.pi_processor_type, 15);
22717c478bd9Sstevel@tonic-gate (void) strncpy(cpu_info_template.ci_fpu_type.value.c,
22727c478bd9Sstevel@tonic-gate cp->cpu_type_info.pi_fputypes, 15);
22737c478bd9Sstevel@tonic-gate cpu_info_template.ci_clock_MHz.value.l = cp->cpu_type_info.pi_clock;
2274fb2f18f8Sesaxe cpu_info_template.ci_chip_id.value.l =
2275fb2f18f8Sesaxe pg_plat_hw_instance_id(cp, PGHW_CHIP);
22767c478bd9Sstevel@tonic-gate kstat_named_setstr(&cpu_info_template.ci_implementation,
22777c478bd9Sstevel@tonic-gate cp->cpu_idstr);
22787aec1d6eScindi kstat_named_setstr(&cpu_info_template.ci_brandstr, cp->cpu_brandstr);
2279fb2f18f8Sesaxe cpu_info_template.ci_core_id.value.l = pg_plat_get_core_id(cp);
22805cff7825Smh27603 cpu_info_template.ci_curr_clock_Hz.value.ui64 =
2281cf74e62bSmh27603 cp->cpu_curr_clock;
2282b885580bSAlexander Kolbasov cpu_info_template.ci_pg_id.value.l =
2283b885580bSAlexander Kolbasov cp->cpu_pg && cp->cpu_pg->cmt_lineage ?
2284b885580bSAlexander Kolbasov cp->cpu_pg->cmt_lineage->pg_id : -1;
22855cff7825Smh27603 kstat_named_setstr(&cpu_info_template.ci_supp_freq_Hz,
2286cf74e62bSmh27603 cp->cpu_supp_freqs);
22877aec1d6eScindi #if defined(__sparcv9)
22887c478bd9Sstevel@tonic-gate cpu_info_template.ci_device_ID.value.ui64 =
22897c478bd9Sstevel@tonic-gate cpunodes[cp->cpu_id].device_id;
22907c478bd9Sstevel@tonic-gate kstat_named_setstr(&cpu_info_template.ci_cpu_fru, cpu_fru_fmri(cp));
22917c478bd9Sstevel@tonic-gate #endif
2292ae115bc7Smrj #if defined(__x86)
22937aec1d6eScindi kstat_named_setstr(&cpu_info_template.ci_vendorstr,
22947aec1d6eScindi cpuid_getvendorstr(cp));
22957aec1d6eScindi cpu_info_template.ci_family.value.l = cpuid_getfamily(cp);
22967aec1d6eScindi cpu_info_template.ci_model.value.l = cpuid_getmodel(cp);
22977aec1d6eScindi cpu_info_template.ci_step.value.l = cpuid_getstep(cp);
2298fb2f18f8Sesaxe cpu_info_template.ci_clogid.value.l = cpuid_get_clogid(cp);
229920c794b3Sgavinm cpu_info_template.ci_ncpuperchip.value.l = cpuid_get_ncpu_per_chip(cp);
230020c794b3Sgavinm cpu_info_template.ci_ncoreperchip.value.l =
230120c794b3Sgavinm cpuid_get_ncore_per_chip(cp);
230210569901Sgavinm cpu_info_template.ci_pkg_core_id.value.l = cpuid_get_pkgcoreid(cp);
23030e751525SEric Saxe cpu_info_template.ci_max_cstates.value.l = cp->cpu_m.max_cstates;
2304fb2caebeSRandy Fishel cpu_info_template.ci_curr_cstate.value.l = cpu_idle_get_cpu_state(cp);
2305b885580bSAlexander Kolbasov cpu_info_template.ci_cacheid.value.i32 = cpuid_get_cacheid(cp);
230689e921d5SKuriakose Kuruvilla kstat_named_setstr(&cpu_info_template.ci_sktstr,
230789e921d5SKuriakose Kuruvilla cpuid_getsocketstr(cp));
23087aec1d6eScindi #endif
23097aec1d6eScindi
23107c478bd9Sstevel@tonic-gate return (0);
23117c478bd9Sstevel@tonic-gate }
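
/*
 * Illustrative sketch (not part of the original source): a minimal userland
 * consumer could read one of these named kstats through libkstat(3LIB).
 * The module is "cpu_info" and the instance is the CPU id; CPU id 0 and the
 * omission of error handling are assumptions made for brevity here.
 *
 *	#include <stdio.h>
 *	#include <kstat.h>
 *
 *	kstat_ctl_t *kc = kstat_open();
 *	kstat_t *ksp = kstat_lookup(kc, "cpu_info", 0, NULL);
 *	(void) kstat_read(kc, ksp, NULL);
 *	kstat_named_t *kn = kstat_data_lookup(ksp, "clock_MHz");
 *	(void) printf("cpu0 clock_MHz = %ld\n", kn->value.l);
 *	(void) kstat_close(kc);
 */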
23127c478bd9Sstevel@tonic-gate
23137c478bd9Sstevel@tonic-gate static void
23147c478bd9Sstevel@tonic-gate cpu_info_kstat_create(cpu_t *cp)
23157c478bd9Sstevel@tonic-gate {
23167c478bd9Sstevel@tonic-gate zoneid_t zoneid;
23177c478bd9Sstevel@tonic-gate
23187c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
23197c478bd9Sstevel@tonic-gate
23207c478bd9Sstevel@tonic-gate if (pool_pset_enabled())
23217c478bd9Sstevel@tonic-gate zoneid = GLOBAL_ZONEID;
23227c478bd9Sstevel@tonic-gate else
23237c478bd9Sstevel@tonic-gate zoneid = ALL_ZONES;
23247c478bd9Sstevel@tonic-gate if ((cp->cpu_info_kstat = kstat_create_zone("cpu_info", cp->cpu_id,
23257c478bd9Sstevel@tonic-gate NULL, "misc", KSTAT_TYPE_NAMED,
23267c478bd9Sstevel@tonic-gate sizeof (cpu_info_template) / sizeof (kstat_named_t),
23277c208b9dSSurya Prakki KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_VAR_SIZE, zoneid)) != NULL) {
23287c478bd9Sstevel@tonic-gate cp->cpu_info_kstat->ks_data_size += 2 * CPU_IDSTRLEN;
23297aec1d6eScindi #if defined(__sparcv9)
23307c478bd9Sstevel@tonic-gate cp->cpu_info_kstat->ks_data_size +=
23317c478bd9Sstevel@tonic-gate strlen(cpu_fru_fmri(cp)) + 1;
23327c478bd9Sstevel@tonic-gate #endif
2333ae115bc7Smrj #if defined(__x86)
23347aec1d6eScindi cp->cpu_info_kstat->ks_data_size += X86_VENDOR_STRLEN;
23357aec1d6eScindi #endif
23364e93b15cSmh27603 if (cp->cpu_supp_freqs != NULL)
23374e93b15cSmh27603 cp->cpu_info_kstat->ks_data_size +=
23384e93b15cSmh27603 strlen(cp->cpu_supp_freqs) + 1;
23397c478bd9Sstevel@tonic-gate cp->cpu_info_kstat->ks_lock = &cpu_info_template_lock;
23407c478bd9Sstevel@tonic-gate cp->cpu_info_kstat->ks_data = &cpu_info_template;
23417c478bd9Sstevel@tonic-gate cp->cpu_info_kstat->ks_private = cp;
23427c478bd9Sstevel@tonic-gate cp->cpu_info_kstat->ks_update = cpu_info_kstat_update;
23437c478bd9Sstevel@tonic-gate kstat_install(cp->cpu_info_kstat);
23447c478bd9Sstevel@tonic-gate }
23457c478bd9Sstevel@tonic-gate }
23467c478bd9Sstevel@tonic-gate
23477c478bd9Sstevel@tonic-gate static void
23487c478bd9Sstevel@tonic-gate cpu_info_kstat_destroy(cpu_t *cp)
23497c478bd9Sstevel@tonic-gate {
23507c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
23517c478bd9Sstevel@tonic-gate
23527c478bd9Sstevel@tonic-gate kstat_delete(cp->cpu_info_kstat);
23537c478bd9Sstevel@tonic-gate cp->cpu_info_kstat = NULL;
23547c478bd9Sstevel@tonic-gate }
23557c478bd9Sstevel@tonic-gate
23567c478bd9Sstevel@tonic-gate /*
23577c478bd9Sstevel@tonic-gate * Create and install kstats for the boot CPU.
23587c478bd9Sstevel@tonic-gate */
23597c478bd9Sstevel@tonic-gate void
23607c478bd9Sstevel@tonic-gate cpu_kstat_init(cpu_t *cp)
23617c478bd9Sstevel@tonic-gate {
23627c478bd9Sstevel@tonic-gate mutex_enter(&cpu_lock);
23637c478bd9Sstevel@tonic-gate cpu_info_kstat_create(cp);
23647c478bd9Sstevel@tonic-gate cpu_stats_kstat_create(cp);
23657c478bd9Sstevel@tonic-gate cpu_create_intrstat(cp);
23667c478bd9Sstevel@tonic-gate cpu_set_state(cp);
23677c478bd9Sstevel@tonic-gate mutex_exit(&cpu_lock);
23687c478bd9Sstevel@tonic-gate }
23697c478bd9Sstevel@tonic-gate
23707c478bd9Sstevel@tonic-gate /*
23717c478bd9Sstevel@tonic-gate * Make visible to the zone that subset of the cpu information that would be
23727c478bd9Sstevel@tonic-gate * initialized when a cpu is configured (but still offline).
23737c478bd9Sstevel@tonic-gate */
23747c478bd9Sstevel@tonic-gate void
23757c478bd9Sstevel@tonic-gate cpu_visibility_configure(cpu_t *cp, zone_t *zone)
23767c478bd9Sstevel@tonic-gate {
23777c478bd9Sstevel@tonic-gate zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
23787c478bd9Sstevel@tonic-gate
23797c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
23807c478bd9Sstevel@tonic-gate ASSERT(pool_pset_enabled());
23817c478bd9Sstevel@tonic-gate ASSERT(cp != NULL);
23827c478bd9Sstevel@tonic-gate
23837c478bd9Sstevel@tonic-gate if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
23847c478bd9Sstevel@tonic-gate zone->zone_ncpus++;
23857c478bd9Sstevel@tonic-gate ASSERT(zone->zone_ncpus <= ncpus);
23867c478bd9Sstevel@tonic-gate }
23877c478bd9Sstevel@tonic-gate if (cp->cpu_info_kstat != NULL)
23887c478bd9Sstevel@tonic-gate kstat_zone_add(cp->cpu_info_kstat, zoneid);
23897c478bd9Sstevel@tonic-gate }
23907c478bd9Sstevel@tonic-gate
23917c478bd9Sstevel@tonic-gate /*
23927c478bd9Sstevel@tonic-gate * Make visible to the zone that subset of the cpu information that would be
23937c478bd9Sstevel@tonic-gate * initialized when a previously configured cpu is onlined.
23947c478bd9Sstevel@tonic-gate */
23957c478bd9Sstevel@tonic-gate void
23967c478bd9Sstevel@tonic-gate cpu_visibility_online(cpu_t *cp, zone_t *zone)
23977c478bd9Sstevel@tonic-gate {
23987c478bd9Sstevel@tonic-gate kstat_t *ksp;
23997c478bd9Sstevel@tonic-gate char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */
24007c478bd9Sstevel@tonic-gate zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
24017c478bd9Sstevel@tonic-gate processorid_t cpun;
24027c478bd9Sstevel@tonic-gate
24037c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
24047c478bd9Sstevel@tonic-gate ASSERT(pool_pset_enabled());
24057c478bd9Sstevel@tonic-gate ASSERT(cp != NULL);
24067c478bd9Sstevel@tonic-gate ASSERT(cpu_is_active(cp));
24077c478bd9Sstevel@tonic-gate
24087c478bd9Sstevel@tonic-gate cpun = cp->cpu_id;
24097c478bd9Sstevel@tonic-gate if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
24107c478bd9Sstevel@tonic-gate zone->zone_ncpus_online++;
24117c478bd9Sstevel@tonic-gate ASSERT(zone->zone_ncpus_online <= ncpus_online);
24127c478bd9Sstevel@tonic-gate }
24137c478bd9Sstevel@tonic-gate (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun);
24147c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES))
24157c478bd9Sstevel@tonic-gate != NULL) {
24167c478bd9Sstevel@tonic-gate kstat_zone_add(ksp, zoneid);
24177c478bd9Sstevel@tonic-gate kstat_rele(ksp);
24187c478bd9Sstevel@tonic-gate }
24197c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) {
24207c478bd9Sstevel@tonic-gate kstat_zone_add(ksp, zoneid);
24217c478bd9Sstevel@tonic-gate kstat_rele(ksp);
24227c478bd9Sstevel@tonic-gate }
24237c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) {
24247c478bd9Sstevel@tonic-gate kstat_zone_add(ksp, zoneid);
24257c478bd9Sstevel@tonic-gate kstat_rele(ksp);
24267c478bd9Sstevel@tonic-gate }
24277c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) !=
24287c478bd9Sstevel@tonic-gate NULL) {
24297c478bd9Sstevel@tonic-gate kstat_zone_add(ksp, zoneid);
24307c478bd9Sstevel@tonic-gate kstat_rele(ksp);
24317c478bd9Sstevel@tonic-gate }
24327c478bd9Sstevel@tonic-gate }
24337c478bd9Sstevel@tonic-gate
24347c478bd9Sstevel@tonic-gate /*
24357c478bd9Sstevel@tonic-gate * Update relevant kstats such that cpu is now visible to processes
24367c478bd9Sstevel@tonic-gate * executing in specified zone.
24377c478bd9Sstevel@tonic-gate */
24387c478bd9Sstevel@tonic-gate void
24397c478bd9Sstevel@tonic-gate cpu_visibility_add(cpu_t *cp, zone_t *zone)
24407c478bd9Sstevel@tonic-gate {
24417c478bd9Sstevel@tonic-gate cpu_visibility_configure(cp, zone);
24427c478bd9Sstevel@tonic-gate if (cpu_is_active(cp))
24437c478bd9Sstevel@tonic-gate cpu_visibility_online(cp, zone);
24447c478bd9Sstevel@tonic-gate }
24457c478bd9Sstevel@tonic-gate
24467c478bd9Sstevel@tonic-gate /*
24477c478bd9Sstevel@tonic-gate * Make invisible to the zone that subset of the cpu information that would be
24487c478bd9Sstevel@tonic-gate * torn down when a previously offlined cpu is unconfigured.
24497c478bd9Sstevel@tonic-gate */
24507c478bd9Sstevel@tonic-gate void
24517c478bd9Sstevel@tonic-gate cpu_visibility_unconfigure(cpu_t *cp, zone_t *zone)
24527c478bd9Sstevel@tonic-gate {
24537c478bd9Sstevel@tonic-gate zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
24547c478bd9Sstevel@tonic-gate
24557c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
24567c478bd9Sstevel@tonic-gate ASSERT(pool_pset_enabled());
24577c478bd9Sstevel@tonic-gate ASSERT(cp != NULL);
24587c478bd9Sstevel@tonic-gate
24597c478bd9Sstevel@tonic-gate if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
24607c478bd9Sstevel@tonic-gate ASSERT(zone->zone_ncpus != 0);
24617c478bd9Sstevel@tonic-gate zone->zone_ncpus--;
24627c478bd9Sstevel@tonic-gate }
24637c478bd9Sstevel@tonic-gate if (cp->cpu_info_kstat)
24647c478bd9Sstevel@tonic-gate kstat_zone_remove(cp->cpu_info_kstat, zoneid);
24657c478bd9Sstevel@tonic-gate }
24667c478bd9Sstevel@tonic-gate
24677c478bd9Sstevel@tonic-gate /*
24687c478bd9Sstevel@tonic-gate * Make invisible to the zone that subset of the cpu information that would be
24697c478bd9Sstevel@tonic-gate * torn down when a cpu is offlined (but still configured).
24707c478bd9Sstevel@tonic-gate */
24717c478bd9Sstevel@tonic-gate void
24727c478bd9Sstevel@tonic-gate cpu_visibility_offline(cpu_t *cp, zone_t *zone)
24737c478bd9Sstevel@tonic-gate {
24747c478bd9Sstevel@tonic-gate kstat_t *ksp;
24757c478bd9Sstevel@tonic-gate char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */
24767c478bd9Sstevel@tonic-gate zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
24777c478bd9Sstevel@tonic-gate processorid_t cpun;
24787c478bd9Sstevel@tonic-gate
24797c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
24807c478bd9Sstevel@tonic-gate ASSERT(pool_pset_enabled());
24817c478bd9Sstevel@tonic-gate ASSERT(cp != NULL);
24827c478bd9Sstevel@tonic-gate ASSERT(cpu_is_active(cp));
24837c478bd9Sstevel@tonic-gate
24847c478bd9Sstevel@tonic-gate cpun = cp->cpu_id;
24857c478bd9Sstevel@tonic-gate if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
24867c478bd9Sstevel@tonic-gate ASSERT(zone->zone_ncpus_online != 0);
24877c478bd9Sstevel@tonic-gate zone->zone_ncpus_online--;
24887c478bd9Sstevel@tonic-gate }
24897c478bd9Sstevel@tonic-gate
24907c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) !=
24917c478bd9Sstevel@tonic-gate NULL) {
24927c478bd9Sstevel@tonic-gate kstat_zone_remove(ksp, zoneid);
24937c478bd9Sstevel@tonic-gate kstat_rele(ksp);
24947c478bd9Sstevel@tonic-gate }
24957c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) {
24967c478bd9Sstevel@tonic-gate kstat_zone_remove(ksp, zoneid);
24977c478bd9Sstevel@tonic-gate kstat_rele(ksp);
24987c478bd9Sstevel@tonic-gate }
24997c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) {
25007c478bd9Sstevel@tonic-gate kstat_zone_remove(ksp, zoneid);
25017c478bd9Sstevel@tonic-gate kstat_rele(ksp);
25027c478bd9Sstevel@tonic-gate }
25037c478bd9Sstevel@tonic-gate (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun);
25047c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES))
25057c478bd9Sstevel@tonic-gate != NULL) {
25067c478bd9Sstevel@tonic-gate kstat_zone_remove(ksp, zoneid);
25077c478bd9Sstevel@tonic-gate kstat_rele(ksp);
25087c478bd9Sstevel@tonic-gate }
25097c478bd9Sstevel@tonic-gate }
25107c478bd9Sstevel@tonic-gate
25117c478bd9Sstevel@tonic-gate /*
25127c478bd9Sstevel@tonic-gate * Update relevant kstats such that cpu is no longer visible to processes
25137c478bd9Sstevel@tonic-gate * executing in specified zone.
25147c478bd9Sstevel@tonic-gate */
25157c478bd9Sstevel@tonic-gate void
25167c478bd9Sstevel@tonic-gate cpu_visibility_remove(cpu_t *cp, zone_t *zone)
25177c478bd9Sstevel@tonic-gate {
25187c478bd9Sstevel@tonic-gate if (cpu_is_active(cp))
25197c478bd9Sstevel@tonic-gate cpu_visibility_offline(cp, zone);
25207c478bd9Sstevel@tonic-gate cpu_visibility_unconfigure(cp, zone);
25217c478bd9Sstevel@tonic-gate }
25227c478bd9Sstevel@tonic-gate
25237c478bd9Sstevel@tonic-gate /*
25247c478bd9Sstevel@tonic-gate * Bind a thread to a CPU as requested.
25257c478bd9Sstevel@tonic-gate */
25267c478bd9Sstevel@tonic-gate int
25277c478bd9Sstevel@tonic-gate cpu_bind_thread(kthread_id_t tp, processorid_t bind, processorid_t *obind,
25287c478bd9Sstevel@tonic-gate int *error)
25297c478bd9Sstevel@tonic-gate {
25307c478bd9Sstevel@tonic-gate processorid_t binding;
25310b70c467Sakolb cpu_t *cp = NULL;
25327c478bd9Sstevel@tonic-gate
25337c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
25347c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));
25357c478bd9Sstevel@tonic-gate
25367c478bd9Sstevel@tonic-gate thread_lock(tp);
25377c478bd9Sstevel@tonic-gate
25387c478bd9Sstevel@tonic-gate /*
25397c478bd9Sstevel@tonic-gate * Record old binding, but change the obind, which was initialized
25407c478bd9Sstevel@tonic-gate * to PBIND_NONE, only if this thread has a binding. This avoids
25417c478bd9Sstevel@tonic-gate * reporting PBIND_NONE for a process when some LWPs are bound.
25427c478bd9Sstevel@tonic-gate */
25437c478bd9Sstevel@tonic-gate binding = tp->t_bind_cpu;
25443eea75d7SAlexander Kolbasov if (binding != PBIND_NONE)
25453eea75d7SAlexander Kolbasov *obind = binding; /* record old binding */
25467c478bd9Sstevel@tonic-gate
25470b70c467Sakolb switch (bind) {
25480b70c467Sakolb case PBIND_QUERY:
25490b70c467Sakolb /* Just return the old binding */
25507c478bd9Sstevel@tonic-gate thread_unlock(tp);
25517c478bd9Sstevel@tonic-gate return (0);
25520b70c467Sakolb
25530b70c467Sakolb case PBIND_QUERY_TYPE:
25540b70c467Sakolb /* Return the binding type */
25550b70c467Sakolb *obind = TB_CPU_IS_SOFT(tp) ? PBIND_SOFT : PBIND_HARD;
25560b70c467Sakolb thread_unlock(tp);
25570b70c467Sakolb return (0);
25580b70c467Sakolb
25590b70c467Sakolb case PBIND_SOFT:
25600b70c467Sakolb /*
25610b70c467Sakolb * Set soft binding for this thread and return the actual
25620b70c467Sakolb * binding
25630b70c467Sakolb */
25640b70c467Sakolb TB_CPU_SOFT_SET(tp);
25650b70c467Sakolb thread_unlock(tp);
25660b70c467Sakolb return (0);
25670b70c467Sakolb
25680b70c467Sakolb case PBIND_HARD:
25690b70c467Sakolb /*
25700b70c467Sakolb * Set hard binding for this thread and return the actual
25710b70c467Sakolb * binding
25720b70c467Sakolb */
25730b70c467Sakolb TB_CPU_HARD_SET(tp);
25740b70c467Sakolb thread_unlock(tp);
25750b70c467Sakolb return (0);
25760b70c467Sakolb
25770b70c467Sakolb default:
25780b70c467Sakolb break;
25797c478bd9Sstevel@tonic-gate }
25807c478bd9Sstevel@tonic-gate
25817c478bd9Sstevel@tonic-gate /*
25827c478bd9Sstevel@tonic-gate * If this thread/LWP cannot be bound because of permission
25837c478bd9Sstevel@tonic-gate * problems, just note that and return success so that the
25847c478bd9Sstevel@tonic-gate * other threads/LWPs will be bound. This is the way
25857c478bd9Sstevel@tonic-gate * processor_bind() is defined to work.
25867c478bd9Sstevel@tonic-gate *
25877c478bd9Sstevel@tonic-gate * Binding will get EPERM if the thread is of system class
25887c478bd9Sstevel@tonic-gate * or hasprocperm() fails.
25897c478bd9Sstevel@tonic-gate */
25907c478bd9Sstevel@tonic-gate if (tp->t_cid == 0 || !hasprocperm(tp->t_cred, CRED())) {
25917c478bd9Sstevel@tonic-gate *error = EPERM;
25927c478bd9Sstevel@tonic-gate thread_unlock(tp);
25937c478bd9Sstevel@tonic-gate return (0);
25947c478bd9Sstevel@tonic-gate }
25957c478bd9Sstevel@tonic-gate
25967c478bd9Sstevel@tonic-gate binding = bind;
25977c478bd9Sstevel@tonic-gate if (binding != PBIND_NONE) {
25980b70c467Sakolb cp = cpu_get((processorid_t)binding);
25997c478bd9Sstevel@tonic-gate /*
26000b70c467Sakolb * Make sure binding is valid and is in the right partition.
26017c478bd9Sstevel@tonic-gate */
26020b70c467Sakolb if (cp == NULL || tp->t_cpupart != cp->cpu_part) {
26037c478bd9Sstevel@tonic-gate *error = EINVAL;
26047c478bd9Sstevel@tonic-gate thread_unlock(tp);
26057c478bd9Sstevel@tonic-gate return (0);
26067c478bd9Sstevel@tonic-gate }
26077c478bd9Sstevel@tonic-gate }
26087c478bd9Sstevel@tonic-gate tp->t_bind_cpu = binding; /* set new binding */
26097c478bd9Sstevel@tonic-gate
26107c478bd9Sstevel@tonic-gate /*
26117c478bd9Sstevel@tonic-gate * If there is no system-set reason for affinity, set
26127c478bd9Sstevel@tonic-gate * the t_bound_cpu field to reflect the binding.
26137c478bd9Sstevel@tonic-gate */
26147c478bd9Sstevel@tonic-gate if (tp->t_affinitycnt == 0) {
26157c478bd9Sstevel@tonic-gate if (binding == PBIND_NONE) {
26167c478bd9Sstevel@tonic-gate /*
26177c478bd9Sstevel@tonic-gate * We may need to adjust disp_max_unbound_pri
26187c478bd9Sstevel@tonic-gate * since we're becoming unbound.
26197c478bd9Sstevel@tonic-gate */
26207c478bd9Sstevel@tonic-gate disp_adjust_unbound_pri(tp);
26217c478bd9Sstevel@tonic-gate
26227c478bd9Sstevel@tonic-gate tp->t_bound_cpu = NULL; /* set new binding */
26237c478bd9Sstevel@tonic-gate
26247c478bd9Sstevel@tonic-gate /*
26257c478bd9Sstevel@tonic-gate * Move thread to lgroup with strongest affinity
26267c478bd9Sstevel@tonic-gate * after unbinding
26277c478bd9Sstevel@tonic-gate */
26287c478bd9Sstevel@tonic-gate if (tp->t_lgrp_affinity)
26297c478bd9Sstevel@tonic-gate lgrp_move_thread(tp,
26307c478bd9Sstevel@tonic-gate lgrp_choose(tp, tp->t_cpupart), 1);
26317c478bd9Sstevel@tonic-gate
26327c478bd9Sstevel@tonic-gate if (tp->t_state == TS_ONPROC &&
26337c478bd9Sstevel@tonic-gate tp->t_cpu->cpu_part != tp->t_cpupart)
26347c478bd9Sstevel@tonic-gate cpu_surrender(tp);
26357c478bd9Sstevel@tonic-gate } else {
26367c478bd9Sstevel@tonic-gate lpl_t *lpl;
26377c478bd9Sstevel@tonic-gate
26387c478bd9Sstevel@tonic-gate tp->t_bound_cpu = cp;
26397c478bd9Sstevel@tonic-gate ASSERT(cp->cpu_lpl != NULL);
26407c478bd9Sstevel@tonic-gate
26417c478bd9Sstevel@tonic-gate /*
26427c478bd9Sstevel@tonic-gate * Set home to lgroup with most affinity containing CPU
26437c478bd9Sstevel@tonic-gate * that thread is being bound or minimum bounding
26447c478bd9Sstevel@tonic-gate * lgroup if no affinities set
26457c478bd9Sstevel@tonic-gate */
26467c478bd9Sstevel@tonic-gate if (tp->t_lgrp_affinity)
264703400a71Sjjc lpl = lgrp_affinity_best(tp, tp->t_cpupart,
264803400a71Sjjc LGRP_NONE, B_FALSE);
26497c478bd9Sstevel@tonic-gate else
26507c478bd9Sstevel@tonic-gate lpl = cp->cpu_lpl;
26517c478bd9Sstevel@tonic-gate
26527c478bd9Sstevel@tonic-gate if (tp->t_lpl != lpl) {
26537c478bd9Sstevel@tonic-gate /* can't grab cpu_lock */
26547c478bd9Sstevel@tonic-gate lgrp_move_thread(tp, lpl, 1);
26557c478bd9Sstevel@tonic-gate }
26567c478bd9Sstevel@tonic-gate
26577c478bd9Sstevel@tonic-gate /*
26587c478bd9Sstevel@tonic-gate * Make the thread switch to the bound CPU.
26597c478bd9Sstevel@tonic-gate * If the thread is runnable, we need to
26607c478bd9Sstevel@tonic-gate * requeue it even if t_cpu is already set
26617c478bd9Sstevel@tonic-gate * to the right CPU, since it may be on a
26627c478bd9Sstevel@tonic-gate * kpreempt queue and need to move to a local
26637c478bd9Sstevel@tonic-gate * queue. We could check t_disp_queue to
26647c478bd9Sstevel@tonic-gate * avoid unnecessary overhead if it's already
26657c478bd9Sstevel@tonic-gate * on the right queue, but since this isn't
26667c478bd9Sstevel@tonic-gate * a performance-critical operation it doesn't
26677c478bd9Sstevel@tonic-gate * seem worth the extra code and complexity.
26687c478bd9Sstevel@tonic-gate *
26697c478bd9Sstevel@tonic-gate * If the thread is weakbound to the cpu then it will
26707c478bd9Sstevel@tonic-gate * resist the new binding request until the weak
26717c478bd9Sstevel@tonic-gate * binding drops. The cpu_surrender or requeueing
26727c478bd9Sstevel@tonic-gate * below could be skipped in such cases (since it
26737c478bd9Sstevel@tonic-gate * will have no effect), but that would require
26747c478bd9Sstevel@tonic-gate * thread_allowmigrate to acquire thread_lock so
26757c478bd9Sstevel@tonic-gate * we'll take the very occasional hit here instead.
26767c478bd9Sstevel@tonic-gate */
26777c478bd9Sstevel@tonic-gate if (tp->t_state == TS_ONPROC) {
26787c478bd9Sstevel@tonic-gate cpu_surrender(tp);
26797c478bd9Sstevel@tonic-gate } else if (tp->t_state == TS_RUN) {
26807c478bd9Sstevel@tonic-gate cpu_t *ocp = tp->t_cpu;
26817c478bd9Sstevel@tonic-gate
26827c478bd9Sstevel@tonic-gate (void) dispdeq(tp);
26837c478bd9Sstevel@tonic-gate setbackdq(tp);
26847c478bd9Sstevel@tonic-gate /*
26857c478bd9Sstevel@tonic-gate * Either on the bound CPU's disp queue now,
26867c478bd9Sstevel@tonic-gate * or swapped out or on the swap queue.
26877c478bd9Sstevel@tonic-gate */
26887c478bd9Sstevel@tonic-gate ASSERT(tp->t_disp_queue == cp->cpu_disp ||
26897c478bd9Sstevel@tonic-gate tp->t_weakbound_cpu == ocp ||
26907c478bd9Sstevel@tonic-gate (tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ))
26917c478bd9Sstevel@tonic-gate != TS_LOAD);
26927c478bd9Sstevel@tonic-gate }
26937c478bd9Sstevel@tonic-gate }
26947c478bd9Sstevel@tonic-gate }
26957c478bd9Sstevel@tonic-gate
26967c478bd9Sstevel@tonic-gate /*
26977c478bd9Sstevel@tonic-gate * Our binding has changed; set TP_CHANGEBIND.
26987c478bd9Sstevel@tonic-gate */
26997c478bd9Sstevel@tonic-gate tp->t_proc_flag |= TP_CHANGEBIND;
27007c478bd9Sstevel@tonic-gate aston(tp);
27017c478bd9Sstevel@tonic-gate
27027c478bd9Sstevel@tonic-gate thread_unlock(tp);
27037c478bd9Sstevel@tonic-gate
27047c478bd9Sstevel@tonic-gate return (0);
27057c478bd9Sstevel@tonic-gate }
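
/*
 * Illustrative sketch (not part of the original source): callers such as the
 * processor_bind() path are expected to hold both cpu_lock and the target
 * process's p_lock around cpu_bind_thread().  Querying a thread's existing
 * binding might look like the following, where "tp" is an assumed kthread
 * pointer supplied by the caller.
 *
 *	processorid_t obind = PBIND_NONE;
 *	int berr = 0;
 *
 *	mutex_enter(&cpu_lock);
 *	mutex_enter(&ttoproc(tp)->p_lock);
 *	(void) cpu_bind_thread(tp, PBIND_QUERY, &obind, &berr);
 *	mutex_exit(&ttoproc(tp)->p_lock);
 *	mutex_exit(&cpu_lock);
 */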
27067c478bd9Sstevel@tonic-gate
27077c478bd9Sstevel@tonic-gate #if CPUSET_WORDS > 1
27087c478bd9Sstevel@tonic-gate
27097c478bd9Sstevel@tonic-gate /*
27107c478bd9Sstevel@tonic-gate * Functions for implementing cpuset operations when a cpuset is more
27117c478bd9Sstevel@tonic-gate * than one word. On platforms where a cpuset is a single word these
27127c478bd9Sstevel@tonic-gate * are implemented as macros in cpuvar.h.
27137c478bd9Sstevel@tonic-gate */
27147c478bd9Sstevel@tonic-gate
27157c478bd9Sstevel@tonic-gate void
27167c478bd9Sstevel@tonic-gate cpuset_all(cpuset_t *s)
27177c478bd9Sstevel@tonic-gate {
27187c478bd9Sstevel@tonic-gate int i;
27197c478bd9Sstevel@tonic-gate
27207c478bd9Sstevel@tonic-gate for (i = 0; i < CPUSET_WORDS; i++)
27217c478bd9Sstevel@tonic-gate s->cpub[i] = ~0UL;
27227c478bd9Sstevel@tonic-gate }
27237c478bd9Sstevel@tonic-gate
27247c478bd9Sstevel@tonic-gate void
27257c478bd9Sstevel@tonic-gate cpuset_all_but(cpuset_t *s, uint_t cpu)
27267c478bd9Sstevel@tonic-gate {
27277c478bd9Sstevel@tonic-gate cpuset_all(s);
27287c478bd9Sstevel@tonic-gate CPUSET_DEL(*s, cpu);
27297c478bd9Sstevel@tonic-gate }
27307c478bd9Sstevel@tonic-gate
27317c478bd9Sstevel@tonic-gate void
27327c478bd9Sstevel@tonic-gate cpuset_only(cpuset_t *s, uint_t cpu)
27337c478bd9Sstevel@tonic-gate {
27347c478bd9Sstevel@tonic-gate CPUSET_ZERO(*s);
27357c478bd9Sstevel@tonic-gate CPUSET_ADD(*s, cpu);
27367c478bd9Sstevel@tonic-gate }
27377c478bd9Sstevel@tonic-gate
27387c478bd9Sstevel@tonic-gate int
27397c478bd9Sstevel@tonic-gate cpuset_isnull(cpuset_t *s)
27407c478bd9Sstevel@tonic-gate {
27417c478bd9Sstevel@tonic-gate int i;
27427c478bd9Sstevel@tonic-gate
27437c478bd9Sstevel@tonic-gate for (i = 0; i < CPUSET_WORDS; i++)
27447c478bd9Sstevel@tonic-gate if (s->cpub[i] != 0)
27457c478bd9Sstevel@tonic-gate return (0);
27467c478bd9Sstevel@tonic-gate return (1);
27477c478bd9Sstevel@tonic-gate }
27487c478bd9Sstevel@tonic-gate
27497c478bd9Sstevel@tonic-gate int
27507c478bd9Sstevel@tonic-gate cpuset_cmp(cpuset_t *s1, cpuset_t *s2)
27517c478bd9Sstevel@tonic-gate {
27527c478bd9Sstevel@tonic-gate int i;
27537c478bd9Sstevel@tonic-gate
27547c478bd9Sstevel@tonic-gate for (i = 0; i < CPUSET_WORDS; i++)
27557c478bd9Sstevel@tonic-gate if (s1->cpub[i] != s2->cpub[i])
27567c478bd9Sstevel@tonic-gate return (0);
27577c478bd9Sstevel@tonic-gate return (1);
27587c478bd9Sstevel@tonic-gate }
27597c478bd9Sstevel@tonic-gate
27607c478bd9Sstevel@tonic-gate uint_t
27617c478bd9Sstevel@tonic-gate cpuset_find(cpuset_t *s)
27627c478bd9Sstevel@tonic-gate {
27637c478bd9Sstevel@tonic-gate
27647c478bd9Sstevel@tonic-gate uint_t i;
27657c478bd9Sstevel@tonic-gate uint_t cpu = (uint_t)-1;
27667c478bd9Sstevel@tonic-gate
27677c478bd9Sstevel@tonic-gate /*
27687c478bd9Sstevel@tonic-gate * Find a cpu in the cpuset
27697c478bd9Sstevel@tonic-gate */
277025cf1a30Sjl139090 for (i = 0; i < CPUSET_WORDS; i++) {
27717c478bd9Sstevel@tonic-gate cpu = (uint_t)(lowbit(s->cpub[i]) - 1);
277225cf1a30Sjl139090 if (cpu != (uint_t)-1) {
277325cf1a30Sjl139090 cpu += i * BT_NBIPUL;
277425cf1a30Sjl139090 break;
277525cf1a30Sjl139090 }
277625cf1a30Sjl139090 }
27777c478bd9Sstevel@tonic-gate return (cpu);
27787c478bd9Sstevel@tonic-gate }
27797c478bd9Sstevel@tonic-gate
278000423197Sha137994 void
278100423197Sha137994 cpuset_bounds(cpuset_t *s, uint_t *smallestid, uint_t *largestid)
278200423197Sha137994 {
278300423197Sha137994 int i, j;
278400423197Sha137994 uint_t bit;
278500423197Sha137994
278600423197Sha137994 /*
278700423197Sha137994 * First, find the smallest cpu id in the set.
278800423197Sha137994 */
278900423197Sha137994 for (i = 0; i < CPUSET_WORDS; i++) {
279000423197Sha137994 if (s->cpub[i] != 0) {
279100423197Sha137994 bit = (uint_t)(lowbit(s->cpub[i]) - 1);
279200423197Sha137994 ASSERT(bit != (uint_t)-1);
279300423197Sha137994 *smallestid = bit + (i * BT_NBIPUL);
279400423197Sha137994
279500423197Sha137994 /*
279600423197Sha137994 * Now find the largest cpu id in
279700423197Sha137994 * the set and return immediately.
279800423197Sha137994 * Done in an inner loop to avoid
279900423197Sha137994 * having to break out of the first
280000423197Sha137994 * loop.
280100423197Sha137994 */
280200423197Sha137994 for (j = CPUSET_WORDS - 1; j >= i; j--) {
280300423197Sha137994 if (s->cpub[j] != 0) {
280400423197Sha137994 bit = (uint_t)(highbit(s->cpub[j]) - 1);
280500423197Sha137994 ASSERT(bit != (uint_t)-1);
280600423197Sha137994 *largestid = bit + (j * BT_NBIPUL);
280700423197Sha137994 ASSERT(*largestid >= *smallestid);
280800423197Sha137994 return;
280900423197Sha137994 }
281000423197Sha137994 }
281100423197Sha137994
281200423197Sha137994 /*
281300423197Sha137994 * If this code is reached, a
281400423197Sha137994 * smallestid was found, but not a
281500423197Sha137994 * largestid. The cpuset must have
281600423197Sha137994 * been changed during the course
281700423197Sha137994 * of this function call.
281800423197Sha137994 */
281900423197Sha137994 ASSERT(0);
282000423197Sha137994 }
282100423197Sha137994 }
282200423197Sha137994 *smallestid = *largestid = CPUSET_NOTINSET;
282300423197Sha137994 }
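
/*
 * Illustrative sketch (not part of the original source): a caller that needs
 * to visit every CPU id present in a cpuset could combine the routines above
 * (or the corresponding CPUSET_* macros on single-word platforms).  The set
 * contents chosen below are purely hypothetical.
 *
 *	cpuset_t set;
 *	uint_t lo, hi, id;
 *
 *	cpuset_all_but(&set, 3);	(every CPU except id 3)
 *	cpuset_bounds(&set, &lo, &hi);
 *	if (lo != CPUSET_NOTINSET) {
 *		for (id = lo; id <= hi; id++) {
 *			if (CPU_IN_SET(set, id))
 *				... operate on CPU "id" ...
 *		}
 *	}
 */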
282400423197Sha137994
28257c478bd9Sstevel@tonic-gate #endif /* CPUSET_WORDS */
28267c478bd9Sstevel@tonic-gate
28277c478bd9Sstevel@tonic-gate /*
28280b70c467Sakolb * Unbind threads bound to specified CPU.
28290b70c467Sakolb *
28300b70c467Sakolb * If `unbind_all_threads' is true, unbind all user threads bound to a given
28310b70c467Sakolb * CPU. Otherwise unbind all soft-bound user threads.
28327c478bd9Sstevel@tonic-gate */
28337c478bd9Sstevel@tonic-gate int
28340b70c467Sakolb cpu_unbind(processorid_t cpu, boolean_t unbind_all_threads)
28357c478bd9Sstevel@tonic-gate {
28367c478bd9Sstevel@tonic-gate processorid_t obind;
28377c478bd9Sstevel@tonic-gate kthread_t *tp;
28387c478bd9Sstevel@tonic-gate int ret = 0;
28397c478bd9Sstevel@tonic-gate proc_t *pp;
28407c478bd9Sstevel@tonic-gate int err, berr = 0;
28417c478bd9Sstevel@tonic-gate
28427c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
28437c478bd9Sstevel@tonic-gate
28447c478bd9Sstevel@tonic-gate mutex_enter(&pidlock);
28457c478bd9Sstevel@tonic-gate for (pp = practive; pp != NULL; pp = pp->p_next) {
28467c478bd9Sstevel@tonic-gate mutex_enter(&pp->p_lock);
28477c478bd9Sstevel@tonic-gate tp = pp->p_tlist;
28487c478bd9Sstevel@tonic-gate /*
28497c478bd9Sstevel@tonic-gate * Skip zombies, kernel processes, and processes in
28507c478bd9Sstevel@tonic-gate * other zones, if called from a non-global zone.
28517c478bd9Sstevel@tonic-gate */
28527c478bd9Sstevel@tonic-gate if (tp == NULL || (pp->p_flag & SSYS) ||
28537c478bd9Sstevel@tonic-gate !HASZONEACCESS(curproc, pp->p_zone->zone_id)) {
28547c478bd9Sstevel@tonic-gate mutex_exit(&pp->p_lock);
28557c478bd9Sstevel@tonic-gate continue;
28567c478bd9Sstevel@tonic-gate }
28577c478bd9Sstevel@tonic-gate do {
28587c478bd9Sstevel@tonic-gate if (tp->t_bind_cpu != cpu)
28597c478bd9Sstevel@tonic-gate continue;
28600b70c467Sakolb /*
28610b70c467Sakolb * Skip threads with hard binding when
28620b70c467Sakolb * `unbind_all_threads' is not specified.
28630b70c467Sakolb */
28640b70c467Sakolb if (!unbind_all_threads && TB_CPU_IS_HARD(tp))
28650b70c467Sakolb continue;
28667c478bd9Sstevel@tonic-gate err = cpu_bind_thread(tp, PBIND_NONE, &obind, &berr);
28677c478bd9Sstevel@tonic-gate if (ret == 0)
28687c478bd9Sstevel@tonic-gate ret = err;
28697c478bd9Sstevel@tonic-gate } while ((tp = tp->t_forw) != pp->p_tlist);
28707c478bd9Sstevel@tonic-gate mutex_exit(&pp->p_lock);
28717c478bd9Sstevel@tonic-gate }
28727c478bd9Sstevel@tonic-gate mutex_exit(&pidlock);
28737c478bd9Sstevel@tonic-gate if (ret == 0)
28747c478bd9Sstevel@tonic-gate ret = berr;
28757c478bd9Sstevel@tonic-gate return (ret);
28767c478bd9Sstevel@tonic-gate }
28777c478bd9Sstevel@tonic-gate
28787c478bd9Sstevel@tonic-gate
28797c478bd9Sstevel@tonic-gate /*
28807c478bd9Sstevel@tonic-gate * Destroy all remaining bound threads on a cpu.
28817c478bd9Sstevel@tonic-gate */
28827c478bd9Sstevel@tonic-gate void
28837c478bd9Sstevel@tonic-gate cpu_destroy_bound_threads(cpu_t *cp)
28847c478bd9Sstevel@tonic-gate {
28857c478bd9Sstevel@tonic-gate extern id_t syscid;
28867c478bd9Sstevel@tonic-gate register kthread_id_t t, tlist, tnext;
28877c478bd9Sstevel@tonic-gate
28887c478bd9Sstevel@tonic-gate /*
28897c478bd9Sstevel@tonic-gate * Destroy all remaining bound threads on the cpu. This
28907c478bd9Sstevel@tonic-gate * should include both the interrupt threads and the idle thread.
28917c478bd9Sstevel@tonic-gate * This requires some care, since we need to traverse the
28927c478bd9Sstevel@tonic-gate * thread list with the pidlock mutex locked, but thread_free
28937c478bd9Sstevel@tonic-gate * also locks the pidlock mutex. So, we collect the threads
28947c478bd9Sstevel@tonic-gate * we're going to reap in a list headed by "tlist", then we
28957c478bd9Sstevel@tonic-gate * unlock the pidlock mutex and traverse the tlist list,
28967c478bd9Sstevel@tonic-gate * doing thread_free's on the threads. Simple, n'est-ce pas?
28977c478bd9Sstevel@tonic-gate * Also, this depends on thread_free not mucking with the
28987c478bd9Sstevel@tonic-gate * t_next and t_prev links of the thread.
28997c478bd9Sstevel@tonic-gate */
29007c478bd9Sstevel@tonic-gate
29017c478bd9Sstevel@tonic-gate if ((t = curthread) != NULL) {
29027c478bd9Sstevel@tonic-gate
29037c478bd9Sstevel@tonic-gate tlist = NULL;
29047c478bd9Sstevel@tonic-gate mutex_enter(&pidlock);
29057c478bd9Sstevel@tonic-gate do {
29067c478bd9Sstevel@tonic-gate tnext = t->t_next;
29077c478bd9Sstevel@tonic-gate if (t->t_bound_cpu == cp) {
29087c478bd9Sstevel@tonic-gate
29097c478bd9Sstevel@tonic-gate /*
29107c478bd9Sstevel@tonic-gate * We've found a bound thread, carefully unlink
29117c478bd9Sstevel@tonic-gate * it out of the thread list, and add it to
29127c478bd9Sstevel@tonic-gate * our "tlist". We "know" we don't have to
29137c478bd9Sstevel@tonic-gate * worry about unlinking curthread (the thread
29147c478bd9Sstevel@tonic-gate * that is executing this code).
29157c478bd9Sstevel@tonic-gate */
29167c478bd9Sstevel@tonic-gate t->t_next->t_prev = t->t_prev;
29177c478bd9Sstevel@tonic-gate t->t_prev->t_next = t->t_next;
29187c478bd9Sstevel@tonic-gate t->t_next = tlist;
29197c478bd9Sstevel@tonic-gate tlist = t;
29207c478bd9Sstevel@tonic-gate ASSERT(t->t_cid == syscid);
29217c478bd9Sstevel@tonic-gate /* wake up anyone blocked in thread_join */
29227c478bd9Sstevel@tonic-gate cv_broadcast(&t->t_joincv);
29237c478bd9Sstevel@tonic-gate /*
29247c478bd9Sstevel@tonic-gate * t_lwp set by interrupt threads and not
29257c478bd9Sstevel@tonic-gate * cleared.
29267c478bd9Sstevel@tonic-gate */
29277c478bd9Sstevel@tonic-gate t->t_lwp = NULL;
29287c478bd9Sstevel@tonic-gate /*
29297c478bd9Sstevel@tonic-gate * Pause and idle threads always have
29307c478bd9Sstevel@tonic-gate * t_state set to TS_ONPROC.
29317c478bd9Sstevel@tonic-gate */
29327c478bd9Sstevel@tonic-gate t->t_state = TS_FREE;
29337c478bd9Sstevel@tonic-gate t->t_prev = NULL; /* Just in case */
29347c478bd9Sstevel@tonic-gate }
29357c478bd9Sstevel@tonic-gate
29367c478bd9Sstevel@tonic-gate } while ((t = tnext) != curthread);
29377c478bd9Sstevel@tonic-gate
29387c478bd9Sstevel@tonic-gate mutex_exit(&pidlock);
29397c478bd9Sstevel@tonic-gate
2940575a7426Spt157919 mutex_sync();
29417c478bd9Sstevel@tonic-gate for (t = tlist; t != NULL; t = tnext) {
29427c478bd9Sstevel@tonic-gate tnext = t->t_next;
29437c478bd9Sstevel@tonic-gate thread_free(t);
29447c478bd9Sstevel@tonic-gate }
29457c478bd9Sstevel@tonic-gate }
29467c478bd9Sstevel@tonic-gate }
29477c478bd9Sstevel@tonic-gate
29487c478bd9Sstevel@tonic-gate /*
294968afbec1Smh27603 * Update the cpu_supp_freqs of this cpu. This information is returned
29504e93b15cSmh27603 * as part of cpu_info kstats. If the cpu_info_kstat exists already, then
29514e93b15cSmh27603 * maintain the kstat data size.
295268afbec1Smh27603 */
295368afbec1Smh27603 void
295468afbec1Smh27603 cpu_set_supp_freqs(cpu_t *cp, const char *freqs)
295568afbec1Smh27603 {
295668afbec1Smh27603 char clkstr[sizeof ("18446744073709551615") + 1]; /* ui64 MAX */
295768afbec1Smh27603 const char *lfreqs = clkstr;
29584e93b15cSmh27603 boolean_t kstat_exists = B_FALSE;
29594e93b15cSmh27603 kstat_t *ksp;
29604e93b15cSmh27603 size_t len;
296168afbec1Smh27603
296268afbec1Smh27603 /*
296368afbec1Smh27603 * A NULL pointer means we only support one speed.
296468afbec1Smh27603 */
296568afbec1Smh27603 if (freqs == NULL)
296668afbec1Smh27603 (void) snprintf(clkstr, sizeof (clkstr), "%"PRIu64,
296768afbec1Smh27603 cp->cpu_curr_clock);
296868afbec1Smh27603 else
296968afbec1Smh27603 lfreqs = freqs;
297068afbec1Smh27603
297168afbec1Smh27603 /*
297268afbec1Smh27603 * Make sure the frequency doesn't change while a snapshot is
2973e40c2cd2Smh27603 * going on. Of course, we only need to worry about this if
2974e40c2cd2Smh27603 * the kstat exists.
297568afbec1Smh27603 */
29764e93b15cSmh27603 if ((ksp = cp->cpu_info_kstat) != NULL) {
29774e93b15cSmh27603 mutex_enter(ksp->ks_lock);
29784e93b15cSmh27603 kstat_exists = B_TRUE;
2979e40c2cd2Smh27603 }
298068afbec1Smh27603
298168afbec1Smh27603 /*
29824e93b15cSmh27603 * Free any previously allocated string and if the kstat
29834e93b15cSmh27603 * already exists, then update its data size.
298468afbec1Smh27603 */
29854e93b15cSmh27603 if (cp->cpu_supp_freqs != NULL) {
29864e93b15cSmh27603 len = strlen(cp->cpu_supp_freqs) + 1;
29874e93b15cSmh27603 kmem_free(cp->cpu_supp_freqs, len);
29884e93b15cSmh27603 if (kstat_exists)
29894e93b15cSmh27603 ksp->ks_data_size -= len;
29904e93b15cSmh27603 }
299168afbec1Smh27603
299268afbec1Smh27603 /*
299368afbec1Smh27603 * Allocate the new string and set the pointer.
299468afbec1Smh27603 */
29954e93b15cSmh27603 len = strlen(lfreqs) + 1;
29964e93b15cSmh27603 cp->cpu_supp_freqs = kmem_alloc(len, KM_SLEEP);
299768afbec1Smh27603 (void) strcpy(cp->cpu_supp_freqs, lfreqs);
299868afbec1Smh27603
299968afbec1Smh27603 /*
30004e93b15cSmh27603 * If the kstat already exists then update the data size and
30014e93b15cSmh27603 * free the lock.
300268afbec1Smh27603 */
30034e93b15cSmh27603 if (kstat_exists) {
30044e93b15cSmh27603 ksp->ks_data_size += len;
30054e93b15cSmh27603 mutex_exit(ksp->ks_lock);
30064e93b15cSmh27603 }
300768afbec1Smh27603 }
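
/*
 * Illustrative sketch (not part of the original source): a hypothetical CPU
 * power-management driver supporting two operating points might publish them
 * as shown below.  The names low_speed_hz/high_speed_hz and the choice of
 * ":" as separator are assumptions for this example; the routine above
 * simply copies whatever string it is handed.
 *
 *	char freqs[64];
 *
 *	(void) snprintf(freqs, sizeof (freqs), "%"PRIu64":%"PRIu64,
 *	    low_speed_hz, high_speed_hz);
 *	cpu_set_supp_freqs(cp, freqs);
 */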
300868afbec1Smh27603
300968afbec1Smh27603 /*
30100e751525SEric Saxe * Indicate the current CPU's clock frequency (in Hz).
30110e751525SEric Saxe * The calling context must be such that CPU references are safe.
30120e751525SEric Saxe */
30130e751525SEric Saxe void
30140e751525SEric Saxe cpu_set_curr_clock(uint64_t new_clk)
30150e751525SEric Saxe {
30160e751525SEric Saxe uint64_t old_clk;
30170e751525SEric Saxe
30180e751525SEric Saxe old_clk = CPU->cpu_curr_clock;
30190e751525SEric Saxe CPU->cpu_curr_clock = new_clk;
30200e751525SEric Saxe
30210e751525SEric Saxe /*
30220e751525SEric Saxe * The cpu-change-speed DTrace probe exports the frequency in Hz
30230e751525SEric Saxe */
30240e751525SEric Saxe DTRACE_PROBE3(cpu__change__speed, processorid_t, CPU->cpu_id,
30250e751525SEric Saxe uint64_t, old_clk, uint64_t, new_clk);
30260e751525SEric Saxe }
30270e751525SEric Saxe
30280e751525SEric Saxe /*
30297c478bd9Sstevel@tonic-gate * processor_info(2) and p_online(2) status support functions
30307c478bd9Sstevel@tonic-gate * The constants returned by the cpu_get_state() and cpu_get_state_str() are
30317c478bd9Sstevel@tonic-gate * for use in communicating processor state information to userland. Kernel
30327c478bd9Sstevel@tonic-gate * subsystems should only be using the cpu_flags value directly. Subsystems
30337c478bd9Sstevel@tonic-gate * modifying cpu_flags should record the state change via a call to
30347c478bd9Sstevel@tonic-gate * cpu_set_state().
30357c478bd9Sstevel@tonic-gate */
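
/*
 * Illustrative sketch (not part of the original source): the state computed
 * by cpu_get_state() below is what a userland caller of processor_info(2)
 * ultimately observes.  CPU id 0 is assumed for this example.
 *
 *	#include <sys/processor.h>
 *
 *	processor_info_t pi;
 *
 *	if (processor_info(0, &pi) == 0 && pi.pi_state == P_ONLINE)
 *		... CPU 0 is online; pi.pi_clock gives its clock in MHz ...
 */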
30367c478bd9Sstevel@tonic-gate
30377c478bd9Sstevel@tonic-gate /*
30387c478bd9Sstevel@tonic-gate * Update the pi_state of this CPU. This function provides the CPU status for
30397c478bd9Sstevel@tonic-gate * the information returned by processor_info(2).
30407c478bd9Sstevel@tonic-gate */
30417c478bd9Sstevel@tonic-gate void
30427c478bd9Sstevel@tonic-gate cpu_set_state(cpu_t *cpu)
30437c478bd9Sstevel@tonic-gate {
30447c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
30457c478bd9Sstevel@tonic-gate cpu->cpu_type_info.pi_state = cpu_get_state(cpu);
30467c478bd9Sstevel@tonic-gate cpu->cpu_state_begin = gethrestime_sec();
30477c478bd9Sstevel@tonic-gate pool_cpu_mod = gethrtime();
30487c478bd9Sstevel@tonic-gate }
30497c478bd9Sstevel@tonic-gate
30507c478bd9Sstevel@tonic-gate /*
30517c478bd9Sstevel@tonic-gate * Return offline/online/other status for the indicated CPU. Use only for
30527c478bd9Sstevel@tonic-gate * communication with user applications; cpu_flags provides the in-kernel
30537c478bd9Sstevel@tonic-gate * interface.
30547c478bd9Sstevel@tonic-gate */
30557c478bd9Sstevel@tonic-gate int
30567c478bd9Sstevel@tonic-gate cpu_get_state(cpu_t *cpu)
30577c478bd9Sstevel@tonic-gate {
30587c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
30597c478bd9Sstevel@tonic-gate if (cpu->cpu_flags & CPU_POWEROFF)
30607c478bd9Sstevel@tonic-gate return (P_POWEROFF);
30617c478bd9Sstevel@tonic-gate else if (cpu->cpu_flags & CPU_FAULTED)
30627c478bd9Sstevel@tonic-gate return (P_FAULTED);
30637c478bd9Sstevel@tonic-gate else if (cpu->cpu_flags & CPU_SPARE)
30647c478bd9Sstevel@tonic-gate return (P_SPARE);
30657c478bd9Sstevel@tonic-gate else if ((cpu->cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY)
30667c478bd9Sstevel@tonic-gate return (P_OFFLINE);
30677c478bd9Sstevel@tonic-gate else if (cpu->cpu_flags & CPU_ENABLE)
30687c478bd9Sstevel@tonic-gate return (P_ONLINE);
30697c478bd9Sstevel@tonic-gate else
30707c478bd9Sstevel@tonic-gate return (P_NOINTR);
30717c478bd9Sstevel@tonic-gate }
30727c478bd9Sstevel@tonic-gate
30737c478bd9Sstevel@tonic-gate /*
30747c478bd9Sstevel@tonic-gate * Return processor_info(2) state as a string.
30757c478bd9Sstevel@tonic-gate */
30767c478bd9Sstevel@tonic-gate const char *
30777c478bd9Sstevel@tonic-gate cpu_get_state_str(cpu_t *cpu)
30787c478bd9Sstevel@tonic-gate {
30797c478bd9Sstevel@tonic-gate const char *string;
30807c478bd9Sstevel@tonic-gate
30817c478bd9Sstevel@tonic-gate switch (cpu_get_state(cpu)) {
30827c478bd9Sstevel@tonic-gate case P_ONLINE:
30837c478bd9Sstevel@tonic-gate string = PS_ONLINE;
30847c478bd9Sstevel@tonic-gate break;
30857c478bd9Sstevel@tonic-gate case P_POWEROFF:
30867c478bd9Sstevel@tonic-gate string = PS_POWEROFF;
30877c478bd9Sstevel@tonic-gate break;
30887c478bd9Sstevel@tonic-gate case P_NOINTR:
30897c478bd9Sstevel@tonic-gate string = PS_NOINTR;
30907c478bd9Sstevel@tonic-gate break;
30917c478bd9Sstevel@tonic-gate case P_SPARE:
30927c478bd9Sstevel@tonic-gate string = PS_SPARE;
30937c478bd9Sstevel@tonic-gate break;
30947c478bd9Sstevel@tonic-gate case P_FAULTED:
30957c478bd9Sstevel@tonic-gate string = PS_FAULTED;
30967c478bd9Sstevel@tonic-gate break;
30977c478bd9Sstevel@tonic-gate case P_OFFLINE:
30987c478bd9Sstevel@tonic-gate string = PS_OFFLINE;
30997c478bd9Sstevel@tonic-gate break;
31007c478bd9Sstevel@tonic-gate default:
31017c478bd9Sstevel@tonic-gate string = "unknown";
31027c478bd9Sstevel@tonic-gate break;
31037c478bd9Sstevel@tonic-gate }
31047c478bd9Sstevel@tonic-gate return (string);
31057c478bd9Sstevel@tonic-gate }
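/*
 * A tiny sketch (helper name assumed) of using the string form for
 * diagnostics.  cpu_lock must be held because the helper calls
 * cpu_get_state() internally.
 */
static void
example_log_cpu_state(cpu_t *cp)	/* hypothetical helper */
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	cmn_err(CE_NOTE, "cpu%d is %s", cp->cpu_id, cpu_get_state_str(cp));
}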
31067c478bd9Sstevel@tonic-gate
31077c478bd9Sstevel@tonic-gate /*
31087c478bd9Sstevel@tonic-gate * Export this CPU's statistics (cpu_stat_t and cpu_stats_t) as raw and named
31097c478bd9Sstevel@tonic-gate * kstats, respectively. This is done when a CPU is initialized or placed
31107c478bd9Sstevel@tonic-gate * online via p_online(2).
31117c478bd9Sstevel@tonic-gate */
31127c478bd9Sstevel@tonic-gate static void
31137c478bd9Sstevel@tonic-gate cpu_stats_kstat_create(cpu_t *cp)
31147c478bd9Sstevel@tonic-gate {
31157c478bd9Sstevel@tonic-gate int instance = cp->cpu_id;
31167c478bd9Sstevel@tonic-gate char *module = "cpu";
31177c478bd9Sstevel@tonic-gate char *class = "misc";
31187c478bd9Sstevel@tonic-gate kstat_t *ksp;
31197c478bd9Sstevel@tonic-gate zoneid_t zoneid;
31207c478bd9Sstevel@tonic-gate
31217c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
31227c478bd9Sstevel@tonic-gate
31237c478bd9Sstevel@tonic-gate if (pool_pset_enabled())
31247c478bd9Sstevel@tonic-gate zoneid = GLOBAL_ZONEID;
31257c478bd9Sstevel@tonic-gate else
31267c478bd9Sstevel@tonic-gate zoneid = ALL_ZONES;
31277c478bd9Sstevel@tonic-gate /*
31287c478bd9Sstevel@tonic-gate * Create named kstats
31297c478bd9Sstevel@tonic-gate */
31307c478bd9Sstevel@tonic-gate #define CPU_STATS_KS_CREATE(name, tsize, update_func) \
31317c478bd9Sstevel@tonic-gate ksp = kstat_create_zone(module, instance, (name), class, \
31327c478bd9Sstevel@tonic-gate KSTAT_TYPE_NAMED, (tsize) / sizeof (kstat_named_t), 0, \
31337c478bd9Sstevel@tonic-gate zoneid); \
31347c478bd9Sstevel@tonic-gate if (ksp != NULL) { \
31357c478bd9Sstevel@tonic-gate ksp->ks_private = cp; \
31367c478bd9Sstevel@tonic-gate ksp->ks_update = (update_func); \
31377c478bd9Sstevel@tonic-gate kstat_install(ksp); \
31387c478bd9Sstevel@tonic-gate } else \
31397c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "cpu: unable to create %s:%d:%s kstat", \
31407c478bd9Sstevel@tonic-gate module, instance, (name));
31417c478bd9Sstevel@tonic-gate
31427c478bd9Sstevel@tonic-gate CPU_STATS_KS_CREATE("sys", sizeof (cpu_sys_stats_ks_data_template),
31437c478bd9Sstevel@tonic-gate cpu_sys_stats_ks_update);
31447c478bd9Sstevel@tonic-gate CPU_STATS_KS_CREATE("vm", sizeof (cpu_vm_stats_ks_data_template),
31457c478bd9Sstevel@tonic-gate cpu_vm_stats_ks_update);
31467c478bd9Sstevel@tonic-gate
31477c478bd9Sstevel@tonic-gate /*
31487c478bd9Sstevel@tonic-gate * Export the familiar cpu_stat_t KSTAT_TYPE_RAW kstat.
31497c478bd9Sstevel@tonic-gate */
31507c478bd9Sstevel@tonic-gate ksp = kstat_create_zone("cpu_stat", cp->cpu_id, NULL,
31517c478bd9Sstevel@tonic-gate "misc", KSTAT_TYPE_RAW, sizeof (cpu_stat_t), 0, zoneid);
31527c478bd9Sstevel@tonic-gate if (ksp != NULL) {
31537c478bd9Sstevel@tonic-gate ksp->ks_update = cpu_stat_ks_update;
31547c478bd9Sstevel@tonic-gate ksp->ks_private = cp;
31557c478bd9Sstevel@tonic-gate kstat_install(ksp);
31567c478bd9Sstevel@tonic-gate }
31577c478bd9Sstevel@tonic-gate }
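/*
 * Userland sketch, assuming libkstat(3LIB) and link with -lkstat, of
 * reading one of the named kstats created above (module "cpu", name "sys",
 * instance = CPU id).  The statistic "syscall" matches a field exported by
 * cpu_sys_stats_ks_update(); instance 0 is an assumption.
 */
#include <kstat.h>
#include <stdio.h>

int
main(void)
{
	kstat_ctl_t *kc = kstat_open();
	kstat_t *ksp;
	kstat_named_t *kn;

	if (kc == NULL)
		return (1);
	ksp = kstat_lookup(kc, "cpu", 0, "sys");
	if (ksp == NULL || kstat_read(kc, ksp, NULL) == -1) {
		(void) kstat_close(kc);
		return (1);
	}
	kn = kstat_data_lookup(ksp, "syscall");
	if (kn != NULL)
		(void) printf("cpu:0:sys:syscall = %llu\n",
		    (unsigned long long)kn->value.ui64);
	(void) kstat_close(kc);
	return (0);
}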
31587c478bd9Sstevel@tonic-gate
31597c478bd9Sstevel@tonic-gate static void
31607c478bd9Sstevel@tonic-gate cpu_stats_kstat_destroy(cpu_t *cp)
31617c478bd9Sstevel@tonic-gate {
31627c478bd9Sstevel@tonic-gate char ks_name[KSTAT_STRLEN];
31637c478bd9Sstevel@tonic-gate
31647c478bd9Sstevel@tonic-gate (void) sprintf(ks_name, "cpu_stat%d", cp->cpu_id);
31657c478bd9Sstevel@tonic-gate kstat_delete_byname("cpu_stat", cp->cpu_id, ks_name);
31667c478bd9Sstevel@tonic-gate
31677c478bd9Sstevel@tonic-gate kstat_delete_byname("cpu", cp->cpu_id, "sys");
31687c478bd9Sstevel@tonic-gate kstat_delete_byname("cpu", cp->cpu_id, "vm");
31697c478bd9Sstevel@tonic-gate }
31707c478bd9Sstevel@tonic-gate
31717c478bd9Sstevel@tonic-gate static int
31727c478bd9Sstevel@tonic-gate cpu_sys_stats_ks_update(kstat_t *ksp, int rw)
31737c478bd9Sstevel@tonic-gate {
31747c478bd9Sstevel@tonic-gate cpu_t *cp = (cpu_t *)ksp->ks_private;
31757c478bd9Sstevel@tonic-gate struct cpu_sys_stats_ks_data *csskd;
31767c478bd9Sstevel@tonic-gate cpu_sys_stats_t *css;
3177eda89462Sesolom hrtime_t msnsecs[NCMSTATES];
31787c478bd9Sstevel@tonic-gate int i;
31797c478bd9Sstevel@tonic-gate
31807c478bd9Sstevel@tonic-gate if (rw == KSTAT_WRITE)
31817c478bd9Sstevel@tonic-gate return (EACCES);
31827c478bd9Sstevel@tonic-gate
31837c478bd9Sstevel@tonic-gate csskd = ksp->ks_data;
31847c478bd9Sstevel@tonic-gate css = &cp->cpu_stats.sys;
31857c478bd9Sstevel@tonic-gate
3186eda89462Sesolom /*
3187eda89462Sesolom * Read CPU mstate, but compare with the last values we
3188eda89462Sesolom * received to make sure that the returned kstats never
3189eda89462Sesolom * decrease.
3190eda89462Sesolom */
3191eda89462Sesolom
3192eda89462Sesolom get_cpu_mstate(cp, msnsecs);
3193eda89462Sesolom if (csskd->cpu_nsec_idle.value.ui64 > msnsecs[CMS_IDLE])
3194eda89462Sesolom msnsecs[CMS_IDLE] = csskd->cpu_nsec_idle.value.ui64;
3195eda89462Sesolom if (csskd->cpu_nsec_user.value.ui64 > msnsecs[CMS_USER])
3196eda89462Sesolom msnsecs[CMS_USER] = csskd->cpu_nsec_user.value.ui64;
3197eda89462Sesolom if (csskd->cpu_nsec_kernel.value.ui64 > msnsecs[CMS_SYSTEM])
3198eda89462Sesolom msnsecs[CMS_SYSTEM] = csskd->cpu_nsec_kernel.value.ui64;
3199eda89462Sesolom
32007c478bd9Sstevel@tonic-gate bcopy(&cpu_sys_stats_ks_data_template, ksp->ks_data,
32017c478bd9Sstevel@tonic-gate sizeof (cpu_sys_stats_ks_data_template));
3202eda89462Sesolom
32037c478bd9Sstevel@tonic-gate csskd->cpu_ticks_wait.value.ui64 = 0;
32047c478bd9Sstevel@tonic-gate csskd->wait_ticks_io.value.ui64 = 0;
32057c478bd9Sstevel@tonic-gate
3206eda89462Sesolom csskd->cpu_nsec_idle.value.ui64 = msnsecs[CMS_IDLE];
3207eda89462Sesolom csskd->cpu_nsec_user.value.ui64 = msnsecs[CMS_USER];
3208eda89462Sesolom csskd->cpu_nsec_kernel.value.ui64 = msnsecs[CMS_SYSTEM];
32097c478bd9Sstevel@tonic-gate csskd->cpu_ticks_idle.value.ui64 =
32107c478bd9Sstevel@tonic-gate NSEC_TO_TICK(csskd->cpu_nsec_idle.value.ui64);
32117c478bd9Sstevel@tonic-gate csskd->cpu_ticks_user.value.ui64 =
32127c478bd9Sstevel@tonic-gate NSEC_TO_TICK(csskd->cpu_nsec_user.value.ui64);
32137c478bd9Sstevel@tonic-gate csskd->cpu_ticks_kernel.value.ui64 =
32147c478bd9Sstevel@tonic-gate NSEC_TO_TICK(csskd->cpu_nsec_kernel.value.ui64);
32151f9f06cfSMatthew Ahrens csskd->cpu_nsec_dtrace.value.ui64 = cp->cpu_dtrace_nsec;
32161f9f06cfSMatthew Ahrens csskd->dtrace_probes.value.ui64 = cp->cpu_dtrace_probes;
32173aedfe0bSmishra csskd->cpu_nsec_intr.value.ui64 = cp->cpu_intrlast;
32183aedfe0bSmishra csskd->cpu_load_intr.value.ui64 = cp->cpu_intrload;
32197c478bd9Sstevel@tonic-gate csskd->bread.value.ui64 = css->bread;
32207c478bd9Sstevel@tonic-gate csskd->bwrite.value.ui64 = css->bwrite;
32217c478bd9Sstevel@tonic-gate csskd->lread.value.ui64 = css->lread;
32227c478bd9Sstevel@tonic-gate csskd->lwrite.value.ui64 = css->lwrite;
32237c478bd9Sstevel@tonic-gate csskd->phread.value.ui64 = css->phread;
32247c478bd9Sstevel@tonic-gate csskd->phwrite.value.ui64 = css->phwrite;
32257c478bd9Sstevel@tonic-gate csskd->pswitch.value.ui64 = css->pswitch;
32267c478bd9Sstevel@tonic-gate csskd->trap.value.ui64 = css->trap;
32277c478bd9Sstevel@tonic-gate csskd->intr.value.ui64 = 0;
32287c478bd9Sstevel@tonic-gate for (i = 0; i < PIL_MAX; i++)
32297c478bd9Sstevel@tonic-gate csskd->intr.value.ui64 += css->intr[i];
32307c478bd9Sstevel@tonic-gate csskd->syscall.value.ui64 = css->syscall;
32317c478bd9Sstevel@tonic-gate csskd->sysread.value.ui64 = css->sysread;
32327c478bd9Sstevel@tonic-gate csskd->syswrite.value.ui64 = css->syswrite;
32337c478bd9Sstevel@tonic-gate csskd->sysfork.value.ui64 = css->sysfork;
32347c478bd9Sstevel@tonic-gate csskd->sysvfork.value.ui64 = css->sysvfork;
32357c478bd9Sstevel@tonic-gate csskd->sysexec.value.ui64 = css->sysexec;
32367c478bd9Sstevel@tonic-gate csskd->readch.value.ui64 = css->readch;
32377c478bd9Sstevel@tonic-gate csskd->writech.value.ui64 = css->writech;
32387c478bd9Sstevel@tonic-gate csskd->rcvint.value.ui64 = css->rcvint;
32397c478bd9Sstevel@tonic-gate csskd->xmtint.value.ui64 = css->xmtint;
32407c478bd9Sstevel@tonic-gate csskd->mdmint.value.ui64 = css->mdmint;
32417c478bd9Sstevel@tonic-gate csskd->rawch.value.ui64 = css->rawch;
32427c478bd9Sstevel@tonic-gate csskd->canch.value.ui64 = css->canch;
32437c478bd9Sstevel@tonic-gate csskd->outch.value.ui64 = css->outch;
32447c478bd9Sstevel@tonic-gate csskd->msg.value.ui64 = css->msg;
32457c478bd9Sstevel@tonic-gate csskd->sema.value.ui64 = css->sema;
32467c478bd9Sstevel@tonic-gate csskd->namei.value.ui64 = css->namei;
32477c478bd9Sstevel@tonic-gate csskd->ufsiget.value.ui64 = css->ufsiget;
32487c478bd9Sstevel@tonic-gate csskd->ufsdirblk.value.ui64 = css->ufsdirblk;
32497c478bd9Sstevel@tonic-gate csskd->ufsipage.value.ui64 = css->ufsipage;
32507c478bd9Sstevel@tonic-gate csskd->ufsinopage.value.ui64 = css->ufsinopage;
32517c478bd9Sstevel@tonic-gate csskd->procovf.value.ui64 = css->procovf;
32527c478bd9Sstevel@tonic-gate csskd->intrthread.value.ui64 = 0;
325386e8def1Sethindra for (i = 0; i < LOCK_LEVEL - 1; i++)
32547c478bd9Sstevel@tonic-gate csskd->intrthread.value.ui64 += css->intr[i];
32557c478bd9Sstevel@tonic-gate csskd->intrblk.value.ui64 = css->intrblk;
32567c478bd9Sstevel@tonic-gate csskd->intrunpin.value.ui64 = css->intrunpin;
32577c478bd9Sstevel@tonic-gate csskd->idlethread.value.ui64 = css->idlethread;
32587c478bd9Sstevel@tonic-gate csskd->inv_swtch.value.ui64 = css->inv_swtch;
32597c478bd9Sstevel@tonic-gate csskd->nthreads.value.ui64 = css->nthreads;
32607c478bd9Sstevel@tonic-gate csskd->cpumigrate.value.ui64 = css->cpumigrate;
32617c478bd9Sstevel@tonic-gate csskd->xcalls.value.ui64 = css->xcalls;
32627c478bd9Sstevel@tonic-gate csskd->mutex_adenters.value.ui64 = css->mutex_adenters;
32637c478bd9Sstevel@tonic-gate csskd->rw_rdfails.value.ui64 = css->rw_rdfails;
32647c478bd9Sstevel@tonic-gate csskd->rw_wrfails.value.ui64 = css->rw_wrfails;
32657c478bd9Sstevel@tonic-gate csskd->modload.value.ui64 = css->modload;
32667c478bd9Sstevel@tonic-gate csskd->modunload.value.ui64 = css->modunload;
32677c478bd9Sstevel@tonic-gate csskd->bawrite.value.ui64 = css->bawrite;
3268ae115bc7Smrj csskd->iowait.value.ui64 = css->iowait;
32697c478bd9Sstevel@tonic-gate
32707c478bd9Sstevel@tonic-gate return (0);
32717c478bd9Sstevel@tonic-gate }
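/*
 * The "never decrease" rule above reduces to a simple clamp: whatever the
 * microstate accounting returns, the value previously exported through the
 * kstat wins if it is larger.  A minimal sketch of that rule as a helper
 * (the helper itself is illustrative; the update routines open-code it):
 */
static hrtime_t
example_monotonic(hrtime_t exported, hrtime_t fresh)
{
	/* keep the reported statistic from ever going backwards */
	return (fresh < exported ? exported : fresh);
}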
32727c478bd9Sstevel@tonic-gate
32737c478bd9Sstevel@tonic-gate static int
32747c478bd9Sstevel@tonic-gate cpu_vm_stats_ks_update(kstat_t *ksp, int rw)
32757c478bd9Sstevel@tonic-gate {
32767c478bd9Sstevel@tonic-gate cpu_t *cp = (cpu_t *)ksp->ks_private;
32777c478bd9Sstevel@tonic-gate struct cpu_vm_stats_ks_data *cvskd;
32787c478bd9Sstevel@tonic-gate cpu_vm_stats_t *cvs;
32797c478bd9Sstevel@tonic-gate
32807c478bd9Sstevel@tonic-gate if (rw == KSTAT_WRITE)
32817c478bd9Sstevel@tonic-gate return (EACCES);
32827c478bd9Sstevel@tonic-gate
32837c478bd9Sstevel@tonic-gate cvs = &cp->cpu_stats.vm;
32847c478bd9Sstevel@tonic-gate cvskd = ksp->ks_data;
32857c478bd9Sstevel@tonic-gate
32867c478bd9Sstevel@tonic-gate bcopy(&cpu_vm_stats_ks_data_template, ksp->ks_data,
32877c478bd9Sstevel@tonic-gate sizeof (cpu_vm_stats_ks_data_template));
32887c478bd9Sstevel@tonic-gate cvskd->pgrec.value.ui64 = cvs->pgrec;
32897c478bd9Sstevel@tonic-gate cvskd->pgfrec.value.ui64 = cvs->pgfrec;
32907c478bd9Sstevel@tonic-gate cvskd->pgin.value.ui64 = cvs->pgin;
32917c478bd9Sstevel@tonic-gate cvskd->pgpgin.value.ui64 = cvs->pgpgin;
32927c478bd9Sstevel@tonic-gate cvskd->pgout.value.ui64 = cvs->pgout;
32937c478bd9Sstevel@tonic-gate cvskd->pgpgout.value.ui64 = cvs->pgpgout;
32947c478bd9Sstevel@tonic-gate cvskd->swapin.value.ui64 = cvs->swapin;
32957c478bd9Sstevel@tonic-gate cvskd->pgswapin.value.ui64 = cvs->pgswapin;
32967c478bd9Sstevel@tonic-gate cvskd->swapout.value.ui64 = cvs->swapout;
32977c478bd9Sstevel@tonic-gate cvskd->pgswapout.value.ui64 = cvs->pgswapout;
32987c478bd9Sstevel@tonic-gate cvskd->zfod.value.ui64 = cvs->zfod;
32997c478bd9Sstevel@tonic-gate cvskd->dfree.value.ui64 = cvs->dfree;
33007c478bd9Sstevel@tonic-gate cvskd->scan.value.ui64 = cvs->scan;
33017c478bd9Sstevel@tonic-gate cvskd->rev.value.ui64 = cvs->rev;
33027c478bd9Sstevel@tonic-gate cvskd->hat_fault.value.ui64 = cvs->hat_fault;
33037c478bd9Sstevel@tonic-gate cvskd->as_fault.value.ui64 = cvs->as_fault;
33047c478bd9Sstevel@tonic-gate cvskd->maj_fault.value.ui64 = cvs->maj_fault;
33057c478bd9Sstevel@tonic-gate cvskd->cow_fault.value.ui64 = cvs->cow_fault;
33067c478bd9Sstevel@tonic-gate cvskd->prot_fault.value.ui64 = cvs->prot_fault;
33077c478bd9Sstevel@tonic-gate cvskd->softlock.value.ui64 = cvs->softlock;
33087c478bd9Sstevel@tonic-gate cvskd->kernel_asflt.value.ui64 = cvs->kernel_asflt;
33097c478bd9Sstevel@tonic-gate cvskd->pgrrun.value.ui64 = cvs->pgrrun;
33107c478bd9Sstevel@tonic-gate cvskd->execpgin.value.ui64 = cvs->execpgin;
33117c478bd9Sstevel@tonic-gate cvskd->execpgout.value.ui64 = cvs->execpgout;
33127c478bd9Sstevel@tonic-gate cvskd->execfree.value.ui64 = cvs->execfree;
33137c478bd9Sstevel@tonic-gate cvskd->anonpgin.value.ui64 = cvs->anonpgin;
33147c478bd9Sstevel@tonic-gate cvskd->anonpgout.value.ui64 = cvs->anonpgout;
33157c478bd9Sstevel@tonic-gate cvskd->anonfree.value.ui64 = cvs->anonfree;
33167c478bd9Sstevel@tonic-gate cvskd->fspgin.value.ui64 = cvs->fspgin;
33177c478bd9Sstevel@tonic-gate cvskd->fspgout.value.ui64 = cvs->fspgout;
33187c478bd9Sstevel@tonic-gate cvskd->fsfree.value.ui64 = cvs->fsfree;
33197c478bd9Sstevel@tonic-gate
33207c478bd9Sstevel@tonic-gate return (0);
33217c478bd9Sstevel@tonic-gate }
33227c478bd9Sstevel@tonic-gate
33237c478bd9Sstevel@tonic-gate static int
33247c478bd9Sstevel@tonic-gate cpu_stat_ks_update(kstat_t *ksp, int rw)
33257c478bd9Sstevel@tonic-gate {
33267c478bd9Sstevel@tonic-gate cpu_stat_t *cso;
33277c478bd9Sstevel@tonic-gate cpu_t *cp;
33287c478bd9Sstevel@tonic-gate int i;
3329eda89462Sesolom hrtime_t msnsecs[NCMSTATES];
33307c478bd9Sstevel@tonic-gate
33317c478bd9Sstevel@tonic-gate cso = (cpu_stat_t *)ksp->ks_data;
33327c478bd9Sstevel@tonic-gate cp = (cpu_t *)ksp->ks_private;
33337c478bd9Sstevel@tonic-gate
33347c478bd9Sstevel@tonic-gate if (rw == KSTAT_WRITE)
33357c478bd9Sstevel@tonic-gate return (EACCES);
33367c478bd9Sstevel@tonic-gate
33377c478bd9Sstevel@tonic-gate /*
3338eda89462Sesolom * Read CPU mstate, but compare with the last values we
3339eda89462Sesolom * received to make sure that the returned kstats never
3340eda89462Sesolom * decrease.
33417c478bd9Sstevel@tonic-gate */
33427c478bd9Sstevel@tonic-gate
3343eda89462Sesolom get_cpu_mstate(cp, msnsecs);
3344eda89462Sesolom msnsecs[CMS_IDLE] = NSEC_TO_TICK(msnsecs[CMS_IDLE]);
3345eda89462Sesolom msnsecs[CMS_USER] = NSEC_TO_TICK(msnsecs[CMS_USER]);
3346eda89462Sesolom msnsecs[CMS_SYSTEM] = NSEC_TO_TICK(msnsecs[CMS_SYSTEM]);
3347eda89462Sesolom if (cso->cpu_sysinfo.cpu[CPU_IDLE] < msnsecs[CMS_IDLE])
3348eda89462Sesolom cso->cpu_sysinfo.cpu[CPU_IDLE] = msnsecs[CMS_IDLE];
3349eda89462Sesolom if (cso->cpu_sysinfo.cpu[CPU_USER] < msnsecs[CMS_USER])
3350eda89462Sesolom cso->cpu_sysinfo.cpu[CPU_USER] = msnsecs[CMS_USER];
3351eda89462Sesolom if (cso->cpu_sysinfo.cpu[CPU_KERNEL] < msnsecs[CMS_SYSTEM])
3352eda89462Sesolom cso->cpu_sysinfo.cpu[CPU_KERNEL] = msnsecs[CMS_SYSTEM];
33537c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.cpu[CPU_WAIT] = 0;
33547c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.wait[W_IO] = 0;
33557c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.wait[W_SWAP] = 0;
33567c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.wait[W_PIO] = 0;
33577c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.bread = CPU_STATS(cp, sys.bread);
33587c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.bwrite = CPU_STATS(cp, sys.bwrite);
33597c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.lread = CPU_STATS(cp, sys.lread);
33607c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.lwrite = CPU_STATS(cp, sys.lwrite);
33617c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.phread = CPU_STATS(cp, sys.phread);
33627c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.phwrite = CPU_STATS(cp, sys.phwrite);
33637c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.pswitch = CPU_STATS(cp, sys.pswitch);
33647c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.trap = CPU_STATS(cp, sys.trap);
33657c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.intr = 0;
33667c478bd9Sstevel@tonic-gate for (i = 0; i < PIL_MAX; i++)
33677c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.intr += CPU_STATS(cp, sys.intr[i]);
33687c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.syscall = CPU_STATS(cp, sys.syscall);
33697c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.sysread = CPU_STATS(cp, sys.sysread);
33707c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.syswrite = CPU_STATS(cp, sys.syswrite);
33717c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.sysfork = CPU_STATS(cp, sys.sysfork);
33727c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.sysvfork = CPU_STATS(cp, sys.sysvfork);
33737c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.sysexec = CPU_STATS(cp, sys.sysexec);
33747c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.readch = CPU_STATS(cp, sys.readch);
33757c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.writech = CPU_STATS(cp, sys.writech);
33767c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.rcvint = CPU_STATS(cp, sys.rcvint);
33777c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.xmtint = CPU_STATS(cp, sys.xmtint);
33787c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.mdmint = CPU_STATS(cp, sys.mdmint);
33797c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.rawch = CPU_STATS(cp, sys.rawch);
33807c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.canch = CPU_STATS(cp, sys.canch);
33817c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.outch = CPU_STATS(cp, sys.outch);
33827c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.msg = CPU_STATS(cp, sys.msg);
33837c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.sema = CPU_STATS(cp, sys.sema);
33847c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.namei = CPU_STATS(cp, sys.namei);
33857c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.ufsiget = CPU_STATS(cp, sys.ufsiget);
33867c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.ufsdirblk = CPU_STATS(cp, sys.ufsdirblk);
33877c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.ufsipage = CPU_STATS(cp, sys.ufsipage);
33887c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.ufsinopage = CPU_STATS(cp, sys.ufsinopage);
33897c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.inodeovf = 0;
33907c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.fileovf = 0;
33917c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.procovf = CPU_STATS(cp, sys.procovf);
33927c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.intrthread = 0;
339386e8def1Sethindra for (i = 0; i < LOCK_LEVEL - 1; i++)
33947c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.intrthread += CPU_STATS(cp, sys.intr[i]);
33957c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.intrblk = CPU_STATS(cp, sys.intrblk);
33967c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.idlethread = CPU_STATS(cp, sys.idlethread);
33977c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.inv_swtch = CPU_STATS(cp, sys.inv_swtch);
33987c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.nthreads = CPU_STATS(cp, sys.nthreads);
33997c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.cpumigrate = CPU_STATS(cp, sys.cpumigrate);
34007c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.xcalls = CPU_STATS(cp, sys.xcalls);
34017c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.mutex_adenters = CPU_STATS(cp, sys.mutex_adenters);
34027c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.rw_rdfails = CPU_STATS(cp, sys.rw_rdfails);
34037c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.rw_wrfails = CPU_STATS(cp, sys.rw_wrfails);
34047c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.modload = CPU_STATS(cp, sys.modload);
34057c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.modunload = CPU_STATS(cp, sys.modunload);
34067c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.bawrite = CPU_STATS(cp, sys.bawrite);
34077c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.rw_enters = 0;
34087c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.win_uo_cnt = 0;
34097c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.win_uu_cnt = 0;
34107c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.win_so_cnt = 0;
34117c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.win_su_cnt = 0;
34127c478bd9Sstevel@tonic-gate cso->cpu_sysinfo.win_suo_cnt = 0;
34137c478bd9Sstevel@tonic-gate
3414ae115bc7Smrj cso->cpu_syswait.iowait = CPU_STATS(cp, sys.iowait);
34157c478bd9Sstevel@tonic-gate cso->cpu_syswait.swap = 0;
34167c478bd9Sstevel@tonic-gate cso->cpu_syswait.physio = 0;
34177c478bd9Sstevel@tonic-gate
34187c478bd9Sstevel@tonic-gate cso->cpu_vminfo.pgrec = CPU_STATS(cp, vm.pgrec);
34197c478bd9Sstevel@tonic-gate cso->cpu_vminfo.pgfrec = CPU_STATS(cp, vm.pgfrec);
34207c478bd9Sstevel@tonic-gate cso->cpu_vminfo.pgin = CPU_STATS(cp, vm.pgin);
34217c478bd9Sstevel@tonic-gate cso->cpu_vminfo.pgpgin = CPU_STATS(cp, vm.pgpgin);
34227c478bd9Sstevel@tonic-gate cso->cpu_vminfo.pgout = CPU_STATS(cp, vm.pgout);
34237c478bd9Sstevel@tonic-gate cso->cpu_vminfo.pgpgout = CPU_STATS(cp, vm.pgpgout);
34247c478bd9Sstevel@tonic-gate cso->cpu_vminfo.swapin = CPU_STATS(cp, vm.swapin);
34257c478bd9Sstevel@tonic-gate cso->cpu_vminfo.pgswapin = CPU_STATS(cp, vm.pgswapin);
34267c478bd9Sstevel@tonic-gate cso->cpu_vminfo.swapout = CPU_STATS(cp, vm.swapout);
34277c478bd9Sstevel@tonic-gate cso->cpu_vminfo.pgswapout = CPU_STATS(cp, vm.pgswapout);
34287c478bd9Sstevel@tonic-gate cso->cpu_vminfo.zfod = CPU_STATS(cp, vm.zfod);
34297c478bd9Sstevel@tonic-gate cso->cpu_vminfo.dfree = CPU_STATS(cp, vm.dfree);
34307c478bd9Sstevel@tonic-gate cso->cpu_vminfo.scan = CPU_STATS(cp, vm.scan);
34317c478bd9Sstevel@tonic-gate cso->cpu_vminfo.rev = CPU_STATS(cp, vm.rev);
34327c478bd9Sstevel@tonic-gate cso->cpu_vminfo.hat_fault = CPU_STATS(cp, vm.hat_fault);
34337c478bd9Sstevel@tonic-gate cso->cpu_vminfo.as_fault = CPU_STATS(cp, vm.as_fault);
34347c478bd9Sstevel@tonic-gate cso->cpu_vminfo.maj_fault = CPU_STATS(cp, vm.maj_fault);
34357c478bd9Sstevel@tonic-gate cso->cpu_vminfo.cow_fault = CPU_STATS(cp, vm.cow_fault);
34367c478bd9Sstevel@tonic-gate cso->cpu_vminfo.prot_fault = CPU_STATS(cp, vm.prot_fault);
34377c478bd9Sstevel@tonic-gate cso->cpu_vminfo.softlock = CPU_STATS(cp, vm.softlock);
34387c478bd9Sstevel@tonic-gate cso->cpu_vminfo.kernel_asflt = CPU_STATS(cp, vm.kernel_asflt);
34397c478bd9Sstevel@tonic-gate cso->cpu_vminfo.pgrrun = CPU_STATS(cp, vm.pgrrun);
34407c478bd9Sstevel@tonic-gate cso->cpu_vminfo.execpgin = CPU_STATS(cp, vm.execpgin);
34417c478bd9Sstevel@tonic-gate cso->cpu_vminfo.execpgout = CPU_STATS(cp, vm.execpgout);
34427c478bd9Sstevel@tonic-gate cso->cpu_vminfo.execfree = CPU_STATS(cp, vm.execfree);
34437c478bd9Sstevel@tonic-gate cso->cpu_vminfo.anonpgin = CPU_STATS(cp, vm.anonpgin);
34447c478bd9Sstevel@tonic-gate cso->cpu_vminfo.anonpgout = CPU_STATS(cp, vm.anonpgout);
34457c478bd9Sstevel@tonic-gate cso->cpu_vminfo.anonfree = CPU_STATS(cp, vm.anonfree);
34467c478bd9Sstevel@tonic-gate cso->cpu_vminfo.fspgin = CPU_STATS(cp, vm.fspgin);
34477c478bd9Sstevel@tonic-gate cso->cpu_vminfo.fspgout = CPU_STATS(cp, vm.fspgout);
34487c478bd9Sstevel@tonic-gate cso->cpu_vminfo.fsfree = CPU_STATS(cp, vm.fsfree);
34497c478bd9Sstevel@tonic-gate
34507c478bd9Sstevel@tonic-gate return (0);
34517c478bd9Sstevel@tonic-gate }
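/*
 * Userland sketch, assuming libkstat(3LIB) and link with -lkstat, of
 * reading the legacy raw kstat filled in by cpu_stat_ks_update().  When a
 * buffer is passed to kstat_read(), the data lands in a cpu_stat_t (see
 * <sys/sysinfo.h>); instance 0 is an assumption.
 */
#include <kstat.h>
#include <sys/sysinfo.h>
#include <stdio.h>

int
main(void)
{
	kstat_ctl_t *kc = kstat_open();
	kstat_t *ksp;
	cpu_stat_t cs;

	if (kc == NULL)
		return (1);
	ksp = kstat_lookup(kc, "cpu_stat", 0, NULL);
	if (ksp == NULL || kstat_read(kc, ksp, &cs) == -1) {
		(void) kstat_close(kc);
		return (1);
	}
	(void) printf("cpu_stat0: pswitch=%u syscall=%u\n",
	    cs.cpu_sysinfo.pswitch, cs.cpu_sysinfo.syscall);
	(void) kstat_close(kc);
	return (0);
}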
3452