/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */

/*
 * Architecture-independent CPU control functions.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/var.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/cpu_event.h>
#include <sys/kstat.h>
#include <sys/uadmin.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/procset.h>
#include <sys/processor.h>
#include <sys/debug.h>
#include <sys/cpupart.h>
#include <sys/lgrp.h>
#include <sys/pset.h>
#include <sys/pghw.h>
#include <sys/kmem.h>
#include <sys/kmem_impl.h>	/* to set per-cpu kmem_cache offset */
#include <sys/atomic.h>
#include <sys/callb.h>
#include <sys/vtrace.h>
#include <sys/cyclic.h>
#include <sys/bitmap.h>
#include <sys/nvpair.h>
#include <sys/pool_pset.h>
#include <sys/msacct.h>
#include <sys/time.h>
#include <sys/archsystm.h>
#include <sys/sdt.h>
#include <sys/smt.h>
#if defined(__x86) || defined(__amd64)
#include <sys/x86_archext.h>
#endif
#include <sys/callo.h>

extern int	mp_cpu_start(cpu_t *);
extern int	mp_cpu_stop(cpu_t *);
extern int	mp_cpu_poweron(cpu_t *);
extern int	mp_cpu_poweroff(cpu_t *);
extern int	mp_cpu_configure(int);
extern int	mp_cpu_unconfigure(int);
extern void	mp_cpu_faulted_enter(cpu_t *);
extern void	mp_cpu_faulted_exit(cpu_t *);

extern int cmp_cpu_to_chip(processorid_t cpuid);
#ifdef __sparcv9
extern char *cpu_fru_fmri(cpu_t *cp);
#endif

static void cpu_add_active_internal(cpu_t *cp);
static void cpu_remove_active(cpu_t *cp);
static void cpu_info_kstat_create(cpu_t *cp);
static void cpu_info_kstat_destroy(cpu_t *cp);
static void cpu_stats_kstat_create(cpu_t *cp);
static void cpu_stats_kstat_destroy(cpu_t *cp);

static int cpu_sys_stats_ks_update(kstat_t *ksp, int rw);
static int cpu_vm_stats_ks_update(kstat_t *ksp, int rw);
static int cpu_stat_ks_update(kstat_t *ksp, int rw);
static int cpu_state_change_hooks(int, cpu_setup_t, cpu_setup_t);

/*
 * cpu_lock protects ncpus, ncpus_online, cpu_flag, cpu_list, cpu_active,
 * max_cpu_seqid_ever, and dispatch queue reallocations.  The lock ordering with
 * respect to related locks is:
 *
 *	cpu_lock --> thread_free_lock  --->  p_lock  --->  thread_lock()
 *
 * Warning: Certain sections of code do not use the cpu_lock when
 * traversing the cpu_list (e.g. mutex_vector_enter(), clock()).  Since
 * all cpus are paused during modifications to this list, a solution
 * to protect the list is to either disable kernel preemption while
 * walking the list, *or* recheck the cpu_next pointer at each
 * iteration in the loop.  Note that in no case can any cached
 * copies of the cpu pointers be kept, as they may become invalid.
 */
kmutex_t cpu_lock;
cpu_t *cpu_list;		/* list of all CPUs */
cpu_t *clock_cpu_list;		/* used by clock to walk CPUs */
cpu_t *cpu_active;		/* list of active CPUs */
cpuset_t cpu_active_set;	/* cached set of active CPUs */
static cpuset_t cpu_available;	/* set of available CPUs */
cpuset_t cpu_seqid_inuse;	/* which cpu_seqids are in use */

cpu_t **cpu_seq;		/* ptrs to CPUs, indexed by seq_id */
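
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * source): a lock-free walk of cpu_list of the kind the warning above
 * permits relies on disabling kernel preemption around the walk, e.g.
 *
 *	cpu_t *cp;
 *
 *	kpreempt_disable();
 *	cp = cpu_list;
 *	do {
 *		inspect(cp);	(hypothetical; do not cache cp for later)
 *	} while ((cp = cp->cpu_next) != cpu_list);
 *	kpreempt_enable();
 *
 * Pausing all CPUs (which list updates require) cannot complete while a
 * CPU runs with preemption disabled, so the list stays stable for the
 * duration of the walk.
 */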
/*
 * max_ncpus keeps the max cpus the system can have. Initially
 * it's NCPU, but since most archs scan the devtree for cpus
 * fairly early on during boot, the real max can be known before
 * ncpus is set (useful for early NCPU based allocations).
 */
int max_ncpus = NCPU;
/*
 * Platforms that set max_ncpus to the maximum number of cpus that can be
 * dynamically added will set boot_max_ncpus to the number of cpus found
 * at device tree scan time during boot.
 */
int boot_max_ncpus = -1;
int boot_ncpus = -1;
/*
 * Maximum possible CPU id.  This can never be >= NCPU since NCPU is
 * used to size arrays that are indexed by CPU id.
 */
processorid_t max_cpuid = NCPU - 1;

/*
 * Maximum cpu_seqid ever given out. This number can only grow and never
 * shrink. It can be used to optimize NCPU loops to avoid going through
 * CPUs which were never on-line.
 */
processorid_t max_cpu_seqid_ever = 0;

int ncpus = 1;
int ncpus_online = 1;
int ncpus_intr_enabled = 1;

/*
 * CPU that we're trying to offline.  Protected by cpu_lock.
 */
cpu_t *cpu_inmotion;

/*
 * Can be raised to suppress further weakbindings, which are instead
 * satisfied by disabling preemption.  Must be raised/lowered under cpu_lock,
 * while individual thread weakbinding synchronization is done under thread
 * lock.
 */
int weakbindingbarrier;

/*
 * Variables used in pause_cpus().
 */
static volatile char safe_list[NCPU];

static struct _cpu_pause_info {
	int		cp_spl;		/* spl saved in pause_cpus() */
	volatile int	cp_go;		/* Go signal sent after all ready */
	int		cp_count;	/* # of CPUs to pause */
	ksema_t		cp_sem;		/* synch pause_cpus & cpu_pause */
	kthread_id_t	cp_paused;
	void		*(*cp_func)(void *);
} cpu_pause_info;

static kmutex_t pause_free_mutex;
static kcondvar_t pause_free_cv;

static struct cpu_sys_stats_ks_data {
	kstat_named_t cpu_ticks_idle;
	kstat_named_t cpu_ticks_user;
	kstat_named_t cpu_ticks_kernel;
	kstat_named_t cpu_ticks_wait;
	kstat_named_t cpu_nsec_idle;
	kstat_named_t cpu_nsec_user;
	kstat_named_t cpu_nsec_kernel;
	kstat_named_t cpu_nsec_dtrace;
	kstat_named_t cpu_nsec_intr;
	kstat_named_t cpu_load_intr;
	kstat_named_t wait_ticks_io;
	kstat_named_t dtrace_probes;
	kstat_named_t bread;
	kstat_named_t bwrite;
	kstat_named_t lread;
	kstat_named_t lwrite;
	kstat_named_t phread;
	kstat_named_t phwrite;
	kstat_named_t pswitch;
	kstat_named_t trap;
	kstat_named_t intr;
	kstat_named_t syscall;
	kstat_named_t sysread;
	kstat_named_t syswrite;
	kstat_named_t sysfork;
	kstat_named_t sysvfork;
	kstat_named_t sysexec;
	kstat_named_t readch;
	kstat_named_t writech;
	kstat_named_t rcvint;
	kstat_named_t xmtint;
	kstat_named_t mdmint;
	kstat_named_t rawch;
	kstat_named_t canch;
	kstat_named_t outch;
	kstat_named_t msg;
	kstat_named_t sema;
	kstat_named_t namei;
	kstat_named_t ufsiget;
	kstat_named_t ufsdirblk;
	kstat_named_t ufsipage;
	kstat_named_t ufsinopage;
	kstat_named_t procovf;
	kstat_named_t intrthread;
	kstat_named_t intrblk;
	kstat_named_t intrunpin;
	kstat_named_t idlethread;
	kstat_named_t inv_swtch;
	kstat_named_t nthreads;
	kstat_named_t cpumigrate;
	kstat_named_t xcalls;
	kstat_named_t mutex_adenters;
	kstat_named_t rw_rdfails;
	kstat_named_t rw_wrfails;
	kstat_named_t modload;
	kstat_named_t modunload;
	kstat_named_t bawrite;
	kstat_named_t iowait;
} cpu_sys_stats_ks_data_template = {
	{ "cpu_ticks_idle",	KSTAT_DATA_UINT64 },
	{ "cpu_ticks_user",	KSTAT_DATA_UINT64 },
	{ "cpu_ticks_kernel",	KSTAT_DATA_UINT64 },
	{ "cpu_ticks_wait",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_idle",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_user",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_kernel",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_dtrace",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_intr",	KSTAT_DATA_UINT64 },
	{ "cpu_load_intr",	KSTAT_DATA_UINT64 },
	{ "wait_ticks_io",	KSTAT_DATA_UINT64 },
	{ "dtrace_probes",	KSTAT_DATA_UINT64 },
	{ "bread",		KSTAT_DATA_UINT64 },
	{ "bwrite",		KSTAT_DATA_UINT64 },
	{ "lread",		KSTAT_DATA_UINT64 },
	{ "lwrite",		KSTAT_DATA_UINT64 },
	{ "phread",		KSTAT_DATA_UINT64 },
	{ "phwrite",		KSTAT_DATA_UINT64 },
	{ "pswitch",		KSTAT_DATA_UINT64 },
	{ "trap",		KSTAT_DATA_UINT64 },
	{ "intr",		KSTAT_DATA_UINT64 },
	{ "syscall",		KSTAT_DATA_UINT64 },
	{ "sysread",		KSTAT_DATA_UINT64 },
	{ "syswrite",		KSTAT_DATA_UINT64 },
	{ "sysfork",		KSTAT_DATA_UINT64 },
	{ "sysvfork",		KSTAT_DATA_UINT64 },
	{ "sysexec",		KSTAT_DATA_UINT64 },
	{ "readch",		KSTAT_DATA_UINT64 },
	{ "writech",		KSTAT_DATA_UINT64 },
	{ "rcvint",		KSTAT_DATA_UINT64 },
	{ "xmtint",		KSTAT_DATA_UINT64 },
	{ "mdmint",		KSTAT_DATA_UINT64 },
	{ "rawch",		KSTAT_DATA_UINT64 },
	{ "canch",		KSTAT_DATA_UINT64 },
	{ "outch",		KSTAT_DATA_UINT64 },
	{ "msg",		KSTAT_DATA_UINT64 },
	{ "sema",		KSTAT_DATA_UINT64 },
	{ "namei",		KSTAT_DATA_UINT64 },
	{ "ufsiget",		KSTAT_DATA_UINT64 },
	{ "ufsdirblk",		KSTAT_DATA_UINT64 },
	{ "ufsipage",		KSTAT_DATA_UINT64 },
	{ "ufsinopage",		KSTAT_DATA_UINT64 },
	{ "procovf",		KSTAT_DATA_UINT64 },
	{ "intrthread",		KSTAT_DATA_UINT64 },
	{ "intrblk",		KSTAT_DATA_UINT64 },
	{ "intrunpin",		KSTAT_DATA_UINT64 },
	{ "idlethread",		KSTAT_DATA_UINT64 },
	{ "inv_swtch",		KSTAT_DATA_UINT64 },
	{ "nthreads",		KSTAT_DATA_UINT64 },
	{ "cpumigrate",		KSTAT_DATA_UINT64 },
	{ "xcalls",		KSTAT_DATA_UINT64 },
	{ "mutex_adenters",	KSTAT_DATA_UINT64 },
	{ "rw_rdfails",		KSTAT_DATA_UINT64 },
	{ "rw_wrfails",		KSTAT_DATA_UINT64 },
	{ "modload",		KSTAT_DATA_UINT64 },
	{ "modunload",		KSTAT_DATA_UINT64 },
	{ "bawrite",		KSTAT_DATA_UINT64 },
	{ "iowait",		KSTAT_DATA_UINT64 },
};

static struct cpu_vm_stats_ks_data {
	kstat_named_t pgrec;
	kstat_named_t pgfrec;
	kstat_named_t pgin;
	kstat_named_t pgpgin;
	kstat_named_t pgout;
	kstat_named_t pgpgout;
	kstat_named_t swapin;
	kstat_named_t pgswapin;
	kstat_named_t swapout;
	kstat_named_t pgswapout;
	kstat_named_t zfod;
	kstat_named_t dfree;
	kstat_named_t scan;
	kstat_named_t rev;
	kstat_named_t hat_fault;
	kstat_named_t as_fault;
	kstat_named_t maj_fault;
	kstat_named_t cow_fault;
	kstat_named_t prot_fault;
	kstat_named_t softlock;
	kstat_named_t kernel_asflt;
	kstat_named_t pgrrun;
	kstat_named_t execpgin;
	kstat_named_t execpgout;
	kstat_named_t execfree;
	kstat_named_t anonpgin;
	kstat_named_t anonpgout;
	kstat_named_t anonfree;
	kstat_named_t fspgin;
	kstat_named_t fspgout;
	kstat_named_t fsfree;
} cpu_vm_stats_ks_data_template = {
	{ "pgrec",		KSTAT_DATA_UINT64 },
	{ "pgfrec",		KSTAT_DATA_UINT64 },
	{ "pgin",		KSTAT_DATA_UINT64 },
	{ "pgpgin",		KSTAT_DATA_UINT64 },
	{ "pgout",		KSTAT_DATA_UINT64 },
	{ "pgpgout",		KSTAT_DATA_UINT64 },
	{ "swapin",		KSTAT_DATA_UINT64 },
	{ "pgswapin",		KSTAT_DATA_UINT64 },
	{ "swapout",		KSTAT_DATA_UINT64 },
	{ "pgswapout",		KSTAT_DATA_UINT64 },
	{ "zfod",		KSTAT_DATA_UINT64 },
	{ "dfree",		KSTAT_DATA_UINT64 },
	{ "scan",		KSTAT_DATA_UINT64 },
	{ "rev",		KSTAT_DATA_UINT64 },
	{ "hat_fault",		KSTAT_DATA_UINT64 },
	{ "as_fault",		KSTAT_DATA_UINT64 },
	{ "maj_fault",		KSTAT_DATA_UINT64 },
	{ "cow_fault",		KSTAT_DATA_UINT64 },
	{ "prot_fault",		KSTAT_DATA_UINT64 },
	{ "softlock",		KSTAT_DATA_UINT64 },
	{ "kernel_asflt",	KSTAT_DATA_UINT64 },
	{ "pgrrun",		KSTAT_DATA_UINT64 },
	{ "execpgin",		KSTAT_DATA_UINT64 },
	{ "execpgout",		KSTAT_DATA_UINT64 },
	{ "execfree",		KSTAT_DATA_UINT64 },
	{ "anonpgin",		KSTAT_DATA_UINT64 },
	{ "anonpgout",		KSTAT_DATA_UINT64 },
	{ "anonfree",		KSTAT_DATA_UINT64 },
	{ "fspgin",		KSTAT_DATA_UINT64 },
	{ "fspgout",		KSTAT_DATA_UINT64 },
	{ "fsfree",		KSTAT_DATA_UINT64 },
};
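
/*
 * Note for illustration: the two templates above back the per-CPU "sys"
 * and "vm" named kstats in the "cpu" module, so individual counters can
 * typically be read from userland with kstat(1M), e.g. (hypothetical
 * invocations; counter availability may vary by platform):
 *
 *	$ kstat -p cpu:0:sys:pswitch
 *	$ kstat -p cpu:0:vm:as_fault
 */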

/*
 * Force the specified thread to migrate to the appropriate processor.
 * Called with thread lock held, returns with it dropped.
 */
static void
force_thread_migrate(kthread_id_t tp)
{
	ASSERT(THREAD_LOCK_HELD(tp));
	if (tp == curthread) {
		THREAD_TRANSITION(tp);
		CL_SETRUN(tp);
		thread_unlock_nopreempt(tp);
		swtch();
	} else {
		if (tp->t_state == TS_ONPROC) {
			cpu_surrender(tp);
		} else if (tp->t_state == TS_RUN) {
			(void) dispdeq(tp);
			setbackdq(tp);
		}
		thread_unlock(tp);
	}
}

/*
 * Set affinity for a specified CPU.
 *
 * Specifying a cpu_id of CPU_CURRENT, allowed _only_ when setting affinity for
 * curthread, will set affinity to the CPU on which the thread is currently
 * running.  For other cpu_id values, the caller must ensure that the
 * referenced CPU remains valid, which can be done by holding cpu_lock across
 * this call.
 *
 * CPU affinity is guaranteed after return of thread_affinity_set().  If a
 * caller setting affinity to CPU_CURRENT requires that its thread not migrate
 * CPUs prior to a successful return, it should take extra precautions (such as
 * its own call to kpreempt_disable) to ensure that safety.
 *
 * CPU_BEST can be used to pick a "best" CPU to migrate to, including
 * potentially the current CPU.
 *
 * A CPU affinity reference count is maintained by thread_affinity_set and
 * thread_affinity_clear (incrementing and decrementing it, respectively),
 * maintaining CPU affinity while the count is non-zero, and allowing regions
 * of code which require affinity to be nested.
 */
void
thread_affinity_set(kthread_id_t t, int cpu_id)
{
	cpu_t *cp;

	ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));

	if (cpu_id == CPU_CURRENT) {
		VERIFY3P(t, ==, curthread);
		kpreempt_disable();
		cp = CPU;
	} else if (cpu_id == CPU_BEST) {
		VERIFY3P(t, ==, curthread);
		kpreempt_disable();
		cp = disp_choose_best_cpu();
	} else {
		/*
		 * We should be asserting that cpu_lock is held here, but
		 * the NCA code doesn't acquire it.  The following assert
		 * should be uncommented when the NCA code is fixed.
		 *
		 * ASSERT(MUTEX_HELD(&cpu_lock));
		 */
		VERIFY((cpu_id >= 0) && (cpu_id < NCPU));
		cp = cpu[cpu_id];

		/* user must provide a good cpu_id */
		VERIFY(cp != NULL);
	}

	/*
	 * If there is already a hard affinity requested, and this affinity
	 * conflicts with that, panic.
	 */
	thread_lock(t);
	if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
		panic("affinity_set: setting %p but already bound to %p",
		    (void *)cp, (void *)t->t_bound_cpu);
	}
	t->t_affinitycnt++;
	t->t_bound_cpu = cp;

	/*
	 * Make sure we're running on the right CPU.
	 */
	if (cp != t->t_cpu || t != curthread) {
		ASSERT(cpu_id != CPU_CURRENT);
		force_thread_migrate(t);	/* drops thread lock */
	} else {
		thread_unlock(t);
	}

	if (cpu_id == CPU_CURRENT || cpu_id == CPU_BEST)
		kpreempt_enable();
}

/*
 * Wrapper for backward compatibility.
 */
void
affinity_set(int cpu_id)
{
	thread_affinity_set(curthread, cpu_id);
}

/*
 * Decrement the affinity reservation count and if it becomes zero,
 * clear the CPU affinity for the current thread, or set it to the user's
 * software binding request.
 */
void
thread_affinity_clear(kthread_id_t t)
{
	register processorid_t binding;

	thread_lock(t);
	if (--t->t_affinitycnt == 0) {
		if ((binding = t->t_bind_cpu) == PBIND_NONE) {
			/*
			 * Adjust disp_max_unbound_pri if necessary.
			 */
			disp_adjust_unbound_pri(t);
			t->t_bound_cpu = NULL;
			if (t->t_cpu->cpu_part != t->t_cpupart) {
				force_thread_migrate(t);
				return;
			}
		} else {
			t->t_bound_cpu = cpu[binding];
			/*
			 * Make sure the thread is running on the bound CPU.
			 */
			if (t->t_cpu != t->t_bound_cpu) {
				force_thread_migrate(t);
				return;		/* already dropped lock */
			}
		}
	}
	thread_unlock(t);
}

/*
 * Wrapper for backward compatibility.
 */
void
affinity_clear(void)
{
	thread_affinity_clear(curthread);
}
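
/*
 * Illustrative usage (hypothetical caller, not part of the original
 * source): a region that must stay on one CPU but may block can be
 * bracketed as
 *
 *	thread_affinity_set(curthread, CPU_CURRENT);
 *	... work that must not migrate; blocking is permitted ...
 *	thread_affinity_clear(curthread);
 *
 * Calls may nest; affinity persists until the reference count kept by
 * this pair drops back to zero.
 */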

/*
 * Weak cpu affinity.  Bind to the "current" cpu for short periods
 * of time during which the thread must not block (but may be preempted).
 * Use this instead of kpreempt_disable() when it is only "no migration"
 * rather than "no preemption" semantics that are required - disabling
 * preemption holds higher priority threads off of cpu and if the
 * operation that is protected is more than momentary this is not good
 * for realtime etc.
 *
 * Weakly bound threads will not prevent a cpu from being offlined -
 * we'll only run them on the cpu to which they are weakly bound but
 * (because they do not block) we'll always be able to move them on to
 * another cpu at offline time if we give them just a short moment to
 * run during which they will unbind.  To give a cpu a chance of offlining,
 * however, we require a barrier to weak bindings that may be raised for a
 * given cpu (offline/move code may set this and then wait a short time for
 * existing weak bindings to drop); the cpu_inmotion pointer is that barrier.
 *
 * There are few restrictions on the calling context of thread_nomigrate.
 * The caller must not hold the thread lock.  Calls may be nested.
 *
 * After weakbinding a thread must not perform actions that may block.
 * In particular it must not call thread_affinity_set; calling that when
 * already weakbound is nonsensical anyway.
 *
 * If curthread is prevented from migrating for other reasons
 * (kernel preemption disabled; high pil; strongly bound; interrupt thread)
 * then the weak binding will succeed even if this cpu is the target of an
 * offline/move request.
 */
void
thread_nomigrate(void)
{
	cpu_t *cp;
	kthread_id_t t = curthread;

again:
	kpreempt_disable();
	cp = CPU;

	/*
	 * A highlevel interrupt must not modify t_nomigrate or
	 * t_weakbound_cpu of the thread it has interrupted.  A lowlevel
	 * interrupt thread cannot migrate and we can avoid the
	 * thread_lock call below by short-circuiting here.  In either
	 * case we can just return since no migration is possible and
	 * the condition will persist (ie, when we test for these again
	 * in thread_allowmigrate they can't have changed).  Migration
	 * is also impossible if we're at or above DISP_LEVEL pil.
	 */
	if (CPU_ON_INTR(cp) || t->t_flag & T_INTR_THREAD ||
	    getpil() >= DISP_LEVEL) {
		kpreempt_enable();
		return;
	}

	/*
	 * We must be consistent with existing weak bindings.  Since we
	 * may be interrupted between the increment of t_nomigrate and
	 * the store to t_weakbound_cpu below we cannot assume that
	 * t_weakbound_cpu will be set if t_nomigrate is.  Note that we
	 * cannot assert t_weakbound_cpu == t_bind_cpu since that is not
	 * always the case.
	 */
	if (t->t_nomigrate && t->t_weakbound_cpu && t->t_weakbound_cpu != cp) {
		if (!panicstr)
			panic("thread_nomigrate: binding to %p but already "
			    "bound to %p", (void *)cp,
			    (void *)t->t_weakbound_cpu);
	}

	/*
	 * At this point we have preemption disabled and we don't yet hold
	 * the thread lock.  So it's possible that somebody else could
	 * set t_bind_cpu here and not be able to force us across to the
	 * new cpu (since we have preemption disabled).
	 */
	thread_lock(curthread);

	/*
	 * If further weak bindings are being (temporarily) suppressed then
	 * we'll settle for disabling kernel preemption (which assures
	 * no migration, provided the thread does not block, which it is
	 * not allowed to do if using thread_nomigrate).  We must remember
	 * this disposition so we can take appropriate action in
	 * thread_allowmigrate.  If this is a nested call and the
	 * thread is already weakbound then fall through as normal.
	 * We remember the decision to settle for kpreempt_disable through
	 * negative nesting counting in t_nomigrate.  Once a thread has had one
	 * weakbinding request satisfied in this way any further (nested)
	 * requests will continue to be satisfied in the same way,
	 * even if weak bindings have recommenced.
	 */
	if (t->t_nomigrate < 0 || weakbindingbarrier && t->t_nomigrate == 0) {
		--t->t_nomigrate;
		thread_unlock(curthread);
		return;		/* with kpreempt_disable still active */
	}

	/*
	 * We hold thread_lock so t_bind_cpu cannot change.  We could,
	 * however, be running on a different cpu to which we are t_bound_cpu
	 * to (as explained above).  If we grant the weak binding request
	 * in that case then the dispatcher must favour our weak binding
	 * over our strong (in which case, just as when preemption is
	 * disabled, we can continue to run on a cpu other than the one to
	 * which we are strongbound; the difference in this case is that
	 * this thread can be preempted and so can appear on the dispatch
	 * queues of a cpu other than the one it is strongbound to).
	 *
	 * If the cpu we are running on does not appear to be a current
	 * offline target (we check cpu_inmotion to determine this - since
	 * we don't hold cpu_lock we may not see a recent store to that,
	 * so it's possible that we at times can grant a weak binding to a
	 * cpu that is an offline target, but that one request will not
	 * prevent the offline from succeeding) then we will always grant
	 * the weak binding request.  This includes the case above where
	 * we grant a weakbinding not commensurate with our strong binding.
	 *
	 * If our cpu does appear to be an offline target then we're inclined
	 * not to grant the weakbinding request just yet - we'd prefer to
	 * migrate to another cpu and grant the request there.  The
	 * exceptions are those cases where going through preemption code
	 * will not result in us changing cpu:
	 *
	 *	. interrupts have already bypassed this case (see above)
	 *	. we are already weakbound to this cpu (dispatcher code will
	 *	  always return us to the weakbound cpu)
	 *	. preemption was disabled even before we disabled it above
	 *	. we are strongbound to this cpu (if we're strongbound to
	 *	  another and not yet running there the trip through the
	 *	  dispatcher will move us to the strongbound cpu and we
	 *	  will grant the weak binding there)
	 */
	if (cp != cpu_inmotion || t->t_nomigrate > 0 || t->t_preempt > 1 ||
	    t->t_bound_cpu == cp) {
		/*
		 * Don't be tempted to store to t_weakbound_cpu only on
		 * the first nested bind request - if we're interrupted
		 * after the increment of t_nomigrate and before the
		 * store to t_weakbound_cpu and the interrupt calls
		 * thread_nomigrate then the assertion in thread_allowmigrate
		 * would fail.
		 */
		t->t_nomigrate++;
		t->t_weakbound_cpu = cp;
		membar_producer();
		thread_unlock(curthread);
		/*
		 * Now that we have dropped the thread_lock another thread
		 * can set our t_weakbound_cpu, and will try to migrate us
		 * to the strongbound cpu (which will not be prevented by
		 * preemption being disabled since we're about to enable
		 * preemption).  We have granted the weakbinding to the current
		 * cpu, so again we are in the position that it is possible
		 * that our weak and strong bindings differ.  Again this
		 * is catered for by dispatcher code which will favour our
		 * weak binding.
		 */
		kpreempt_enable();
	} else {
		/*
		 * Move to another cpu before granting the request by
		 * forcing this thread through preemption code.  When we
		 * get to set{front,back}dq called from CL_PREEMPT()
		 * cpu_choose() will be used to select a cpu to queue
		 * us on - that will see cpu_inmotion and take
		 * steps to avoid returning us to this cpu.
		 */
		cp->cpu_kprunrun = 1;
		thread_unlock(curthread);
		kpreempt_enable();	/* will call preempt() */
		goto again;
	}
}

void
thread_allowmigrate(void)
{
	kthread_id_t t = curthread;

	ASSERT(t->t_weakbound_cpu == CPU ||
	    (t->t_nomigrate < 0 && t->t_preempt > 0) ||
	    CPU_ON_INTR(CPU) || t->t_flag & T_INTR_THREAD ||
	    getpil() >= DISP_LEVEL);

	if (CPU_ON_INTR(CPU) || (t->t_flag & T_INTR_THREAD) ||
	    getpil() >= DISP_LEVEL)
		return;

	if (t->t_nomigrate < 0) {
		/*
		 * This thread was granted "weak binding" in the
		 * stronger form of kernel preemption disabling.
		 * Undo a level of nesting for both t_nomigrate
		 * and t_preempt.
		 */
		++t->t_nomigrate;
		kpreempt_enable();
	} else if (--t->t_nomigrate == 0) {
		/*
		 * Time to drop the weak binding.  We need to cater
		 * for the case where we're weakbound to a different
		 * cpu than that to which we're strongbound (a very
		 * temporary arrangement that must only persist until
		 * weak binding drops).  We don't acquire thread_lock
		 * here so even as this code executes t_bound_cpu
		 * may be changing.  So we disable preemption and
		 * a) in the case that t_bound_cpu changes while we
		 * have preemption disabled kprunrun will be set
		 * asynchronously, and b) if before disabling
		 * preemption we were already on a different cpu to
		 * our t_bound_cpu then we set kprunrun ourselves
		 * to force a trip through the dispatcher when
		 * preemption is enabled.
		 */
		kpreempt_disable();
		if (t->t_bound_cpu &&
		    t->t_weakbound_cpu != t->t_bound_cpu)
			CPU->cpu_kprunrun = 1;
		t->t_weakbound_cpu = NULL;
		membar_producer();
		kpreempt_enable();
	}
}
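
/*
 * Illustrative usage (hypothetical caller, not part of the original
 * source): a short, non-blocking section that must not migrate, but
 * which should remain preemptible, is bracketed as
 *
 *	thread_nomigrate();
 *	... brief per-CPU work; must not block ...
 *	thread_allowmigrate();
 *
 * in preference to kpreempt_disable()/kpreempt_enable(), which would
 * also hold higher priority threads off the CPU.
 */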

/*
 * weakbinding_stop can be used to temporarily cause weakbindings made
 * with thread_nomigrate to be satisfied through the stronger action of
 * kpreempt_disable.  weakbinding_start recommences normal weakbinding.
 */

void
weakbinding_stop(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	weakbindingbarrier = 1;
	membar_producer();	/* make visible before subsequent thread_lock */
}

void
weakbinding_start(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	weakbindingbarrier = 0;
}

void
null_xcall(void)
{
}

/*
 * This routine is called to place the CPUs in a safe place so that
 * one of them can be taken off line or placed on line.  What we are
 * trying to do here is prevent a thread from traversing the list
 * of active CPUs while we are changing it or from getting placed on
 * the run queue of a CPU that has just gone off line.  We do this by
 * creating a thread with the highest possible prio for each CPU and
 * having it call this routine.  The advantage of this method is that
 * we can eliminate all checks for CPU_ACTIVE in the disp routines.
 * This makes disp faster at the expense of making p_online() slower,
 * which is a good trade off.
 */
static void
cpu_pause(int index)
{
	int s;
	struct _cpu_pause_info *cpi = &cpu_pause_info;
	volatile char *safe = &safe_list[index];
	long lindex = index;

	ASSERT((curthread->t_bound_cpu != NULL) || (*safe == PAUSE_DIE));

	while (*safe != PAUSE_DIE) {
		*safe = PAUSE_READY;
		membar_enter();		/* make sure stores are flushed */
		sema_v(&cpi->cp_sem);	/* signal requesting thread */

		/*
		 * Wait here until all pause threads are running.  That
		 * indicates that it's safe to do the spl.  Until
		 * cpu_pause_info.cp_go is set, we don't want to spl
		 * because that might block clock interrupts needed
		 * to preempt threads on other CPUs.
		 */
		while (cpi->cp_go == 0)
			;
		/*
		 * Even though we are at the highest disp prio, we need
		 * to block out all interrupts below LOCK_LEVEL so that
		 * an intr doesn't come in, wake up a thread, and call
		 * setbackdq/setfrontdq.
		 */
		s = splhigh();
		/*
		 * If cp_func has been set, call it using index as the
		 * argument; currently this is only used by
		 * cpr_suspend_cpus().  This function is the code executed
		 * on the "paused" CPUs when a machine comes out of a sleep
		 * state in which CPUs were powered off.  (It could also be
		 * used for hotplugging CPUs.)
		 */
		if (cpi->cp_func != NULL)
			(*cpi->cp_func)((void *)lindex);

		mach_cpu_pause(safe);

		splx(s);
		/*
		 * Waiting is at an end.  Switch out of cpu_pause
		 * loop and resume useful work.
		 */
		swtch();
	}

	mutex_enter(&pause_free_mutex);
	*safe = PAUSE_DEAD;
	cv_broadcast(&pause_free_cv);
	mutex_exit(&pause_free_mutex);
}

/*
 * Allow the cpus to start running again.
 */
void
start_cpus()
{
	int i;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_pause_info.cp_paused);
	cpu_pause_info.cp_paused = NULL;
	for (i = 0; i < NCPU; i++)
		safe_list[i] = PAUSE_IDLE;
	membar_enter();			/* make sure stores are flushed */
	affinity_clear();
	splx(cpu_pause_info.cp_spl);
	kpreempt_enable();
}

/*
 * Allocate a pause thread for a CPU.
 */
static void
cpu_pause_alloc(cpu_t *cp)
{
	kthread_id_t	t;
	long		cpun = cp->cpu_id;

	/*
	 * Note, v.v_nglobpris will not change value as long as I hold
	 * cpu_lock.
	 */
	t = thread_create(NULL, 0, cpu_pause, (void *)cpun,
	    0, &p0, TS_STOPPED, v.v_nglobpris - 1);
	thread_lock(t);
	t->t_bound_cpu = cp;
	t->t_disp_queue = cp->cpu_disp;
	t->t_affinitycnt = 1;
	t->t_preempt = 1;
	thread_unlock(t);
	cp->cpu_pause_thread = t;
	/*
	 * Registering a thread in the callback table is usually done
	 * in the initialization code of the thread.  In this
	 * case, we do it right after thread creation because the
	 * thread itself may never run, and we need to register the
	 * fact that it is safe for cpr suspend.
	 */
	CALLB_CPR_INIT_SAFE(t, "cpu_pause");
}

/*
 * Free a pause thread for a CPU.
 */
static void
cpu_pause_free(cpu_t *cp)
{
	kthread_id_t	t;
	int		cpun = cp->cpu_id;

	ASSERT(MUTEX_HELD(&cpu_lock));
	/*
	 * We have to get the thread and tell it to die.
	 */
	if ((t = cp->cpu_pause_thread) == NULL) {
		ASSERT(safe_list[cpun] == PAUSE_IDLE);
		return;
	}
	thread_lock(t);
	t->t_cpu = CPU;		/* disp gets upset if last cpu is quiesced. */
	t->t_bound_cpu = NULL;	/* Must un-bind; cpu may not be running. */
	t->t_pri = v.v_nglobpris - 1;
	ASSERT(safe_list[cpun] == PAUSE_IDLE);
	safe_list[cpun] = PAUSE_DIE;
	THREAD_TRANSITION(t);
	setbackdq(t);
	thread_unlock_nopreempt(t);

	/*
	 * If we don't wait for the thread to actually die, it may try to
	 * run on the wrong cpu as part of an actual call to pause_cpus().
	 */
	mutex_enter(&pause_free_mutex);
	while (safe_list[cpun] != PAUSE_DEAD) {
		cv_wait(&pause_free_cv, &pause_free_mutex);
	}
	mutex_exit(&pause_free_mutex);
	safe_list[cpun] = PAUSE_IDLE;

	cp->cpu_pause_thread = NULL;
}

/*
 * Initialize basic structures for pausing CPUs.
 */
void
cpu_pause_init()
{
	sema_init(&cpu_pause_info.cp_sem, 0, NULL, SEMA_DEFAULT, NULL);
	/*
	 * Create initial CPU pause thread.
	 */
	cpu_pause_alloc(CPU);
}

/*
 * Start the threads used to pause another CPU.
 */
static int
cpu_pause_start(processorid_t cpu_id)
{
	int	i;
	int	cpu_count = 0;

	for (i = 0; i < NCPU; i++) {
		cpu_t		*cp;
		kthread_id_t	t;

		cp = cpu[i];
		if (!CPU_IN_SET(cpu_available, i) || (i == cpu_id)) {
			safe_list[i] = PAUSE_WAIT;
			continue;
		}

		/*
		 * Skip CPU if it is quiesced or not yet started.
		 */
		if ((cp->cpu_flags & (CPU_QUIESCED | CPU_READY)) != CPU_READY) {
			safe_list[i] = PAUSE_WAIT;
			continue;
		}

		/*
		 * Start this CPU's pause thread.
		 */
		t = cp->cpu_pause_thread;
		thread_lock(t);
		/*
		 * Reset the priority, since nglobpris may have
		 * changed since the thread was created, if someone
		 * has loaded the RT (or some other) scheduling
		 * class.
		 */
		t->t_pri = v.v_nglobpris - 1;
		THREAD_TRANSITION(t);
		setbackdq(t);
		thread_unlock_nopreempt(t);
		++cpu_count;
	}
	return (cpu_count);
}

/*
 * Pause all of the CPUs except the one we are on by creating a high
 * priority thread bound to those CPUs.
 *
 * Note that one must be extremely careful regarding code
 * executed while CPUs are paused.  Since a CPU may be paused
 * while a thread scheduling on that CPU is holding an adaptive
 * lock, code executed with CPUs paused must not acquire adaptive
 * (or low-level spin) locks.
/*
 * Pause all of the CPUs except the one we are on by creating a high
 * priority thread bound to those CPUs.
 *
 * Note that one must be extremely careful regarding code
 * executed while CPUs are paused.  Since a CPU may be paused
 * while a thread scheduling on that CPU is holding an adaptive
 * lock, code executed with CPUs paused must not acquire adaptive
 * (or low-level spin) locks.  Also, such code must not block,
 * since the thread that is supposed to initiate the wakeup may
 * never run.
 *
 * With a few exceptions, the restrictions on code executed with CPUs
 * paused match those for code executed at high-level interrupt
 * context.
 */
void
pause_cpus(cpu_t *off_cp, void *(*func)(void *))
{
	processorid_t	cpu_id;
	int		i;
	struct _cpu_pause_info	*cpi = &cpu_pause_info;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpi->cp_paused == NULL);
	cpi->cp_count = 0;
	cpi->cp_go = 0;
	for (i = 0; i < NCPU; i++)
		safe_list[i] = PAUSE_IDLE;
	kpreempt_disable();

	cpi->cp_func = func;

	/*
	 * If running on the cpu that is going offline, get off it.
	 * This is so that it won't be necessary to rechoose a CPU
	 * when done.
	 */
	if (CPU == off_cp)
		cpu_id = off_cp->cpu_next_part->cpu_id;
	else
		cpu_id = CPU->cpu_id;
	affinity_set(cpu_id);

	/*
	 * Start the pause threads and record how many were started.
	 */
	cpi->cp_count = cpu_pause_start(cpu_id);

	/*
	 * Now wait for all CPUs to be running the pause thread.
	 */
	while (cpi->cp_count > 0) {
		/*
		 * Spin reading the count without grabbing the disp
		 * lock to make sure we don't prevent the pause
		 * threads from getting the lock.
		 */
		while (sema_held(&cpi->cp_sem))
			;
		if (sema_tryp(&cpi->cp_sem))
			--cpi->cp_count;
	}
	cpi->cp_go = 1;			/* all have reached cpu_pause */

	/*
	 * Now wait for all CPUs to spl.  (Transition from PAUSE_READY
	 * to PAUSE_WAIT.)
	 */
	for (i = 0; i < NCPU; i++) {
		while (safe_list[i] != PAUSE_WAIT)
			;
	}
	cpi->cp_spl = splhigh();	/* block dispatcher on this CPU */
	cpi->cp_paused = curthread;
}

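/*
 * For illustration only -- a hypothetical caller brackets its critical
 * update with pause_cpus()/start_cpus() while holding cpu_lock, taking
 * no adaptive locks and never blocking in between:
 *
 *	mutex_enter(&cpu_lock);
 *	pause_cpus(NULL, NULL);
 *	... update data shared with other CPUs ...
 *	start_cpus();
 *	mutex_exit(&cpu_lock);
 */
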
/*
 * Check whether the current thread has CPUs paused.
 */
int
cpus_paused(void)
{
	if (cpu_pause_info.cp_paused != NULL) {
		ASSERT(cpu_pause_info.cp_paused == curthread);
		return (1);
	}
	return (0);
}

static cpu_t *
cpu_get_all(processorid_t cpun)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cpun >= NCPU || cpun < 0 || !CPU_IN_SET(cpu_available, cpun))
		return (NULL);
	return (cpu[cpun]);
}

/*
 * Check whether cpun is a valid processor id and whether it should be
 * visible from the current zone.  If it is, return a pointer to the
 * associated CPU structure.
 */
cpu_t *
cpu_get(processorid_t cpun)
{
	cpu_t *c;

	ASSERT(MUTEX_HELD(&cpu_lock));
	c = cpu_get_all(cpun);
	if (c != NULL && !INGLOBALZONE(curproc) && pool_pset_enabled() &&
	    zone_pset_get(curproc->p_zone) != cpupart_query_cpu(c))
		return (NULL);
	return (c);
}

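/*
 * For illustration only -- a hypothetical in-kernel lookup.  For a
 * caller in a non-global zone with pools enabled, a NULL return may
 * mean "exists but not visible here" rather than "no such CPU"
 * (cpu_get_all() is the unfiltered variant):
 *
 *	mutex_enter(&cpu_lock);
 *	if ((cp = cpu_get(cpuid)) != NULL && cpu_is_online(cp))
 *		... act on cp ...
 *	mutex_exit(&cpu_lock);
 */
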
/*
 * The following functions should be used to check CPU states in the kernel.
 * They should be invoked with cpu_lock held.  Kernel subsystems interested
 * in CPU states should *not* use cpu_get_state() and various P_ONLINE/etc
 * states.  Those are for user-land (and system call) use only.
 */

/*
 * Determine whether the CPU is online and handling interrupts.
 */
int
cpu_is_online(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_online(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is offline (this includes spare and faulted).
 */
int
cpu_is_offline(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_offline(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is powered off.
 */
int
cpu_is_poweredoff(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_poweredoff(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is active but not handling interrupts.
 */
int
cpu_is_nointr(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_nointr(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is active (scheduling threads).
 */
int
cpu_is_active(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_active(cpu->cpu_flags));
}

/*
 * Same as above, but these require cpu_flags instead of cpu_t pointers.
 */
int
cpu_flagged_online(cpu_flag_t cpu_flags)
{
	return (cpu_flagged_active(cpu_flags) &&
	    (cpu_flags & CPU_ENABLE));
}

int
cpu_flagged_offline(cpu_flag_t cpu_flags)
{
	return (((cpu_flags & CPU_POWEROFF) == 0) &&
	    ((cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY));
}

int
cpu_flagged_poweredoff(cpu_flag_t cpu_flags)
{
	return ((cpu_flags & CPU_POWEROFF) == CPU_POWEROFF);
}

int
cpu_flagged_nointr(cpu_flag_t cpu_flags)
{
	return (cpu_flagged_active(cpu_flags) &&
	    (cpu_flags & CPU_ENABLE) == 0);
}

int
cpu_flagged_active(cpu_flag_t cpu_flags)
{
	return (((cpu_flags & (CPU_POWEROFF | CPU_FAULTED | CPU_SPARE)) == 0) &&
	    ((cpu_flags & (CPU_READY | CPU_OFFLINE)) == CPU_READY));
}

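/*
 * A sketch of the implications that follow from the definitions above
 * (for illustration; the flag tests themselves are authoritative):
 *
 *	- cpu_flagged_online() and cpu_flagged_nointr() each imply
 *	  cpu_flagged_active(); they differ only in CPU_ENABLE and are
 *	  therefore mutually exclusive.
 *	- cpu_flagged_active(), cpu_flagged_offline() and
 *	  cpu_flagged_poweredoff() are pairwise exclusive.
 */
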
/*
 * Bring the indicated CPU online.
 */
int
cpu_online(cpu_t *cp, int flags)
{
	int	error = 0;

	/*
	 * Handle on-line request.
	 *	This code must put the new CPU on the active list before
	 *	starting it because it will not be paused, and will start
	 *	using the active list immediately.  The real start occurs
	 *	when the CPU_QUIESCED flag is turned off.
	 */

	ASSERT(MUTEX_HELD(&cpu_lock));

	if ((cp->cpu_flags & CPU_DISABLED) && !smt_can_enable(cp, flags))
		return (EINVAL);

	/*
	 * Put all the cpus into a known safe place.
	 * No mutexes can be entered while CPUs are paused.
	 */
	error = mp_cpu_start(cp);	/* arch-dep hook */
	if (error == 0) {
		pg_cpupart_in(cp, cp->cpu_part);
		pause_cpus(NULL, NULL);
		cpu_add_active_internal(cp);
		if (cp->cpu_flags & CPU_FAULTED) {
			cp->cpu_flags &= ~CPU_FAULTED;
			mp_cpu_faulted_exit(cp);
		}

		if (cp->cpu_flags & CPU_DISABLED)
			smt_force_enabled();

		cp->cpu_flags &= ~(CPU_QUIESCED | CPU_OFFLINE | CPU_FROZEN |
		    CPU_SPARE | CPU_DISABLED);
		CPU_NEW_GENERATION(cp);
		start_cpus();
		cpu_stats_kstat_create(cp);
		cpu_create_intrstat(cp);
		lgrp_kstat_create(cp);
		cpu_state_change_notify(cp->cpu_id, CPU_ON);
		cpu_intr_enable(cp);	/* arch-dep hook */
		cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
		cpu_set_state(cp);
		cyclic_online(cp);
		/*
		 * This has to be called only after cyclic_online().  This
		 * function uses cyclics.
		 */
		callout_cpu_online(cp);
		poke_cpu(cp->cpu_id);
	}

	return (error);
}

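/*
 * For illustration only -- a hypothetical caller (the p_online(2) path
 * takes roughly this shape) driving a CPU online:
 *
 *	mutex_enter(&cpu_lock);
 *	if ((cp = cpu_get(cpuid)) == NULL)
 *		error = EINVAL;
 *	else
 *		error = cpu_online(cp, 0);
 *	mutex_exit(&cpu_lock);
 */
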
/*
 * Take the indicated CPU offline.
 */
int
cpu_offline(cpu_t *cp, int flags)
{
	cpupart_t *pp;
	int	error = 0;
	cpu_t	*ncp;
	int	intr_enable;
	int	cyclic_off = 0;
	int	callout_off = 0;
	int	loop_count;
	int	no_quiesce = 0;
	int	(*bound_func)(struct cpu *, int);
	kthread_t *t;
	lpl_t	*cpu_lpl;
	proc_t	*p;
	int	lgrp_diff_lpl;
	boolean_t forced = (flags & CPU_FORCED) != 0;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cp->cpu_flags & CPU_DISABLED)
		return (EINVAL);

	/*
	 * If we're going from faulted or spare to offline, just
	 * clear these flags and update CPU state.
	 */
	if (cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) {
		if (cp->cpu_flags & CPU_FAULTED) {
			cp->cpu_flags &= ~CPU_FAULTED;
			mp_cpu_faulted_exit(cp);
		}
		cp->cpu_flags &= ~CPU_SPARE;
		cpu_set_state(cp);
		return (0);
	}

	/*
	 * Handle off-line request.
	 */
	pp = cp->cpu_part;
	/*
	 * Don't offline the last online CPU in a partition.
	 */
	if (ncpus_online <= 1 || pp->cp_ncpus <= 1 || cpu_intr_count(cp) < 2)
		return (EBUSY);
	/*
	 * Unbind all threads that are soft-bound to our CPU, and hard-bound
	 * threads as well if we were asked to (CPU_FORCED).
	 */
	error = cpu_unbind(cp->cpu_id, forced);
	if (error != 0)
		return (error);
	/*
	 * We shouldn't be bound to this CPU ourselves.
	 */
	if (curthread->t_bound_cpu == cp)
		return (EBUSY);

	/*
	 * Tell interested parties that this CPU is going offline.
	 */
	CPU_NEW_GENERATION(cp);
	cpu_state_change_notify(cp->cpu_id, CPU_OFF);

	/*
	 * Tell the PG subsystem that the CPU is leaving the partition.
	 */
	pg_cpupart_out(cp, pp);

	/*
	 * Take the CPU out of interrupt participation so we won't find
	 * bound kernel threads.  If the architecture cannot completely
	 * shut off interrupts on the CPU, don't quiesce it, but don't
	 * run anything but interrupt thread... this is indicated by
	 * the CPU_OFFLINE flag being on but the CPU_QUIESCED flag being
	 * off.
	 */
	intr_enable = cp->cpu_flags & CPU_ENABLE;
	if (intr_enable)
		no_quiesce = cpu_intr_disable(cp);

	/*
	 * Record that we are aiming to offline this cpu.  This acts as
	 * a barrier to further weak binding requests in thread_nomigrate
	 * and also causes cpu_choose, disp_lowpri_cpu and setfrontdq to
	 * lean away from this cpu.  Further strong bindings are already
	 * avoided since we hold cpu_lock.  Since threads that are set
	 * runnable around now and others coming off the target cpu are
	 * directed away from the target, existing strong and weak bindings
	 * (especially the latter) to the target cpu stand maximum chance of
	 * being able to unbind during the short delay loop below (if other
	 * unbound threads compete they may not see cpu in time to unbind
	 * even if they would do so immediately).
	 */
	cpu_inmotion = cp;
	membar_enter();

	/*
	 * Check for kernel threads (strong or weak) bound to that CPU.
	 * Strongly bound threads may not unbind, and we'll have to return
	 * EBUSY.  Weakly bound threads should always disappear - we've
	 * stopped more weak binding with cpu_inmotion and existing
	 * bindings will drain imminently (they may not block).  Nonetheless
	 * we will wait for a fixed period for all bound threads to disappear.
	 * Inactive interrupt threads are OK (they'll be in TS_FREE
	 * state).  If the test finds some bound threads, wait a few ticks
	 * to give short-lived threads (such as interrupts) a chance to
	 * complete.  Note that if no_quiesce is set, i.e. this cpu
	 * is required to service interrupts, then we take the route
	 * that permits interrupt threads to be active (or bypassed).
	 */
	bound_func = no_quiesce ? disp_bound_threads : disp_bound_anythreads;

again:	for (loop_count = 0; (*bound_func)(cp, 0); loop_count++) {
		if (loop_count >= 5) {
			error = EBUSY;	/* some threads still bound */
			break;
		}

		/*
		 * If some threads were assigned, give them
		 * a chance to complete or move.
		 *
		 * This assumes that the clock_thread is not bound
		 * to any CPU, because the clock_thread is needed to
		 * do the delay(hz/100).
		 *
		 * Note: we still hold the cpu_lock while waiting for
		 * the next clock tick.  This is OK since it isn't
		 * needed for anything else except processor_bind(2),
		 * and system initialization.  If we drop the lock,
		 * we would risk another p_online disabling the last
		 * processor.
		 */
		delay(hz/100);
	}

	if (error == 0 && callout_off == 0) {
		callout_cpu_offline(cp);
		callout_off = 1;
	}

	if (error == 0 && cyclic_off == 0) {
		if (!cyclic_offline(cp)) {
			/*
			 * We must have bound cyclics...
			 */
			error = EBUSY;
			goto out;
		}
		cyclic_off = 1;
	}

	/*
	 * Call mp_cpu_stop() to perform any special operations
	 * needed for this machine architecture to offline a CPU.
	 */
	if (error == 0)
		error = mp_cpu_stop(cp);	/* arch-dep hook */

	/*
	 * If that all worked, take the CPU offline and decrement
	 * ncpus_online.
	 */
	if (error == 0) {
		/*
		 * Put all the cpus into a known safe place.
		 * No mutexes can be entered while CPUs are paused.
		 */
		pause_cpus(cp, NULL);
		/*
		 * Repeat the operation, if necessary, to make sure that
		 * all outstanding low-level interrupts run to completion
		 * before we set the CPU_QUIESCED flag.  It's also possible
		 * that a thread has weakbound itself to the cpu despite our
		 * raising cpu_inmotion above since it may have loaded that
		 * value before the barrier became visible (this would have
		 * to be the thread that was on the target cpu at the time
		 * we raised the barrier).
		 */
		if ((!no_quiesce && cp->cpu_intr_actv != 0) ||
		    (*bound_func)(cp, 1)) {
			start_cpus();
			(void) mp_cpu_start(cp);
			goto again;
		}
		ncp = cp->cpu_next_part;
		cpu_lpl = cp->cpu_lpl;
		ASSERT(cpu_lpl != NULL);

		/*
		 * Remove the CPU from the list of active CPUs.
		 */
		cpu_remove_active(cp);

		/*
		 * Walk the active process list and look for threads
		 * whose home lgroup needs to be updated, or whose last
		 * CPU is the one being offlined now.
		 */

		ASSERT(curthread->t_cpu != cp);
		for (p = practive; p != NULL; p = p->p_next) {

			t = p->p_tlist;

			if (t == NULL)
				continue;

			lgrp_diff_lpl = 0;

			do {
				ASSERT(t->t_lpl != NULL);
				/*
				 * If we are taking the last CPU in the lpl
				 * offline, rehome the thread if it is in this
				 * lpl; otherwise, update the count of how many
				 * threads are in this CPU's lgroup but have
				 * a different lpl.
				 */

				if (cpu_lpl->lpl_ncpu == 0) {
					if (t->t_lpl == cpu_lpl)
						lgrp_move_thread(t,
						    lgrp_choose(t,
						    t->t_cpupart), 0);
					else if (t->t_lpl->lpl_lgrpid ==
					    cpu_lpl->lpl_lgrpid)
						lgrp_diff_lpl++;
				}
				ASSERT(t->t_lpl->lpl_ncpu > 0);

				/*
				 * Update the CPU last ran on if it was
				 * this CPU.
				 */
				if (t->t_cpu == cp && t->t_bound_cpu != cp)
					t->t_cpu = disp_lowpri_cpu(ncp, t,
					    t->t_pri);
				ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
				    t->t_weakbound_cpu == cp);

				t = t->t_forw;
			} while (t != p->p_tlist);

			/*
			 * Didn't find any threads in the same lgroup as this
			 * CPU with a different lpl, so remove the lgroup from
			 * the process lgroup bitmask.
			 */

			if (lgrp_diff_lpl == 0)
				klgrpset_del(p->p_lgrpset, cpu_lpl->lpl_lgrpid);
		}

		/*
		 * Walk thread list looking for threads that need to be
		 * rehomed, since there are some threads that are not in
		 * their process's p_tlist.
		 */

		t = curthread;
		do {
			ASSERT(t != NULL && t->t_lpl != NULL);

			/*
			 * Rehome threads with same lpl as this CPU when this
			 * is the last CPU in the lpl.
			 */

			if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl))
				lgrp_move_thread(t,
				    lgrp_choose(t, t->t_cpupart), 1);

			ASSERT(t->t_lpl->lpl_ncpu > 0);

			/*
			 * Update the CPU last ran on if it was this CPU.
			 */
			if (t->t_cpu == cp && t->t_bound_cpu != cp)
				t->t_cpu = disp_lowpri_cpu(ncp, t, t->t_pri);

			ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
			    t->t_weakbound_cpu == cp);
			t = t->t_next;

		} while (t != curthread);
		ASSERT((cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) == 0);
		cp->cpu_flags |= CPU_OFFLINE;
		disp_cpu_inactive(cp);
		if (!no_quiesce)
			cp->cpu_flags |= CPU_QUIESCED;
		ncpus_online--;
		cpu_set_state(cp);
		cpu_inmotion = NULL;
		start_cpus();
		cpu_stats_kstat_destroy(cp);
		cpu_delete_intrstat(cp);
		lgrp_kstat_destroy(cp);
	}

out:
	cpu_inmotion = NULL;

	/*
	 * If we failed, re-enable interrupts.
	 * Do this even if cpu_intr_disable returned an error, because
	 * it may have partially disabled interrupts.
	 */
	if (error && intr_enable)
		cpu_intr_enable(cp);

	/*
	 * If we failed, but managed to offline the cyclic subsystem on this
	 * CPU, bring it back online.
	 */
	if (error && cyclic_off)
		cyclic_online(cp);

	/*
	 * If we failed, but managed to offline callouts on this CPU,
	 * bring it back online.
	 */
	if (error && callout_off)
		callout_cpu_online(cp);

	/*
	 * If we failed, tell the PG subsystem that the CPU is back.
	 */
	pg_cpupart_in(cp, pp);

	/*
	 * If we failed, we need to notify everyone that this CPU is back on.
	 */
	if (error != 0) {
		CPU_NEW_GENERATION(cp);
		cpu_state_change_notify(cp->cpu_id, CPU_ON);
		cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
	}

	return (error);
}

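/*
 * For illustration only -- a hypothetical forced offline, which also
 * unbinds hard-bound threads (see the cpu_unbind() call above):
 *
 *	mutex_enter(&cpu_lock);
 *	error = cpu_offline(cp, CPU_FORCED);
 *	mutex_exit(&cpu_lock);
 */
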
/*
 * Mark the indicated CPU as faulted, taking it offline.
 */
int
cpu_faulted(cpu_t *cp, int flags)
{
	int	error = 0;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(!cpu_is_poweredoff(cp));

	if (cp->cpu_flags & CPU_DISABLED)
		return (EINVAL);

	if (cpu_is_offline(cp)) {
		cp->cpu_flags &= ~CPU_SPARE;
		cp->cpu_flags |= CPU_FAULTED;
		mp_cpu_faulted_enter(cp);
		cpu_set_state(cp);
		return (0);
	}

	if ((error = cpu_offline(cp, flags)) == 0) {
		cp->cpu_flags |= CPU_FAULTED;
		mp_cpu_faulted_enter(cp);
		cpu_set_state(cp);
	}

	return (error);
}

/*
 * Mark the indicated CPU as a spare, taking it offline.
 */
int
cpu_spare(cpu_t *cp, int flags)
{
	int	error = 0;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(!cpu_is_poweredoff(cp));

	if (cp->cpu_flags & CPU_DISABLED)
		return (EINVAL);

	if (cpu_is_offline(cp)) {
		if (cp->cpu_flags & CPU_FAULTED) {
			cp->cpu_flags &= ~CPU_FAULTED;
			mp_cpu_faulted_exit(cp);
		}
		cp->cpu_flags |= CPU_SPARE;
		cpu_set_state(cp);
		return (0);
	}

	if ((error = cpu_offline(cp, flags)) == 0) {
		cp->cpu_flags |= CPU_SPARE;
		cpu_set_state(cp);
	}

	return (error);
}

/*
 * Take the indicated CPU from poweroff to offline.
 */
int
cpu_poweron(cpu_t *cp)
{
	int	error = ENOTSUP;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_is_poweredoff(cp));

	error = mp_cpu_poweron(cp);	/* arch-dep hook */
	if (error == 0)
		cpu_set_state(cp);

	return (error);
}

/*
 * Take the indicated CPU from any inactive state to powered off.
 */
int
cpu_poweroff(cpu_t *cp)
{
	int	error = ENOTSUP;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_is_offline(cp));

	if (!(cp->cpu_flags & CPU_QUIESCED))
		return (EBUSY);		/* not completely idle */

	error = mp_cpu_poweroff(cp);	/* arch-dep hook */
	if (error == 0)
		cpu_set_state(cp);

	return (error);
}

/*
 * Initialize the sequential CPU id lookup table.
 */
void
cpu_seq_tbl_init()
{
	cpu_t	**tbl;

	tbl = kmem_zalloc(sizeof (struct cpu *) * max_ncpus, KM_SLEEP);
	tbl[0] = CPU;

	cpu_seq = tbl;
}

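/*
 * For illustration only -- cpu_seq[] is indexed by the dense cpu_seqid
 * rather than the potentially sparse processor id, so a hypothetical
 * walk of every CPU ever configured looks like:
 *
 *	for (i = 0; i <= max_cpu_seqid_ever; i++) {
 *		if ((cp = cpu_seq[i]) != NULL)
 *			... examine cp ...
 *	}
 */
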
/*
 * Initialize the CPU lists for the first CPU.
 */
void
cpu_list_init(cpu_t *cp)
{
	cp->cpu_next = cp;
	cp->cpu_prev = cp;
	cpu_list = cp;
	clock_cpu_list = cp;

	cp->cpu_next_onln = cp;
	cp->cpu_prev_onln = cp;
	cpu_active = cp;

	cp->cpu_seqid = 0;
	CPUSET_ADD(cpu_seqid_inuse, 0);

	/*
	 * Bootstrap cpu_seq using cpu_list.
	 * The cpu_seq[] table will be dynamically allocated
	 * when kmem later becomes available (but before going MP).
	 */
	cpu_seq = &cpu_list;

	cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
	cp_default.cp_cpulist = cp;
	cp_default.cp_ncpus = 1;
	cp->cpu_next_part = cp;
	cp->cpu_prev_part = cp;
	cp->cpu_part = &cp_default;

	CPUSET_ADD(cpu_available, cp->cpu_id);
	CPUSET_ADD(cpu_active_set, cp->cpu_id);
}

/*
 * Insert a CPU into the list of available CPUs.
 */
void
cpu_add_unit(cpu_t *cp)
{
	int seqid;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_list != NULL);	/* list started in cpu_list_init */

	lgrp_config(LGRP_CONFIG_CPU_ADD, (uintptr_t)cp, 0);

	/*
	 * Note: most users of the cpu_list will grab the
	 * cpu_lock to ensure that it isn't modified.  However,
	 * certain users can't or won't do that.  To allow this
	 * we pause the other cpus.  Users who walk the list
	 * without cpu_lock must disable kernel preemption
	 * to ensure that the list isn't modified underneath
	 * them.  Also, any cached pointers to cpu structures
	 * must be revalidated by checking to see if the
	 * cpu_next pointer points to itself.  This check must
	 * be done with the cpu_lock held or kernel preemption
	 * disabled.  This check relies upon the fact that
	 * old cpu structures are not freed or cleared after
	 * they are removed from the cpu_list.
	 *
	 * Note that the clock code walks the cpu list dereferencing
	 * the cpu_part pointer, so we need to initialize it before
	 * adding the cpu to the list.
	 */
	cp->cpu_part = &cp_default;
	pause_cpus(NULL, NULL);
	cp->cpu_next = cpu_list;
	cp->cpu_prev = cpu_list->cpu_prev;
	cpu_list->cpu_prev->cpu_next = cp;
	cpu_list->cpu_prev = cp;
	start_cpus();

	for (seqid = 0; CPU_IN_SET(cpu_seqid_inuse, seqid); seqid++)
		continue;
	CPUSET_ADD(cpu_seqid_inuse, seqid);
	cp->cpu_seqid = seqid;

	if (seqid > max_cpu_seqid_ever)
		max_cpu_seqid_ever = seqid;

	ASSERT(ncpus < max_ncpus);
	ncpus++;
	cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
	cpu[cp->cpu_id] = cp;
	CPUSET_ADD(cpu_available, cp->cpu_id);
	cpu_seq[cp->cpu_seqid] = cp;

	/*
	 * Allocate a pause thread for this CPU.
	 */
	cpu_pause_alloc(cp);

	/*
	 * So that new CPUs won't have NULL prev_onln and next_onln pointers,
	 * link them into a list of just that CPU.
	 * This is so that disp_lowpri_cpu will work for thread_create in
	 * pause_cpus() when called from the startup thread in a new CPU.
	 */
	cp->cpu_next_onln = cp;
	cp->cpu_prev_onln = cp;
	cpu_info_kstat_create(cp);
	cp->cpu_next_part = cp;
	cp->cpu_prev_part = cp;

	init_cpu_mstate(cp, CMS_SYSTEM);

	pool_pset_mod = gethrtime();
}

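/*
 * For illustration only -- a hypothetical walker that cannot take
 * cpu_lock, following the rules in the comment in cpu_add_unit() above:
 * disable kernel preemption for the walk, and revalidate any cached
 * cpu_t pointer (cp->cpu_next == NULL means the cpu was deleted):
 *
 *	kpreempt_disable();
 *	cp = cpu_list;
 *	do {
 *		... examine cp, without blocking ...
 *		cp = cp->cpu_next;
 *	} while (cp != cpu_list);
 *	kpreempt_enable();
 */
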
/*
 * Do the opposite of cpu_add_unit().
 */
void
cpu_del_unit(int cpuid)
{
	struct cpu	*cp, *cpnext;

	ASSERT(MUTEX_HELD(&cpu_lock));
	cp = cpu[cpuid];
	ASSERT(cp != NULL);

	ASSERT(cp->cpu_next_onln == cp);
	ASSERT(cp->cpu_prev_onln == cp);
	ASSERT(cp->cpu_next_part == cp);
	ASSERT(cp->cpu_prev_part == cp);

	/*
	 * Tear down the CPU's physical ID cache, and update any
	 * processor groups.
	 */
	pg_cpu_fini(cp, NULL);
	pghw_physid_destroy(cp);

	/*
	 * Destroy kstat stuff.
	 */
	cpu_info_kstat_destroy(cp);
	term_cpu_mstate(cp);
	/*
	 * Free up pause thread.
	 */
	cpu_pause_free(cp);
	CPUSET_DEL(cpu_available, cp->cpu_id);
	cpu[cp->cpu_id] = NULL;
	cpu_seq[cp->cpu_seqid] = NULL;

	/*
	 * The clock thread and mutex_vector_enter cannot hold the
	 * cpu_lock while traversing the cpu list, therefore we pause
	 * all other threads by pausing the other cpus.  These, and any
	 * other routines holding cpu pointers while possibly sleeping
	 * must be sure to call kpreempt_disable before processing the
	 * list and be sure to check that the cpu has not been deleted
	 * after any sleeps (check cp->cpu_next != NULL).  We guarantee
	 * to keep the deleted cpu structure around.
	 *
	 * Note that this MUST be done AFTER cpu_available
	 * has been updated so that we don't waste time
	 * trying to pause the cpu we're trying to delete.
	 */
	pause_cpus(NULL, NULL);

	cpnext = cp->cpu_next;
	cp->cpu_prev->cpu_next = cp->cpu_next;
	cp->cpu_next->cpu_prev = cp->cpu_prev;
	if (cp == cpu_list)
		cpu_list = cpnext;

	/*
	 * Signals that the cpu has been deleted (see above).
	 */
	cp->cpu_next = NULL;
	cp->cpu_prev = NULL;

	start_cpus();

	CPUSET_DEL(cpu_seqid_inuse, cp->cpu_seqid);
	ncpus--;
	lgrp_config(LGRP_CONFIG_CPU_DEL, (uintptr_t)cp, 0);

	pool_pset_mod = gethrtime();
}

/*
 * Add a CPU to the list of active CPUs.
 *	This routine must not get any locks, because other CPUs are paused.
 */
static void
cpu_add_active_internal(cpu_t *cp)
{
	cpupart_t	*pp = cp->cpu_part;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_list != NULL);	/* list started in cpu_list_init */

	ncpus_online++;
	cpu_set_state(cp);
	cp->cpu_next_onln = cpu_active;
	cp->cpu_prev_onln = cpu_active->cpu_prev_onln;
	cpu_active->cpu_prev_onln->cpu_next_onln = cp;
	cpu_active->cpu_prev_onln = cp;
	CPUSET_ADD(cpu_active_set, cp->cpu_id);

	if (pp->cp_cpulist) {
		cp->cpu_next_part = pp->cp_cpulist;
		cp->cpu_prev_part = pp->cp_cpulist->cpu_prev_part;
		pp->cp_cpulist->cpu_prev_part->cpu_next_part = cp;
		pp->cp_cpulist->cpu_prev_part = cp;
	} else {
		ASSERT(pp->cp_ncpus == 0);
		pp->cp_cpulist = cp->cpu_next_part = cp->cpu_prev_part = cp;
	}
	pp->cp_ncpus++;
	if (pp->cp_ncpus == 1) {
		cp_numparts_nonempty++;
		ASSERT(cp_numparts_nonempty != 0);
	}

	pg_cpu_active(cp);
	lgrp_config(LGRP_CONFIG_CPU_ONLINE, (uintptr_t)cp, 0);

	bzero(&cp->cpu_loadavg, sizeof (cp->cpu_loadavg));
}

/*
 * Add a CPU to the list of active CPUs.
 *	This is called from machine-dependent layers when a new CPU is started.
 */
void
cpu_add_active(cpu_t *cp)
{
	pg_cpupart_in(cp, cp->cpu_part);

	pause_cpus(NULL, NULL);
	cpu_add_active_internal(cp);
	start_cpus();

	cpu_stats_kstat_create(cp);
	cpu_create_intrstat(cp);
	lgrp_kstat_create(cp);
	cpu_state_change_notify(cp->cpu_id, CPU_INIT);
}


/*
 * Remove a CPU from the list of active CPUs.
 *	This routine must not get any locks, because other CPUs are paused.
 */
/* ARGSUSED */
static void
cpu_remove_active(cpu_t *cp)
{
	cpupart_t	*pp = cp->cpu_part;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cp->cpu_next_onln != cp);	/* not the last one */
	ASSERT(cp->cpu_prev_onln != cp);	/* not the last one */

	pg_cpu_inactive(cp);

	lgrp_config(LGRP_CONFIG_CPU_OFFLINE, (uintptr_t)cp, 0);

	if (cp == clock_cpu_list)
		clock_cpu_list = cp->cpu_next_onln;

	cp->cpu_prev_onln->cpu_next_onln = cp->cpu_next_onln;
	cp->cpu_next_onln->cpu_prev_onln = cp->cpu_prev_onln;
	if (cpu_active == cp) {
		cpu_active = cp->cpu_next_onln;
	}
	cp->cpu_next_onln = cp;
	cp->cpu_prev_onln = cp;
	CPUSET_DEL(cpu_active_set, cp->cpu_id);

	cp->cpu_prev_part->cpu_next_part = cp->cpu_next_part;
	cp->cpu_next_part->cpu_prev_part = cp->cpu_prev_part;
	if (pp->cp_cpulist == cp) {
		pp->cp_cpulist = cp->cpu_next_part;
		ASSERT(pp->cp_cpulist != cp);
	}
	cp->cpu_next_part = cp;
	cp->cpu_prev_part = cp;
	pp->cp_ncpus--;
	if (pp->cp_ncpus == 0) {
		cp_numparts_nonempty--;
		ASSERT(cp_numparts_nonempty != 0);
	}
}

/*
 * Routine used to set up a newly inserted CPU in preparation for starting
 * it running code.
 */
int
cpu_configure(int cpuid)
{
	int retval = 0;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Some structures are statically allocated based upon
	 * the maximum number of cpus the system supports.  Do not
	 * try to add anything beyond this limit.
	 */
	if (cpuid < 0 || cpuid >= NCPU) {
		return (EINVAL);
	}

	if ((cpu[cpuid] != NULL) && (cpu[cpuid]->cpu_flags != 0)) {
		return (EALREADY);
	}

	if ((retval = mp_cpu_configure(cpuid)) != 0) {
		return (retval);
	}

	cpu[cpuid]->cpu_flags = CPU_QUIESCED | CPU_OFFLINE | CPU_POWEROFF;
	cpu_set_state(cpu[cpuid]);
	retval = cpu_state_change_hooks(cpuid, CPU_CONFIG, CPU_UNCONFIG);
	if (retval != 0)
		(void) mp_cpu_unconfigure(cpuid);

	return (retval);
}

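/*
 * For illustration only -- a hypothetical DR flow taking a newly
 * inserted CPU all the way to online:
 *
 *	mutex_enter(&cpu_lock);
 *	if ((error = cpu_configure(cpuid)) == 0 &&
 *	    (error = cpu_poweron(cpu[cpuid])) == 0)
 *		error = cpu_online(cpu[cpuid], 0);
 *	mutex_exit(&cpu_lock);
 */
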
/*
 * Routine used to clean up a CPU that has been powered off.  This will
 * destroy all per-cpu information related to this cpu.
 */
int
cpu_unconfigure(int cpuid)
{
	int error;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cpu[cpuid] == NULL) {
		return (ENODEV);
	}

	if (cpu[cpuid]->cpu_flags == 0) {
		return (EALREADY);
	}

	if ((cpu[cpuid]->cpu_flags & CPU_POWEROFF) == 0) {
		return (EBUSY);
	}

	if (cpu[cpuid]->cpu_props != NULL) {
		(void) nvlist_free(cpu[cpuid]->cpu_props);
		cpu[cpuid]->cpu_props = NULL;
	}

	error = cpu_state_change_hooks(cpuid, CPU_UNCONFIG, CPU_CONFIG);

	if (error != 0)
		return (error);

	return (mp_cpu_unconfigure(cpuid));
}

/*
 * Routines for registering and de-registering cpu_setup callback functions.
 *
 * Caller's context
 *	These routines must not be called from a driver's attach(9E) or
 *	detach(9E) entry point.
 *
 * NOTE: CPU callbacks should not block.  They are called with cpu_lock held.
 */

/*
 * Ideally, these would be dynamically allocated and put into a linked
 * list; however that is not feasible because the registration routine
 * has to be available before the kmem allocator is working (in fact,
 * it is called by the kmem allocator init code).  In any case, there
 * are quite a few extra entries for future users.
21167c478bd9Sstevel@tonic-gate */ 21171aa15ad6Sjkennedy #define NCPU_SETUPS 20 21187c478bd9Sstevel@tonic-gate 21197c478bd9Sstevel@tonic-gate struct cpu_setup { 21207c478bd9Sstevel@tonic-gate cpu_setup_func_t *func; 21217c478bd9Sstevel@tonic-gate void *arg; 21227c478bd9Sstevel@tonic-gate } cpu_setups[NCPU_SETUPS]; 21237c478bd9Sstevel@tonic-gate 21247c478bd9Sstevel@tonic-gate void 21257c478bd9Sstevel@tonic-gate register_cpu_setup_func(cpu_setup_func_t *func, void *arg) 21267c478bd9Sstevel@tonic-gate { 21277c478bd9Sstevel@tonic-gate int i; 21287c478bd9Sstevel@tonic-gate 21297c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 21307c478bd9Sstevel@tonic-gate 21317c478bd9Sstevel@tonic-gate for (i = 0; i < NCPU_SETUPS; i++) 21327c478bd9Sstevel@tonic-gate if (cpu_setups[i].func == NULL) 21337c478bd9Sstevel@tonic-gate break; 21347c478bd9Sstevel@tonic-gate if (i >= NCPU_SETUPS) 21357c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "Ran out of cpu_setup callback entries"); 21367c478bd9Sstevel@tonic-gate 21377c478bd9Sstevel@tonic-gate cpu_setups[i].func = func; 21387c478bd9Sstevel@tonic-gate cpu_setups[i].arg = arg; 21397c478bd9Sstevel@tonic-gate } 21407c478bd9Sstevel@tonic-gate 21417c478bd9Sstevel@tonic-gate void 21427c478bd9Sstevel@tonic-gate unregister_cpu_setup_func(cpu_setup_func_t *func, void *arg) 21437c478bd9Sstevel@tonic-gate { 21447c478bd9Sstevel@tonic-gate int i; 21457c478bd9Sstevel@tonic-gate 21467c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 21477c478bd9Sstevel@tonic-gate 21487c478bd9Sstevel@tonic-gate for (i = 0; i < NCPU_SETUPS; i++) 21497c478bd9Sstevel@tonic-gate if ((cpu_setups[i].func == func) && 21507c478bd9Sstevel@tonic-gate (cpu_setups[i].arg == arg)) 21517c478bd9Sstevel@tonic-gate break; 21527c478bd9Sstevel@tonic-gate if (i >= NCPU_SETUPS) 21537c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "Could not find cpu_setup callback to " 21547c478bd9Sstevel@tonic-gate "deregister"); 21557c478bd9Sstevel@tonic-gate 21567c478bd9Sstevel@tonic-gate cpu_setups[i].func = NULL; 21577c478bd9Sstevel@tonic-gate cpu_setups[i].arg = 0; 21587c478bd9Sstevel@tonic-gate } 21597c478bd9Sstevel@tonic-gate 21607c478bd9Sstevel@tonic-gate /* 21617c478bd9Sstevel@tonic-gate * Call any state change hooks for this CPU, ignore any errors. 21627c478bd9Sstevel@tonic-gate */ 21637c478bd9Sstevel@tonic-gate void 21647c478bd9Sstevel@tonic-gate cpu_state_change_notify(int id, cpu_setup_t what) 21657c478bd9Sstevel@tonic-gate { 21667c478bd9Sstevel@tonic-gate int i; 21677c478bd9Sstevel@tonic-gate 21687c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 21697c478bd9Sstevel@tonic-gate 21707c478bd9Sstevel@tonic-gate for (i = 0; i < NCPU_SETUPS; i++) { 21717c478bd9Sstevel@tonic-gate if (cpu_setups[i].func != NULL) { 21727c478bd9Sstevel@tonic-gate cpu_setups[i].func(what, id, cpu_setups[i].arg); 21737c478bd9Sstevel@tonic-gate } 21747c478bd9Sstevel@tonic-gate } 21757c478bd9Sstevel@tonic-gate } 21767c478bd9Sstevel@tonic-gate 21777c478bd9Sstevel@tonic-gate /* 21787c478bd9Sstevel@tonic-gate * Call any state change hooks for this CPU, undo it if error found. 
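 *
 * For example, if called with (CPU_CONFIG, CPU_UNCONFIG) and the third
 * registered callback fails, the first two callbacks are re-invoked
 * with CPU_UNCONFIG before the error is returned to the caller.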
21797c478bd9Sstevel@tonic-gate */ 21807c478bd9Sstevel@tonic-gate static int 21817c478bd9Sstevel@tonic-gate cpu_state_change_hooks(int id, cpu_setup_t what, cpu_setup_t undo) 21827c478bd9Sstevel@tonic-gate { 21837c478bd9Sstevel@tonic-gate int i; 21847c478bd9Sstevel@tonic-gate int retval = 0; 21857c478bd9Sstevel@tonic-gate 21867c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 21877c478bd9Sstevel@tonic-gate 21887c478bd9Sstevel@tonic-gate for (i = 0; i < NCPU_SETUPS; i++) { 21897c478bd9Sstevel@tonic-gate if (cpu_setups[i].func != NULL) { 21907c478bd9Sstevel@tonic-gate retval = cpu_setups[i].func(what, id, 21917c478bd9Sstevel@tonic-gate cpu_setups[i].arg); 21927c478bd9Sstevel@tonic-gate if (retval) { 21937c478bd9Sstevel@tonic-gate for (i--; i >= 0; i--) { 21947c478bd9Sstevel@tonic-gate if (cpu_setups[i].func != NULL) 21957c478bd9Sstevel@tonic-gate cpu_setups[i].func(undo, 21967c478bd9Sstevel@tonic-gate id, cpu_setups[i].arg); 21977c478bd9Sstevel@tonic-gate } 21987c478bd9Sstevel@tonic-gate break; 21997c478bd9Sstevel@tonic-gate } 22007c478bd9Sstevel@tonic-gate } 22017c478bd9Sstevel@tonic-gate } 22027c478bd9Sstevel@tonic-gate return (retval); 22037c478bd9Sstevel@tonic-gate } 22047c478bd9Sstevel@tonic-gate 22057c478bd9Sstevel@tonic-gate /* 22067c478bd9Sstevel@tonic-gate * Export information about this CPU via the kstat mechanism. 22077c478bd9Sstevel@tonic-gate */ 22087c478bd9Sstevel@tonic-gate static struct { 22097c478bd9Sstevel@tonic-gate kstat_named_t ci_state; 22107c478bd9Sstevel@tonic-gate kstat_named_t ci_state_begin; 22117c478bd9Sstevel@tonic-gate kstat_named_t ci_cpu_type; 22127c478bd9Sstevel@tonic-gate kstat_named_t ci_fpu_type; 22137c478bd9Sstevel@tonic-gate kstat_named_t ci_clock_MHz; 22147c478bd9Sstevel@tonic-gate kstat_named_t ci_chip_id; 22157c478bd9Sstevel@tonic-gate kstat_named_t ci_implementation; 22167aec1d6eScindi kstat_named_t ci_brandstr; 22177aec1d6eScindi kstat_named_t ci_core_id; 22185cff7825Smh27603 kstat_named_t ci_curr_clock_Hz; 22195cff7825Smh27603 kstat_named_t ci_supp_freq_Hz; 2220b885580bSAlexander Kolbasov kstat_named_t ci_pg_id; 22217aec1d6eScindi #if defined(__sparcv9) 22227c478bd9Sstevel@tonic-gate kstat_named_t ci_device_ID; 22237c478bd9Sstevel@tonic-gate kstat_named_t ci_cpu_fru; 22247c478bd9Sstevel@tonic-gate #endif 2225ae115bc7Smrj #if defined(__x86) 22267aec1d6eScindi kstat_named_t ci_vendorstr; 22277aec1d6eScindi kstat_named_t ci_family; 22287aec1d6eScindi kstat_named_t ci_model; 22297aec1d6eScindi kstat_named_t ci_step; 22307aec1d6eScindi kstat_named_t ci_clogid; 223110569901Sgavinm kstat_named_t ci_pkg_core_id; 223220c794b3Sgavinm kstat_named_t ci_ncpuperchip; 223320c794b3Sgavinm kstat_named_t ci_ncoreperchip; 22340e751525SEric Saxe kstat_named_t ci_max_cstates; 22350e751525SEric Saxe kstat_named_t ci_curr_cstate; 2236b885580bSAlexander Kolbasov kstat_named_t ci_cacheid; 223789e921d5SKuriakose Kuruvilla kstat_named_t ci_sktstr; 22387aec1d6eScindi #endif 22397c478bd9Sstevel@tonic-gate } cpu_info_template = { 22407c478bd9Sstevel@tonic-gate { "state", KSTAT_DATA_CHAR }, 22417c478bd9Sstevel@tonic-gate { "state_begin", KSTAT_DATA_LONG }, 22427c478bd9Sstevel@tonic-gate { "cpu_type", KSTAT_DATA_CHAR }, 22437c478bd9Sstevel@tonic-gate { "fpu_type", KSTAT_DATA_CHAR }, 22447c478bd9Sstevel@tonic-gate { "clock_MHz", KSTAT_DATA_LONG }, 22457c478bd9Sstevel@tonic-gate { "chip_id", KSTAT_DATA_LONG }, 22467c478bd9Sstevel@tonic-gate { "implementation", KSTAT_DATA_STRING }, 22477aec1d6eScindi { "brand", KSTAT_DATA_STRING }, 22487aec1d6eScindi { "core_id", 
KSTAT_DATA_LONG }, 22495cff7825Smh27603 { "current_clock_Hz", KSTAT_DATA_UINT64 }, 22505cff7825Smh27603 { "supported_frequencies_Hz", KSTAT_DATA_STRING }, 2251b885580bSAlexander Kolbasov { "pg_id", KSTAT_DATA_LONG }, 22527aec1d6eScindi #if defined(__sparcv9) 22537c478bd9Sstevel@tonic-gate { "device_ID", KSTAT_DATA_UINT64 }, 22547c478bd9Sstevel@tonic-gate { "cpu_fru", KSTAT_DATA_STRING }, 22557c478bd9Sstevel@tonic-gate #endif 2256ae115bc7Smrj #if defined(__x86) 22577aec1d6eScindi { "vendor_id", KSTAT_DATA_STRING }, 22587aec1d6eScindi { "family", KSTAT_DATA_INT32 }, 22597aec1d6eScindi { "model", KSTAT_DATA_INT32 }, 22607aec1d6eScindi { "stepping", KSTAT_DATA_INT32 }, 22617aec1d6eScindi { "clog_id", KSTAT_DATA_INT32 }, 226210569901Sgavinm { "pkg_core_id", KSTAT_DATA_LONG }, 226320c794b3Sgavinm { "ncpu_per_chip", KSTAT_DATA_INT32 }, 226420c794b3Sgavinm { "ncore_per_chip", KSTAT_DATA_INT32 }, 22650e751525SEric Saxe { "supported_max_cstates", KSTAT_DATA_INT32 }, 22660e751525SEric Saxe { "current_cstate", KSTAT_DATA_INT32 }, 2267b885580bSAlexander Kolbasov { "cache_id", KSTAT_DATA_INT32 }, 226889e921d5SKuriakose Kuruvilla { "socket_type", KSTAT_DATA_STRING }, 22697aec1d6eScindi #endif 22707c478bd9Sstevel@tonic-gate }; 22717c478bd9Sstevel@tonic-gate 22727c478bd9Sstevel@tonic-gate static kmutex_t cpu_info_template_lock; 22737c478bd9Sstevel@tonic-gate 22747c478bd9Sstevel@tonic-gate static int 22757c478bd9Sstevel@tonic-gate cpu_info_kstat_update(kstat_t *ksp, int rw) 22767c478bd9Sstevel@tonic-gate { 22777c478bd9Sstevel@tonic-gate cpu_t *cp = ksp->ks_private; 22787c478bd9Sstevel@tonic-gate const char *pi_state; 22797c478bd9Sstevel@tonic-gate 22807c478bd9Sstevel@tonic-gate if (rw == KSTAT_WRITE) 22817c478bd9Sstevel@tonic-gate return (EACCES); 22827c478bd9Sstevel@tonic-gate 22837c208b9dSSurya Prakki #if defined(__x86) 22847c208b9dSSurya Prakki /* Is the cpu still initialising itself? */ 22857c208b9dSSurya Prakki if (cpuid_checkpass(cp, 1) == 0) 22867c208b9dSSurya Prakki return (ENXIO); 22877c208b9dSSurya Prakki #endif 2288*c3377ee9SJohn Levon 2289*c3377ee9SJohn Levon pi_state = cpu_get_state_str(cp->cpu_flags); 2290*c3377ee9SJohn Levon 22917c478bd9Sstevel@tonic-gate (void) strcpy(cpu_info_template.ci_state.value.c, pi_state); 22927c478bd9Sstevel@tonic-gate cpu_info_template.ci_state_begin.value.l = cp->cpu_state_begin; 22937c478bd9Sstevel@tonic-gate (void) strncpy(cpu_info_template.ci_cpu_type.value.c, 22947c478bd9Sstevel@tonic-gate cp->cpu_type_info.pi_processor_type, 15); 22957c478bd9Sstevel@tonic-gate (void) strncpy(cpu_info_template.ci_fpu_type.value.c, 22967c478bd9Sstevel@tonic-gate cp->cpu_type_info.pi_fputypes, 15); 22977c478bd9Sstevel@tonic-gate cpu_info_template.ci_clock_MHz.value.l = cp->cpu_type_info.pi_clock; 2298fb2f18f8Sesaxe cpu_info_template.ci_chip_id.value.l = 2299fb2f18f8Sesaxe pg_plat_hw_instance_id(cp, PGHW_CHIP); 23007c478bd9Sstevel@tonic-gate kstat_named_setstr(&cpu_info_template.ci_implementation, 23017c478bd9Sstevel@tonic-gate cp->cpu_idstr); 23027aec1d6eScindi kstat_named_setstr(&cpu_info_template.ci_brandstr, cp->cpu_brandstr); 2303fb2f18f8Sesaxe cpu_info_template.ci_core_id.value.l = pg_plat_get_core_id(cp); 23045cff7825Smh27603 cpu_info_template.ci_curr_clock_Hz.value.ui64 = 2305cf74e62bSmh27603 cp->cpu_curr_clock; 2306b885580bSAlexander Kolbasov cpu_info_template.ci_pg_id.value.l = 2307b885580bSAlexander Kolbasov cp->cpu_pg && cp->cpu_pg->cmt_lineage ? 
2308b885580bSAlexander Kolbasov cp->cpu_pg->cmt_lineage->pg_id : -1; 23095cff7825Smh27603 kstat_named_setstr(&cpu_info_template.ci_supp_freq_Hz, 2310cf74e62bSmh27603 cp->cpu_supp_freqs); 23117aec1d6eScindi #if defined(__sparcv9) 23127c478bd9Sstevel@tonic-gate cpu_info_template.ci_device_ID.value.ui64 = 23137c478bd9Sstevel@tonic-gate cpunodes[cp->cpu_id].device_id; 23147c478bd9Sstevel@tonic-gate kstat_named_setstr(&cpu_info_template.ci_cpu_fru, cpu_fru_fmri(cp)); 23157c478bd9Sstevel@tonic-gate #endif 2316ae115bc7Smrj #if defined(__x86) 23177aec1d6eScindi kstat_named_setstr(&cpu_info_template.ci_vendorstr, 23187aec1d6eScindi cpuid_getvendorstr(cp)); 23197aec1d6eScindi cpu_info_template.ci_family.value.l = cpuid_getfamily(cp); 23207aec1d6eScindi cpu_info_template.ci_model.value.l = cpuid_getmodel(cp); 23217aec1d6eScindi cpu_info_template.ci_step.value.l = cpuid_getstep(cp); 2322fb2f18f8Sesaxe cpu_info_template.ci_clogid.value.l = cpuid_get_clogid(cp); 232320c794b3Sgavinm cpu_info_template.ci_ncpuperchip.value.l = cpuid_get_ncpu_per_chip(cp); 232420c794b3Sgavinm cpu_info_template.ci_ncoreperchip.value.l = 232520c794b3Sgavinm cpuid_get_ncore_per_chip(cp); 232610569901Sgavinm cpu_info_template.ci_pkg_core_id.value.l = cpuid_get_pkgcoreid(cp); 23270e751525SEric Saxe cpu_info_template.ci_max_cstates.value.l = cp->cpu_m.max_cstates; 2328fb2caebeSRandy Fishel cpu_info_template.ci_curr_cstate.value.l = cpu_idle_get_cpu_state(cp); 2329b885580bSAlexander Kolbasov cpu_info_template.ci_cacheid.value.i32 = cpuid_get_cacheid(cp); 233089e921d5SKuriakose Kuruvilla kstat_named_setstr(&cpu_info_template.ci_sktstr, 233189e921d5SKuriakose Kuruvilla cpuid_getsocketstr(cp)); 23327aec1d6eScindi #endif 23337aec1d6eScindi 23347c478bd9Sstevel@tonic-gate return (0); 23357c478bd9Sstevel@tonic-gate } 23367c478bd9Sstevel@tonic-gate 23377c478bd9Sstevel@tonic-gate static void 23387c478bd9Sstevel@tonic-gate cpu_info_kstat_create(cpu_t *cp) 23397c478bd9Sstevel@tonic-gate { 23407c478bd9Sstevel@tonic-gate zoneid_t zoneid; 23417c478bd9Sstevel@tonic-gate 23427c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 23437c478bd9Sstevel@tonic-gate 23447c478bd9Sstevel@tonic-gate if (pool_pset_enabled()) 23457c478bd9Sstevel@tonic-gate zoneid = GLOBAL_ZONEID; 23467c478bd9Sstevel@tonic-gate else 23477c478bd9Sstevel@tonic-gate zoneid = ALL_ZONES; 23487c478bd9Sstevel@tonic-gate if ((cp->cpu_info_kstat = kstat_create_zone("cpu_info", cp->cpu_id, 23497c478bd9Sstevel@tonic-gate NULL, "misc", KSTAT_TYPE_NAMED, 23507c478bd9Sstevel@tonic-gate sizeof (cpu_info_template) / sizeof (kstat_named_t), 23517c208b9dSSurya Prakki KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_VAR_SIZE, zoneid)) != NULL) { 23527c478bd9Sstevel@tonic-gate cp->cpu_info_kstat->ks_data_size += 2 * CPU_IDSTRLEN; 23537aec1d6eScindi #if defined(__sparcv9) 23547c478bd9Sstevel@tonic-gate cp->cpu_info_kstat->ks_data_size += 23557c478bd9Sstevel@tonic-gate strlen(cpu_fru_fmri(cp)) + 1; 23567c478bd9Sstevel@tonic-gate #endif 2357ae115bc7Smrj #if defined(__x86) 23587aec1d6eScindi cp->cpu_info_kstat->ks_data_size += X86_VENDOR_STRLEN; 23597aec1d6eScindi #endif 23604e93b15cSmh27603 if (cp->cpu_supp_freqs != NULL) 23614e93b15cSmh27603 cp->cpu_info_kstat->ks_data_size += 23624e93b15cSmh27603 strlen(cp->cpu_supp_freqs) + 1; 23637c478bd9Sstevel@tonic-gate cp->cpu_info_kstat->ks_lock = &cpu_info_template_lock; 23647c478bd9Sstevel@tonic-gate cp->cpu_info_kstat->ks_data = &cpu_info_template; 23657c478bd9Sstevel@tonic-gate cp->cpu_info_kstat->ks_private = cp; 23667c478bd9Sstevel@tonic-gate 
cp->cpu_info_kstat->ks_update = cpu_info_kstat_update; 23677c478bd9Sstevel@tonic-gate kstat_install(cp->cpu_info_kstat); 23687c478bd9Sstevel@tonic-gate } 23697c478bd9Sstevel@tonic-gate } 23707c478bd9Sstevel@tonic-gate 23717c478bd9Sstevel@tonic-gate static void 23727c478bd9Sstevel@tonic-gate cpu_info_kstat_destroy(cpu_t *cp) 23737c478bd9Sstevel@tonic-gate { 23747c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 23757c478bd9Sstevel@tonic-gate 23767c478bd9Sstevel@tonic-gate kstat_delete(cp->cpu_info_kstat); 23777c478bd9Sstevel@tonic-gate cp->cpu_info_kstat = NULL; 23787c478bd9Sstevel@tonic-gate } 23797c478bd9Sstevel@tonic-gate 23807c478bd9Sstevel@tonic-gate /* 23817c478bd9Sstevel@tonic-gate * Create and install kstats for the boot CPU. 23827c478bd9Sstevel@tonic-gate */ 23837c478bd9Sstevel@tonic-gate void 23847c478bd9Sstevel@tonic-gate cpu_kstat_init(cpu_t *cp) 23857c478bd9Sstevel@tonic-gate { 23867c478bd9Sstevel@tonic-gate mutex_enter(&cpu_lock); 23877c478bd9Sstevel@tonic-gate cpu_info_kstat_create(cp); 23887c478bd9Sstevel@tonic-gate cpu_stats_kstat_create(cp); 23897c478bd9Sstevel@tonic-gate cpu_create_intrstat(cp); 23907c478bd9Sstevel@tonic-gate cpu_set_state(cp); 23917c478bd9Sstevel@tonic-gate mutex_exit(&cpu_lock); 23927c478bd9Sstevel@tonic-gate } 23937c478bd9Sstevel@tonic-gate 23947c478bd9Sstevel@tonic-gate /* 23957c478bd9Sstevel@tonic-gate * Make visible to the zone that subset of the cpu information that would be 23967c478bd9Sstevel@tonic-gate * initialized when a cpu is configured (but still offline). 23977c478bd9Sstevel@tonic-gate */ 23987c478bd9Sstevel@tonic-gate void 23997c478bd9Sstevel@tonic-gate cpu_visibility_configure(cpu_t *cp, zone_t *zone) 24007c478bd9Sstevel@tonic-gate { 24017c478bd9Sstevel@tonic-gate zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES; 24027c478bd9Sstevel@tonic-gate 24037c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 24047c478bd9Sstevel@tonic-gate ASSERT(pool_pset_enabled()); 24057c478bd9Sstevel@tonic-gate ASSERT(cp != NULL); 24067c478bd9Sstevel@tonic-gate 24077c478bd9Sstevel@tonic-gate if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 24087c478bd9Sstevel@tonic-gate zone->zone_ncpus++; 24097c478bd9Sstevel@tonic-gate ASSERT(zone->zone_ncpus <= ncpus); 24107c478bd9Sstevel@tonic-gate } 24117c478bd9Sstevel@tonic-gate if (cp->cpu_info_kstat != NULL) 24127c478bd9Sstevel@tonic-gate kstat_zone_add(cp->cpu_info_kstat, zoneid); 24137c478bd9Sstevel@tonic-gate } 24147c478bd9Sstevel@tonic-gate 24157c478bd9Sstevel@tonic-gate /* 24167c478bd9Sstevel@tonic-gate * Make visible to the zone that subset of the cpu information that would be 24177c478bd9Sstevel@tonic-gate * initialized when a previously configured cpu is onlined. 24187c478bd9Sstevel@tonic-gate */ 24197c478bd9Sstevel@tonic-gate void 24207c478bd9Sstevel@tonic-gate cpu_visibility_online(cpu_t *cp, zone_t *zone) 24217c478bd9Sstevel@tonic-gate { 24227c478bd9Sstevel@tonic-gate kstat_t *ksp; 24237c478bd9Sstevel@tonic-gate char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */ 24247c478bd9Sstevel@tonic-gate zoneid_t zoneid = zone ? 
zone->zone_id : ALL_ZONES; 24257c478bd9Sstevel@tonic-gate processorid_t cpun; 24267c478bd9Sstevel@tonic-gate 24277c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 24287c478bd9Sstevel@tonic-gate ASSERT(pool_pset_enabled()); 24297c478bd9Sstevel@tonic-gate ASSERT(cp != NULL); 24307c478bd9Sstevel@tonic-gate ASSERT(cpu_is_active(cp)); 24317c478bd9Sstevel@tonic-gate 24327c478bd9Sstevel@tonic-gate cpun = cp->cpu_id; 24337c478bd9Sstevel@tonic-gate if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 24347c478bd9Sstevel@tonic-gate zone->zone_ncpus_online++; 24357c478bd9Sstevel@tonic-gate ASSERT(zone->zone_ncpus_online <= ncpus_online); 24367c478bd9Sstevel@tonic-gate } 24377c478bd9Sstevel@tonic-gate (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun); 24387c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES)) 24397c478bd9Sstevel@tonic-gate != NULL) { 24407c478bd9Sstevel@tonic-gate kstat_zone_add(ksp, zoneid); 24417c478bd9Sstevel@tonic-gate kstat_rele(ksp); 24427c478bd9Sstevel@tonic-gate } 24437c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) { 24447c478bd9Sstevel@tonic-gate kstat_zone_add(ksp, zoneid); 24457c478bd9Sstevel@tonic-gate kstat_rele(ksp); 24467c478bd9Sstevel@tonic-gate } 24477c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) { 24487c478bd9Sstevel@tonic-gate kstat_zone_add(ksp, zoneid); 24497c478bd9Sstevel@tonic-gate kstat_rele(ksp); 24507c478bd9Sstevel@tonic-gate } 24517c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) != 24527c478bd9Sstevel@tonic-gate NULL) { 24537c478bd9Sstevel@tonic-gate kstat_zone_add(ksp, zoneid); 24547c478bd9Sstevel@tonic-gate kstat_rele(ksp); 24557c478bd9Sstevel@tonic-gate } 24567c478bd9Sstevel@tonic-gate } 24577c478bd9Sstevel@tonic-gate 24587c478bd9Sstevel@tonic-gate /* 24597c478bd9Sstevel@tonic-gate * Update relevant kstats such that cpu is now visible to processes 24607c478bd9Sstevel@tonic-gate * executing in specified zone. 24617c478bd9Sstevel@tonic-gate */ 24627c478bd9Sstevel@tonic-gate void 24637c478bd9Sstevel@tonic-gate cpu_visibility_add(cpu_t *cp, zone_t *zone) 24647c478bd9Sstevel@tonic-gate { 24657c478bd9Sstevel@tonic-gate cpu_visibility_configure(cp, zone); 24667c478bd9Sstevel@tonic-gate if (cpu_is_active(cp)) 24677c478bd9Sstevel@tonic-gate cpu_visibility_online(cp, zone); 24687c478bd9Sstevel@tonic-gate } 24697c478bd9Sstevel@tonic-gate 24707c478bd9Sstevel@tonic-gate /* 24717c478bd9Sstevel@tonic-gate * Make invisible to the zone that subset of the cpu information that would be 24727c478bd9Sstevel@tonic-gate * torn down when a previously offlined cpu is unconfigured. 24737c478bd9Sstevel@tonic-gate */ 24747c478bd9Sstevel@tonic-gate void 24757c478bd9Sstevel@tonic-gate cpu_visibility_unconfigure(cpu_t *cp, zone_t *zone) 24767c478bd9Sstevel@tonic-gate { 24777c478bd9Sstevel@tonic-gate zoneid_t zoneid = zone ? 
zone->zone_id : ALL_ZONES; 24787c478bd9Sstevel@tonic-gate 24797c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 24807c478bd9Sstevel@tonic-gate ASSERT(pool_pset_enabled()); 24817c478bd9Sstevel@tonic-gate ASSERT(cp != NULL); 24827c478bd9Sstevel@tonic-gate 24837c478bd9Sstevel@tonic-gate if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 24847c478bd9Sstevel@tonic-gate ASSERT(zone->zone_ncpus != 0); 24857c478bd9Sstevel@tonic-gate zone->zone_ncpus--; 24867c478bd9Sstevel@tonic-gate } 24877c478bd9Sstevel@tonic-gate if (cp->cpu_info_kstat) 24887c478bd9Sstevel@tonic-gate kstat_zone_remove(cp->cpu_info_kstat, zoneid); 24897c478bd9Sstevel@tonic-gate } 24907c478bd9Sstevel@tonic-gate 24917c478bd9Sstevel@tonic-gate /* 24927c478bd9Sstevel@tonic-gate * Make invisible to the zone that subset of the cpu information that would be 24937c478bd9Sstevel@tonic-gate * torn down when a cpu is offlined (but still configured). 24947c478bd9Sstevel@tonic-gate */ 24957c478bd9Sstevel@tonic-gate void 24967c478bd9Sstevel@tonic-gate cpu_visibility_offline(cpu_t *cp, zone_t *zone) 24977c478bd9Sstevel@tonic-gate { 24987c478bd9Sstevel@tonic-gate kstat_t *ksp; 24997c478bd9Sstevel@tonic-gate char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */ 25007c478bd9Sstevel@tonic-gate zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES; 25017c478bd9Sstevel@tonic-gate processorid_t cpun; 25027c478bd9Sstevel@tonic-gate 25037c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 25047c478bd9Sstevel@tonic-gate ASSERT(pool_pset_enabled()); 25057c478bd9Sstevel@tonic-gate ASSERT(cp != NULL); 25067c478bd9Sstevel@tonic-gate ASSERT(cpu_is_active(cp)); 25077c478bd9Sstevel@tonic-gate 25087c478bd9Sstevel@tonic-gate cpun = cp->cpu_id; 25097c478bd9Sstevel@tonic-gate if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 25107c478bd9Sstevel@tonic-gate ASSERT(zone->zone_ncpus_online != 0); 25117c478bd9Sstevel@tonic-gate zone->zone_ncpus_online--; 25127c478bd9Sstevel@tonic-gate } 25137c478bd9Sstevel@tonic-gate 25147c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) != 25157c478bd9Sstevel@tonic-gate NULL) { 25167c478bd9Sstevel@tonic-gate kstat_zone_remove(ksp, zoneid); 25177c478bd9Sstevel@tonic-gate kstat_rele(ksp); 25187c478bd9Sstevel@tonic-gate } 25197c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) { 25207c478bd9Sstevel@tonic-gate kstat_zone_remove(ksp, zoneid); 25217c478bd9Sstevel@tonic-gate kstat_rele(ksp); 25227c478bd9Sstevel@tonic-gate } 25237c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) { 25247c478bd9Sstevel@tonic-gate kstat_zone_remove(ksp, zoneid); 25257c478bd9Sstevel@tonic-gate kstat_rele(ksp); 25267c478bd9Sstevel@tonic-gate } 25277c478bd9Sstevel@tonic-gate (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun); 25287c478bd9Sstevel@tonic-gate if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES)) 25297c478bd9Sstevel@tonic-gate != NULL) { 25307c478bd9Sstevel@tonic-gate kstat_zone_remove(ksp, zoneid); 25317c478bd9Sstevel@tonic-gate kstat_rele(ksp); 25327c478bd9Sstevel@tonic-gate } 25337c478bd9Sstevel@tonic-gate } 25347c478bd9Sstevel@tonic-gate 25357c478bd9Sstevel@tonic-gate /* 25367c478bd9Sstevel@tonic-gate * Update relevant kstats such that cpu is no longer visible to processes 25377c478bd9Sstevel@tonic-gate * executing in specified zone. 
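 *
 * Sketch of the expected pairing when moving a cpu's visibility
 * between zones (the zone pointers are hypothetical; cpu_lock must be
 * held and pools must be enabled, per the ASSERTs):
 *
 *	cpu_visibility_remove(cp, oldzone);
 *	cpu_visibility_add(cp, newzone);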
25387c478bd9Sstevel@tonic-gate */ 25397c478bd9Sstevel@tonic-gate void 25407c478bd9Sstevel@tonic-gate cpu_visibility_remove(cpu_t *cp, zone_t *zone) 25417c478bd9Sstevel@tonic-gate { 25427c478bd9Sstevel@tonic-gate if (cpu_is_active(cp)) 25437c478bd9Sstevel@tonic-gate cpu_visibility_offline(cp, zone); 25447c478bd9Sstevel@tonic-gate cpu_visibility_unconfigure(cp, zone); 25457c478bd9Sstevel@tonic-gate } 25467c478bd9Sstevel@tonic-gate 25477c478bd9Sstevel@tonic-gate /* 25487c478bd9Sstevel@tonic-gate * Bind a thread to a CPU as requested. 25497c478bd9Sstevel@tonic-gate */ 25507c478bd9Sstevel@tonic-gate int 25517c478bd9Sstevel@tonic-gate cpu_bind_thread(kthread_id_t tp, processorid_t bind, processorid_t *obind, 25527c478bd9Sstevel@tonic-gate int *error) 25537c478bd9Sstevel@tonic-gate { 25547c478bd9Sstevel@tonic-gate processorid_t binding; 25550b70c467Sakolb cpu_t *cp = NULL; 25567c478bd9Sstevel@tonic-gate 25577c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 25587c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock)); 25597c478bd9Sstevel@tonic-gate 25607c478bd9Sstevel@tonic-gate thread_lock(tp); 25617c478bd9Sstevel@tonic-gate 25627c478bd9Sstevel@tonic-gate /* 25637c478bd9Sstevel@tonic-gate * Record old binding, but change the obind, which was initialized 25647c478bd9Sstevel@tonic-gate * to PBIND_NONE, only if this thread has a binding. This avoids 25657c478bd9Sstevel@tonic-gate * reporting PBIND_NONE for a process when some LWPs are bound. 25667c478bd9Sstevel@tonic-gate */ 25677c478bd9Sstevel@tonic-gate binding = tp->t_bind_cpu; 25683eea75d7SAlexander Kolbasov if (binding != PBIND_NONE) 25693eea75d7SAlexander Kolbasov *obind = binding; /* record old binding */ 25707c478bd9Sstevel@tonic-gate 25710b70c467Sakolb switch (bind) { 25720b70c467Sakolb case PBIND_QUERY: 25730b70c467Sakolb /* Just return the old binding */ 25747c478bd9Sstevel@tonic-gate thread_unlock(tp); 25757c478bd9Sstevel@tonic-gate return (0); 25760b70c467Sakolb 25770b70c467Sakolb case PBIND_QUERY_TYPE: 25780b70c467Sakolb /* Return the binding type */ 25790b70c467Sakolb *obind = TB_CPU_IS_SOFT(tp) ? PBIND_SOFT : PBIND_HARD; 25800b70c467Sakolb thread_unlock(tp); 25810b70c467Sakolb return (0); 25820b70c467Sakolb 25830b70c467Sakolb case PBIND_SOFT: 25840b70c467Sakolb /* 25850b70c467Sakolb * Set soft binding for this thread and return the actual 25860b70c467Sakolb * binding 25870b70c467Sakolb */ 25880b70c467Sakolb TB_CPU_SOFT_SET(tp); 25890b70c467Sakolb thread_unlock(tp); 25900b70c467Sakolb return (0); 25910b70c467Sakolb 25920b70c467Sakolb case PBIND_HARD: 25930b70c467Sakolb /* 25940b70c467Sakolb * Set hard binding for this thread and return the actual 25950b70c467Sakolb * binding 25960b70c467Sakolb */ 25970b70c467Sakolb TB_CPU_HARD_SET(tp); 25980b70c467Sakolb thread_unlock(tp); 25990b70c467Sakolb return (0); 26000b70c467Sakolb 26010b70c467Sakolb default: 26020b70c467Sakolb break; 26037c478bd9Sstevel@tonic-gate } 26047c478bd9Sstevel@tonic-gate 26057c478bd9Sstevel@tonic-gate /* 26067c478bd9Sstevel@tonic-gate * If this thread/LWP cannot be bound because of permission 26077c478bd9Sstevel@tonic-gate * problems, just note that and return success so that the 26087c478bd9Sstevel@tonic-gate * other threads/LWPs will be bound. This is the way 26097c478bd9Sstevel@tonic-gate * processor_bind() is defined to work. 26107c478bd9Sstevel@tonic-gate * 26117c478bd9Sstevel@tonic-gate * Binding will get EPERM if the thread is of system class 26127c478bd9Sstevel@tonic-gate * or hasprocperm() fails. 
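 *
 * e.g. for a (hypothetical) processor_bind(P_PID, pid, 3, &obind)
 * covering many LWPs, a system-class LWP in the set is skipped with
 * EPERM noted through the error argument while the remaining LWPs
 * are still bound.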
26137c478bd9Sstevel@tonic-gate */ 26147c478bd9Sstevel@tonic-gate if (tp->t_cid == 0 || !hasprocperm(tp->t_cred, CRED())) { 26157c478bd9Sstevel@tonic-gate *error = EPERM; 26167c478bd9Sstevel@tonic-gate thread_unlock(tp); 26177c478bd9Sstevel@tonic-gate return (0); 26187c478bd9Sstevel@tonic-gate } 26197c478bd9Sstevel@tonic-gate 26207c478bd9Sstevel@tonic-gate binding = bind; 26217c478bd9Sstevel@tonic-gate if (binding != PBIND_NONE) { 26220b70c467Sakolb cp = cpu_get((processorid_t)binding); 26237c478bd9Sstevel@tonic-gate /* 26240b70c467Sakolb * Make sure binding is valid and is in right partition. 26257c478bd9Sstevel@tonic-gate */ 26260b70c467Sakolb if (cp == NULL || tp->t_cpupart != cp->cpu_part) { 26277c478bd9Sstevel@tonic-gate *error = EINVAL; 26287c478bd9Sstevel@tonic-gate thread_unlock(tp); 26297c478bd9Sstevel@tonic-gate return (0); 26307c478bd9Sstevel@tonic-gate } 26317c478bd9Sstevel@tonic-gate } 26327c478bd9Sstevel@tonic-gate tp->t_bind_cpu = binding; /* set new binding */ 26337c478bd9Sstevel@tonic-gate 26347c478bd9Sstevel@tonic-gate /* 26357c478bd9Sstevel@tonic-gate * If there is no system-set reason for affinity, set 26367c478bd9Sstevel@tonic-gate * the t_bound_cpu field to reflect the binding. 26377c478bd9Sstevel@tonic-gate */ 26387c478bd9Sstevel@tonic-gate if (tp->t_affinitycnt == 0) { 26397c478bd9Sstevel@tonic-gate if (binding == PBIND_NONE) { 26407c478bd9Sstevel@tonic-gate /* 26417c478bd9Sstevel@tonic-gate * We may need to adjust disp_max_unbound_pri 26427c478bd9Sstevel@tonic-gate * since we're becoming unbound. 26437c478bd9Sstevel@tonic-gate */ 26447c478bd9Sstevel@tonic-gate disp_adjust_unbound_pri(tp); 26457c478bd9Sstevel@tonic-gate 26467c478bd9Sstevel@tonic-gate tp->t_bound_cpu = NULL; /* set new binding */ 26477c478bd9Sstevel@tonic-gate 26487c478bd9Sstevel@tonic-gate /* 26497c478bd9Sstevel@tonic-gate * Move thread to lgroup with strongest affinity 26507c478bd9Sstevel@tonic-gate * after unbinding 26517c478bd9Sstevel@tonic-gate */ 26527c478bd9Sstevel@tonic-gate if (tp->t_lgrp_affinity) 26537c478bd9Sstevel@tonic-gate lgrp_move_thread(tp, 26547c478bd9Sstevel@tonic-gate lgrp_choose(tp, tp->t_cpupart), 1); 26557c478bd9Sstevel@tonic-gate 26567c478bd9Sstevel@tonic-gate if (tp->t_state == TS_ONPROC && 26577c478bd9Sstevel@tonic-gate tp->t_cpu->cpu_part != tp->t_cpupart) 26587c478bd9Sstevel@tonic-gate cpu_surrender(tp); 26597c478bd9Sstevel@tonic-gate } else { 26607c478bd9Sstevel@tonic-gate lpl_t *lpl; 26617c478bd9Sstevel@tonic-gate 26627c478bd9Sstevel@tonic-gate tp->t_bound_cpu = cp; 26637c478bd9Sstevel@tonic-gate ASSERT(cp->cpu_lpl != NULL); 26647c478bd9Sstevel@tonic-gate 26657c478bd9Sstevel@tonic-gate /* 26667c478bd9Sstevel@tonic-gate * Set home to lgroup with most affinity containing CPU 26677c478bd9Sstevel@tonic-gate * that thread is being bound or minimum bounding 26687c478bd9Sstevel@tonic-gate * lgroup if no affinities set 26697c478bd9Sstevel@tonic-gate */ 26707c478bd9Sstevel@tonic-gate if (tp->t_lgrp_affinity) 267103400a71Sjjc lpl = lgrp_affinity_best(tp, tp->t_cpupart, 267203400a71Sjjc LGRP_NONE, B_FALSE); 26737c478bd9Sstevel@tonic-gate else 26747c478bd9Sstevel@tonic-gate lpl = cp->cpu_lpl; 26757c478bd9Sstevel@tonic-gate 26767c478bd9Sstevel@tonic-gate if (tp->t_lpl != lpl) { 26777c478bd9Sstevel@tonic-gate /* can't grab cpu_lock */ 26787c478bd9Sstevel@tonic-gate lgrp_move_thread(tp, lpl, 1); 26797c478bd9Sstevel@tonic-gate } 26807c478bd9Sstevel@tonic-gate 26817c478bd9Sstevel@tonic-gate /* 26827c478bd9Sstevel@tonic-gate * Make the thread switch to the bound CPU. 
26837c478bd9Sstevel@tonic-gate * If the thread is runnable, we need to 26847c478bd9Sstevel@tonic-gate * requeue it even if t_cpu is already set 26857c478bd9Sstevel@tonic-gate * to the right CPU, since it may be on a 26867c478bd9Sstevel@tonic-gate * kpreempt queue and need to move to a local 26877c478bd9Sstevel@tonic-gate * queue. We could check t_disp_queue to 26887c478bd9Sstevel@tonic-gate * avoid unnecessary overhead if it's already 26897c478bd9Sstevel@tonic-gate * on the right queue, but since this isn't 26907c478bd9Sstevel@tonic-gate * a performance-critical operation it doesn't 26917c478bd9Sstevel@tonic-gate * seem worth the extra code and complexity. 26927c478bd9Sstevel@tonic-gate * 26937c478bd9Sstevel@tonic-gate * If the thread is weakbound to the cpu then it will 26947c478bd9Sstevel@tonic-gate * resist the new binding request until the weak 26957c478bd9Sstevel@tonic-gate * binding drops. The cpu_surrender or requeueing 26967c478bd9Sstevel@tonic-gate * below could be skipped in such cases (since it 26977c478bd9Sstevel@tonic-gate * will have no effect), but that would require 26987c478bd9Sstevel@tonic-gate * thread_allowmigrate to acquire thread_lock so 26997c478bd9Sstevel@tonic-gate * we'll take the very occasional hit here instead. 27007c478bd9Sstevel@tonic-gate */ 27017c478bd9Sstevel@tonic-gate if (tp->t_state == TS_ONPROC) { 27027c478bd9Sstevel@tonic-gate cpu_surrender(tp); 27037c478bd9Sstevel@tonic-gate } else if (tp->t_state == TS_RUN) { 27047c478bd9Sstevel@tonic-gate cpu_t *ocp = tp->t_cpu; 27057c478bd9Sstevel@tonic-gate 27067c478bd9Sstevel@tonic-gate (void) dispdeq(tp); 27077c478bd9Sstevel@tonic-gate setbackdq(tp); 27087c478bd9Sstevel@tonic-gate /* 27097c478bd9Sstevel@tonic-gate * Either on the bound CPU's disp queue now, 27107c478bd9Sstevel@tonic-gate * or swapped out or on the swap queue. 27117c478bd9Sstevel@tonic-gate */ 27127c478bd9Sstevel@tonic-gate ASSERT(tp->t_disp_queue == cp->cpu_disp || 27137c478bd9Sstevel@tonic-gate tp->t_weakbound_cpu == ocp || 27147c478bd9Sstevel@tonic-gate (tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) 27157c478bd9Sstevel@tonic-gate != TS_LOAD); 27167c478bd9Sstevel@tonic-gate } 27177c478bd9Sstevel@tonic-gate } 27187c478bd9Sstevel@tonic-gate } 27197c478bd9Sstevel@tonic-gate 27207c478bd9Sstevel@tonic-gate /* 27217c478bd9Sstevel@tonic-gate * Our binding has changed; set TP_CHANGEBIND. 
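 *
 * (The aston() below posts an AST so the thread takes the slow
 * return-to-user path, where the pending TP_CHANGEBIND can be
 * noticed and acted upon.)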
27227c478bd9Sstevel@tonic-gate */ 27237c478bd9Sstevel@tonic-gate tp->t_proc_flag |= TP_CHANGEBIND; 27247c478bd9Sstevel@tonic-gate aston(tp); 27257c478bd9Sstevel@tonic-gate 27267c478bd9Sstevel@tonic-gate thread_unlock(tp); 27277c478bd9Sstevel@tonic-gate 27287c478bd9Sstevel@tonic-gate return (0); 27297c478bd9Sstevel@tonic-gate } 27307c478bd9Sstevel@tonic-gate 27317c478bd9Sstevel@tonic-gate 273289574a1fSPatrick Mooney cpuset_t * 273389574a1fSPatrick Mooney cpuset_alloc(int kmflags) 273489574a1fSPatrick Mooney { 273589574a1fSPatrick Mooney return (kmem_alloc(sizeof (cpuset_t), kmflags)); 273689574a1fSPatrick Mooney } 273789574a1fSPatrick Mooney 273889574a1fSPatrick Mooney void 273989574a1fSPatrick Mooney cpuset_free(cpuset_t *s) 274089574a1fSPatrick Mooney { 274189574a1fSPatrick Mooney kmem_free(s, sizeof (cpuset_t)); 274289574a1fSPatrick Mooney } 27437c478bd9Sstevel@tonic-gate 27447c478bd9Sstevel@tonic-gate void 27457c478bd9Sstevel@tonic-gate cpuset_all(cpuset_t *s) 27467c478bd9Sstevel@tonic-gate { 27477c478bd9Sstevel@tonic-gate int i; 27487c478bd9Sstevel@tonic-gate 27497c478bd9Sstevel@tonic-gate for (i = 0; i < CPUSET_WORDS; i++) 27507c478bd9Sstevel@tonic-gate s->cpub[i] = ~0UL; 27517c478bd9Sstevel@tonic-gate } 27527c478bd9Sstevel@tonic-gate 27537c478bd9Sstevel@tonic-gate void 275489574a1fSPatrick Mooney cpuset_all_but(cpuset_t *s, const uint_t cpu) 27557c478bd9Sstevel@tonic-gate { 27567c478bd9Sstevel@tonic-gate cpuset_all(s); 27577c478bd9Sstevel@tonic-gate CPUSET_DEL(*s, cpu); 27587c478bd9Sstevel@tonic-gate } 27597c478bd9Sstevel@tonic-gate 27607c478bd9Sstevel@tonic-gate void 276189574a1fSPatrick Mooney cpuset_only(cpuset_t *s, const uint_t cpu) 27627c478bd9Sstevel@tonic-gate { 27637c478bd9Sstevel@tonic-gate CPUSET_ZERO(*s); 27647c478bd9Sstevel@tonic-gate CPUSET_ADD(*s, cpu); 27657c478bd9Sstevel@tonic-gate } 27667c478bd9Sstevel@tonic-gate 276789574a1fSPatrick Mooney long 276889574a1fSPatrick Mooney cpu_in_set(const cpuset_t *s, const uint_t cpu) 276989574a1fSPatrick Mooney { 277089574a1fSPatrick Mooney VERIFY(cpu < NCPU); 277189574a1fSPatrick Mooney return (BT_TEST(s->cpub, cpu)); 277289574a1fSPatrick Mooney } 277389574a1fSPatrick Mooney 277489574a1fSPatrick Mooney void 277589574a1fSPatrick Mooney cpuset_add(cpuset_t *s, const uint_t cpu) 277689574a1fSPatrick Mooney { 277789574a1fSPatrick Mooney VERIFY(cpu < NCPU); 277889574a1fSPatrick Mooney BT_SET(s->cpub, cpu); 277989574a1fSPatrick Mooney } 278089574a1fSPatrick Mooney 278189574a1fSPatrick Mooney void 278289574a1fSPatrick Mooney cpuset_del(cpuset_t *s, const uint_t cpu) 278389574a1fSPatrick Mooney { 278489574a1fSPatrick Mooney VERIFY(cpu < NCPU); 278589574a1fSPatrick Mooney BT_CLEAR(s->cpub, cpu); 278689574a1fSPatrick Mooney } 278789574a1fSPatrick Mooney 27887c478bd9Sstevel@tonic-gate int 278989574a1fSPatrick Mooney cpuset_isnull(const cpuset_t *s) 27907c478bd9Sstevel@tonic-gate { 27917c478bd9Sstevel@tonic-gate int i; 27927c478bd9Sstevel@tonic-gate 279389574a1fSPatrick Mooney for (i = 0; i < CPUSET_WORDS; i++) { 27947c478bd9Sstevel@tonic-gate if (s->cpub[i] != 0) 27957c478bd9Sstevel@tonic-gate return (0); 279689574a1fSPatrick Mooney } 27977c478bd9Sstevel@tonic-gate return (1); 27987c478bd9Sstevel@tonic-gate } 27997c478bd9Sstevel@tonic-gate 28007c478bd9Sstevel@tonic-gate int 280189574a1fSPatrick Mooney cpuset_isequal(const cpuset_t *s1, const cpuset_t *s2) 28027c478bd9Sstevel@tonic-gate { 28037c478bd9Sstevel@tonic-gate int i; 28047c478bd9Sstevel@tonic-gate 280589574a1fSPatrick Mooney for (i = 0; i < CPUSET_WORDS; i++) { 
28067c478bd9Sstevel@tonic-gate if (s1->cpub[i] != s2->cpub[i]) 28077c478bd9Sstevel@tonic-gate return (0); 280889574a1fSPatrick Mooney } 28097c478bd9Sstevel@tonic-gate return (1); 28107c478bd9Sstevel@tonic-gate } 28117c478bd9Sstevel@tonic-gate 28127c478bd9Sstevel@tonic-gate uint_t 281389574a1fSPatrick Mooney cpuset_find(const cpuset_t *s) 28147c478bd9Sstevel@tonic-gate { 28157c478bd9Sstevel@tonic-gate 28167c478bd9Sstevel@tonic-gate uint_t i; 28177c478bd9Sstevel@tonic-gate uint_t cpu = (uint_t)-1; 28187c478bd9Sstevel@tonic-gate 28197c478bd9Sstevel@tonic-gate /* 28207c478bd9Sstevel@tonic-gate * Find a cpu in the cpuset 28217c478bd9Sstevel@tonic-gate */ 282225cf1a30Sjl139090 for (i = 0; i < CPUSET_WORDS; i++) { 28237c478bd9Sstevel@tonic-gate cpu = (uint_t)(lowbit(s->cpub[i]) - 1); 282425cf1a30Sjl139090 if (cpu != (uint_t)-1) { 282525cf1a30Sjl139090 cpu += i * BT_NBIPUL; 282625cf1a30Sjl139090 break; 282725cf1a30Sjl139090 } 282825cf1a30Sjl139090 } 28297c478bd9Sstevel@tonic-gate return (cpu); 28307c478bd9Sstevel@tonic-gate } 28317c478bd9Sstevel@tonic-gate 283200423197Sha137994 void 283389574a1fSPatrick Mooney cpuset_bounds(const cpuset_t *s, uint_t *smallestid, uint_t *largestid) 283400423197Sha137994 { 283500423197Sha137994 int i, j; 283600423197Sha137994 uint_t bit; 283700423197Sha137994 283800423197Sha137994 /* 283900423197Sha137994 * First, find the smallest cpu id in the set. 284000423197Sha137994 */ 284100423197Sha137994 for (i = 0; i < CPUSET_WORDS; i++) { 284200423197Sha137994 if (s->cpub[i] != 0) { 284300423197Sha137994 bit = (uint_t)(lowbit(s->cpub[i]) - 1); 284400423197Sha137994 ASSERT(bit != (uint_t)-1); 284500423197Sha137994 *smallestid = bit + (i * BT_NBIPUL); 284600423197Sha137994 284700423197Sha137994 /* 284800423197Sha137994 * Now find the largest cpu id in 284900423197Sha137994 * the set and return immediately. 285000423197Sha137994 * Done in an inner loop to avoid 285100423197Sha137994 * having to break out of the first 285200423197Sha137994 * loop. 285300423197Sha137994 */ 285400423197Sha137994 for (j = CPUSET_WORDS - 1; j >= i; j--) { 285500423197Sha137994 if (s->cpub[j] != 0) { 285600423197Sha137994 bit = (uint_t)(highbit(s->cpub[j]) - 1); 285700423197Sha137994 ASSERT(bit != (uint_t)-1); 285800423197Sha137994 *largestid = bit + (j * BT_NBIPUL); 285900423197Sha137994 ASSERT(*largestid >= *smallestid); 286000423197Sha137994 return; 286100423197Sha137994 } 286200423197Sha137994 } 286300423197Sha137994 286400423197Sha137994 /* 286500423197Sha137994 * If this code is reached, a 286600423197Sha137994 * smallestid was found, but not a 286700423197Sha137994 * largestid. The cpuset must have 286800423197Sha137994 * been changed during the course 286900423197Sha137994 * of this function call. 
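 *
 * (The set is expected to be stable for the duration of the call;
 * callers that mutate cpusets concurrently must provide their own
 * synchronization.)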
287000423197Sha137994 */ 287100423197Sha137994 ASSERT(0); 287200423197Sha137994 } 287300423197Sha137994 } 287400423197Sha137994 *smallestid = *largestid = CPUSET_NOTINSET; 287500423197Sha137994 } 287600423197Sha137994 287789574a1fSPatrick Mooney void 287889574a1fSPatrick Mooney cpuset_atomic_del(cpuset_t *s, const uint_t cpu) 287989574a1fSPatrick Mooney { 288089574a1fSPatrick Mooney VERIFY(cpu < NCPU); 288189574a1fSPatrick Mooney BT_ATOMIC_CLEAR(s->cpub, (cpu)) 288289574a1fSPatrick Mooney } 288389574a1fSPatrick Mooney 288489574a1fSPatrick Mooney void 288589574a1fSPatrick Mooney cpuset_atomic_add(cpuset_t *s, const uint_t cpu) 288689574a1fSPatrick Mooney { 288789574a1fSPatrick Mooney VERIFY(cpu < NCPU); 288889574a1fSPatrick Mooney BT_ATOMIC_SET(s->cpub, (cpu)) 288989574a1fSPatrick Mooney } 289089574a1fSPatrick Mooney 289189574a1fSPatrick Mooney long 289289574a1fSPatrick Mooney cpuset_atomic_xadd(cpuset_t *s, const uint_t cpu) 289389574a1fSPatrick Mooney { 289489574a1fSPatrick Mooney long res; 289589574a1fSPatrick Mooney 289689574a1fSPatrick Mooney VERIFY(cpu < NCPU); 289789574a1fSPatrick Mooney BT_ATOMIC_SET_EXCL(s->cpub, cpu, res); 289889574a1fSPatrick Mooney return (res); 289989574a1fSPatrick Mooney } 290089574a1fSPatrick Mooney 290189574a1fSPatrick Mooney long 290289574a1fSPatrick Mooney cpuset_atomic_xdel(cpuset_t *s, const uint_t cpu) 290389574a1fSPatrick Mooney { 290489574a1fSPatrick Mooney long res; 290589574a1fSPatrick Mooney 290689574a1fSPatrick Mooney VERIFY(cpu < NCPU); 290789574a1fSPatrick Mooney BT_ATOMIC_CLEAR_EXCL(s->cpub, cpu, res); 290889574a1fSPatrick Mooney return (res); 290989574a1fSPatrick Mooney } 291089574a1fSPatrick Mooney 291189574a1fSPatrick Mooney void 291289574a1fSPatrick Mooney cpuset_or(cpuset_t *dst, cpuset_t *src) 291389574a1fSPatrick Mooney { 291489574a1fSPatrick Mooney for (int i = 0; i < CPUSET_WORDS; i++) { 291589574a1fSPatrick Mooney dst->cpub[i] |= src->cpub[i]; 291689574a1fSPatrick Mooney } 291789574a1fSPatrick Mooney } 291889574a1fSPatrick Mooney 291989574a1fSPatrick Mooney void 292089574a1fSPatrick Mooney cpuset_xor(cpuset_t *dst, cpuset_t *src) 292189574a1fSPatrick Mooney { 292289574a1fSPatrick Mooney for (int i = 0; i < CPUSET_WORDS; i++) { 292389574a1fSPatrick Mooney dst->cpub[i] ^= src->cpub[i]; 292489574a1fSPatrick Mooney } 292589574a1fSPatrick Mooney } 292689574a1fSPatrick Mooney 292789574a1fSPatrick Mooney void 292889574a1fSPatrick Mooney cpuset_and(cpuset_t *dst, cpuset_t *src) 292989574a1fSPatrick Mooney { 293089574a1fSPatrick Mooney for (int i = 0; i < CPUSET_WORDS; i++) { 293189574a1fSPatrick Mooney dst->cpub[i] &= src->cpub[i]; 293289574a1fSPatrick Mooney } 293389574a1fSPatrick Mooney } 293489574a1fSPatrick Mooney 293589574a1fSPatrick Mooney void 293689574a1fSPatrick Mooney cpuset_zero(cpuset_t *dst) 293789574a1fSPatrick Mooney { 293889574a1fSPatrick Mooney for (int i = 0; i < CPUSET_WORDS; i++) { 293989574a1fSPatrick Mooney dst->cpub[i] = 0; 294089574a1fSPatrick Mooney } 294189574a1fSPatrick Mooney } 294289574a1fSPatrick Mooney 29437c478bd9Sstevel@tonic-gate 29447c478bd9Sstevel@tonic-gate /* 29450b70c467Sakolb * Unbind threads bound to specified CPU. 29460b70c467Sakolb * 29470b70c467Sakolb * If `unbind_all_threads' is true, unbind all user threads bound to a given 29480b70c467Sakolb * CPU. Otherwise unbind all soft-bound user threads. 
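 *
 * Sketch of a typical call (context hypothetical; cpu_lock must be
 * held, as asserted below):
 *
 *	error = cpu_unbind(cp->cpu_id, B_FALSE);
 *
 * where B_FALSE leaves hard-bound threads in place and B_TRUE would
 * unbind them as well.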
29497c478bd9Sstevel@tonic-gate */ 29507c478bd9Sstevel@tonic-gate int 29510b70c467Sakolb cpu_unbind(processorid_t cpu, boolean_t unbind_all_threads) 29527c478bd9Sstevel@tonic-gate { 29537c478bd9Sstevel@tonic-gate processorid_t obind; 29547c478bd9Sstevel@tonic-gate kthread_t *tp; 29557c478bd9Sstevel@tonic-gate int ret = 0; 29567c478bd9Sstevel@tonic-gate proc_t *pp; 29577c478bd9Sstevel@tonic-gate int err, berr = 0; 29587c478bd9Sstevel@tonic-gate 29597c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 29607c478bd9Sstevel@tonic-gate 29617c478bd9Sstevel@tonic-gate mutex_enter(&pidlock); 29627c478bd9Sstevel@tonic-gate for (pp = practive; pp != NULL; pp = pp->p_next) { 29637c478bd9Sstevel@tonic-gate mutex_enter(&pp->p_lock); 29647c478bd9Sstevel@tonic-gate tp = pp->p_tlist; 29657c478bd9Sstevel@tonic-gate /* 29667c478bd9Sstevel@tonic-gate * Skip zombies, kernel processes, and processes in 29677c478bd9Sstevel@tonic-gate * other zones, if called from a non-global zone. 29687c478bd9Sstevel@tonic-gate */ 29697c478bd9Sstevel@tonic-gate if (tp == NULL || (pp->p_flag & SSYS) || 29707c478bd9Sstevel@tonic-gate !HASZONEACCESS(curproc, pp->p_zone->zone_id)) { 29717c478bd9Sstevel@tonic-gate mutex_exit(&pp->p_lock); 29727c478bd9Sstevel@tonic-gate continue; 29737c478bd9Sstevel@tonic-gate } 29747c478bd9Sstevel@tonic-gate do { 29757c478bd9Sstevel@tonic-gate if (tp->t_bind_cpu != cpu) 29767c478bd9Sstevel@tonic-gate continue; 29770b70c467Sakolb /* 29780b70c467Sakolb * Skip threads with hard binding when 29790b70c467Sakolb * `unbind_all_threads' is not specified. 29800b70c467Sakolb */ 29810b70c467Sakolb if (!unbind_all_threads && TB_CPU_IS_HARD(tp)) 29820b70c467Sakolb continue; 29837c478bd9Sstevel@tonic-gate err = cpu_bind_thread(tp, PBIND_NONE, &obind, &berr); 29847c478bd9Sstevel@tonic-gate if (ret == 0) 29857c478bd9Sstevel@tonic-gate ret = err; 29867c478bd9Sstevel@tonic-gate } while ((tp = tp->t_forw) != pp->p_tlist); 29877c478bd9Sstevel@tonic-gate mutex_exit(&pp->p_lock); 29887c478bd9Sstevel@tonic-gate } 29897c478bd9Sstevel@tonic-gate mutex_exit(&pidlock); 29907c478bd9Sstevel@tonic-gate if (ret == 0) 29917c478bd9Sstevel@tonic-gate ret = berr; 29927c478bd9Sstevel@tonic-gate return (ret); 29937c478bd9Sstevel@tonic-gate } 29947c478bd9Sstevel@tonic-gate 29957c478bd9Sstevel@tonic-gate 29967c478bd9Sstevel@tonic-gate /* 29977c478bd9Sstevel@tonic-gate * Destroy all remaining bound threads on a cpu. 29987c478bd9Sstevel@tonic-gate */ 29997c478bd9Sstevel@tonic-gate void 30007c478bd9Sstevel@tonic-gate cpu_destroy_bound_threads(cpu_t *cp) 30017c478bd9Sstevel@tonic-gate { 30027c478bd9Sstevel@tonic-gate extern id_t syscid; 30037c478bd9Sstevel@tonic-gate register kthread_id_t t, tlist, tnext; 30047c478bd9Sstevel@tonic-gate 30057c478bd9Sstevel@tonic-gate /* 30067c478bd9Sstevel@tonic-gate * Destroy all remaining bound threads on the cpu. This 30077c478bd9Sstevel@tonic-gate * should include both the interrupt threads and the idle thread. 30087c478bd9Sstevel@tonic-gate * This requires some care, since we need to traverse the 30097c478bd9Sstevel@tonic-gate * thread list with the pidlock mutex locked, but thread_free 30107c478bd9Sstevel@tonic-gate * also locks the pidlock mutex. So, we collect the threads 30117c478bd9Sstevel@tonic-gate * we're going to reap in a list headed by "tlist", then we 30127c478bd9Sstevel@tonic-gate * unlock the pidlock mutex and traverse the tlist list, 30137c478bd9Sstevel@tonic-gate * doing thread_free's on the threads. Simple, isn't it?
30147c478bd9Sstevel@tonic-gate * Also, this depends on thread_free not mucking with the 30157c478bd9Sstevel@tonic-gate * t_next and t_prev links of the thread. 30167c478bd9Sstevel@tonic-gate */ 30177c478bd9Sstevel@tonic-gate 30187c478bd9Sstevel@tonic-gate if ((t = curthread) != NULL) { 30197c478bd9Sstevel@tonic-gate 30207c478bd9Sstevel@tonic-gate tlist = NULL; 30217c478bd9Sstevel@tonic-gate mutex_enter(&pidlock); 30227c478bd9Sstevel@tonic-gate do { 30237c478bd9Sstevel@tonic-gate tnext = t->t_next; 30247c478bd9Sstevel@tonic-gate if (t->t_bound_cpu == cp) { 30257c478bd9Sstevel@tonic-gate 30267c478bd9Sstevel@tonic-gate /* 30277c478bd9Sstevel@tonic-gate * We've found a bound thread, carefully unlink 30287c478bd9Sstevel@tonic-gate * it out of the thread list, and add it to 30297c478bd9Sstevel@tonic-gate * our "tlist". We "know" we don't have to 30307c478bd9Sstevel@tonic-gate * worry about unlinking curthread (the thread 30317c478bd9Sstevel@tonic-gate * that is executing this code). 30327c478bd9Sstevel@tonic-gate */ 30337c478bd9Sstevel@tonic-gate t->t_next->t_prev = t->t_prev; 30347c478bd9Sstevel@tonic-gate t->t_prev->t_next = t->t_next; 30357c478bd9Sstevel@tonic-gate t->t_next = tlist; 30367c478bd9Sstevel@tonic-gate tlist = t; 30377c478bd9Sstevel@tonic-gate ASSERT(t->t_cid == syscid); 30387c478bd9Sstevel@tonic-gate /* wake up anyone blocked in thread_join */ 30397c478bd9Sstevel@tonic-gate cv_broadcast(&t->t_joincv); 30407c478bd9Sstevel@tonic-gate /* 30417c478bd9Sstevel@tonic-gate * t_lwp set by interrupt threads and not 30427c478bd9Sstevel@tonic-gate * cleared. 30437c478bd9Sstevel@tonic-gate */ 30447c478bd9Sstevel@tonic-gate t->t_lwp = NULL; 30457c478bd9Sstevel@tonic-gate /* 30467c478bd9Sstevel@tonic-gate * Pause and idle threads always have 30477c478bd9Sstevel@tonic-gate * t_state set to TS_ONPROC. 30487c478bd9Sstevel@tonic-gate */ 30497c478bd9Sstevel@tonic-gate t->t_state = TS_FREE; 30507c478bd9Sstevel@tonic-gate t->t_prev = NULL; /* Just in case */ 30517c478bd9Sstevel@tonic-gate } 30527c478bd9Sstevel@tonic-gate 30537c478bd9Sstevel@tonic-gate } while ((t = tnext) != curthread); 30547c478bd9Sstevel@tonic-gate 30557c478bd9Sstevel@tonic-gate mutex_exit(&pidlock); 30567c478bd9Sstevel@tonic-gate 3057575a7426Spt157919 mutex_sync(); 30587c478bd9Sstevel@tonic-gate for (t = tlist; t != NULL; t = tnext) { 30597c478bd9Sstevel@tonic-gate tnext = t->t_next; 30607c478bd9Sstevel@tonic-gate thread_free(t); 30617c478bd9Sstevel@tonic-gate } 30627c478bd9Sstevel@tonic-gate } 30637c478bd9Sstevel@tonic-gate } 30647c478bd9Sstevel@tonic-gate 30657c478bd9Sstevel@tonic-gate /* 306668afbec1Smh27603 * Update the cpu_supp_freqs of this cpu. This information is returned 30674e93b15cSmh27603 * as part of cpu_info kstats. If the cpu_info_kstat exists already, then 30684e93b15cSmh27603 * maintain the kstat data size. 306968afbec1Smh27603 */ 307068afbec1Smh27603 void 307168afbec1Smh27603 cpu_set_supp_freqs(cpu_t *cp, const char *freqs) 307268afbec1Smh27603 { 307368afbec1Smh27603 char clkstr[sizeof ("18446744073709551615") + 1]; /* ui64 MAX */ 307468afbec1Smh27603 const char *lfreqs = clkstr; 30754e93b15cSmh27603 boolean_t kstat_exists = B_FALSE; 30764e93b15cSmh27603 kstat_t *ksp; 30774e93b15cSmh27603 size_t len; 307868afbec1Smh27603 307968afbec1Smh27603 /* 308068afbec1Smh27603 * A NULL pointer means we only support one speed. 
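 *
 * Illustrative call from a (hypothetical) CPU frequency driver
 * advertising three speeds in Hz; the colon separator shown here is
 * illustrative only:
 *
 *	cpu_set_supp_freqs(cp, "1000000000:2000000000:3000000000");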
308168afbec1Smh27603 */ 308268afbec1Smh27603 if (freqs == NULL) 308368afbec1Smh27603 (void) snprintf(clkstr, sizeof (clkstr), "%"PRIu64, 308468afbec1Smh27603 cp->cpu_curr_clock); 308568afbec1Smh27603 else 308668afbec1Smh27603 lfreqs = freqs; 308768afbec1Smh27603 308868afbec1Smh27603 /* 308968afbec1Smh27603 * Make sure the frequency doesn't change while a snapshot is 3090e40c2cd2Smh27603 * going on. Of course, we only need to worry about this if 3091e40c2cd2Smh27603 * the kstat exists. 309268afbec1Smh27603 */ 30934e93b15cSmh27603 if ((ksp = cp->cpu_info_kstat) != NULL) { 30944e93b15cSmh27603 mutex_enter(ksp->ks_lock); 30954e93b15cSmh27603 kstat_exists = B_TRUE; 3096e40c2cd2Smh27603 } 309768afbec1Smh27603 309868afbec1Smh27603 /* 30994e93b15cSmh27603 * Free any previously allocated string and if the kstat 31004e93b15cSmh27603 * already exists, then update its data size. 310168afbec1Smh27603 */ 31024e93b15cSmh27603 if (cp->cpu_supp_freqs != NULL) { 31034e93b15cSmh27603 len = strlen(cp->cpu_supp_freqs) + 1; 31044e93b15cSmh27603 kmem_free(cp->cpu_supp_freqs, len); 31054e93b15cSmh27603 if (kstat_exists) 31064e93b15cSmh27603 ksp->ks_data_size -= len; 31074e93b15cSmh27603 } 310868afbec1Smh27603 310968afbec1Smh27603 /* 311068afbec1Smh27603 * Allocate the new string and set the pointer. 311168afbec1Smh27603 */ 31124e93b15cSmh27603 len = strlen(lfreqs) + 1; 31134e93b15cSmh27603 cp->cpu_supp_freqs = kmem_alloc(len, KM_SLEEP); 311468afbec1Smh27603 (void) strcpy(cp->cpu_supp_freqs, lfreqs); 311568afbec1Smh27603 311668afbec1Smh27603 /* 31174e93b15cSmh27603 * If the kstat already exists then update the data size and 31184e93b15cSmh27603 * free the lock. 311968afbec1Smh27603 */ 31204e93b15cSmh27603 if (kstat_exists) { 31214e93b15cSmh27603 ksp->ks_data_size += len; 31224e93b15cSmh27603 mutex_exit(ksp->ks_lock); 31234e93b15cSmh27603 } 312468afbec1Smh27603 } 312568afbec1Smh27603 312668afbec1Smh27603 /* 31270e751525SEric Saxe * Indicate the current CPU's clock frequency (in Hz). 31280e751525SEric Saxe * The calling context must be such that CPU references are safe. 31290e751525SEric Saxe */ 31300e751525SEric Saxe void 31310e751525SEric Saxe cpu_set_curr_clock(uint64_t new_clk) 31320e751525SEric Saxe { 31330e751525SEric Saxe uint64_t old_clk; 31340e751525SEric Saxe 31350e751525SEric Saxe old_clk = CPU->cpu_curr_clock; 31360e751525SEric Saxe CPU->cpu_curr_clock = new_clk; 31370e751525SEric Saxe 31380e751525SEric Saxe /* 31390e751525SEric Saxe * The cpu-change-speed DTrace probe exports the frequency in Hz. 31400e751525SEric Saxe */ 31410e751525SEric Saxe DTRACE_PROBE3(cpu__change__speed, processorid_t, CPU->cpu_id, 31420e751525SEric Saxe uint64_t, old_clk, uint64_t, new_clk); 31430e751525SEric Saxe } 31440e751525SEric Saxe 31450e751525SEric Saxe /* 31467c478bd9Sstevel@tonic-gate * processor_info(2) and p_online(2) status support functions 31477c478bd9Sstevel@tonic-gate * The constants returned by cpu_get_state() and cpu_get_state_str() are 31487c478bd9Sstevel@tonic-gate * for use in communicating processor state information to userland. Kernel 31497c478bd9Sstevel@tonic-gate * subsystems should only be using the cpu_flags value directly. Subsystems 31507c478bd9Sstevel@tonic-gate * modifying cpu_flags should record the state change via a call to 31517c478bd9Sstevel@tonic-gate * cpu_set_state(). 31527c478bd9Sstevel@tonic-gate */ 31537c478bd9Sstevel@tonic-gate 31547c478bd9Sstevel@tonic-gate /* 31557c478bd9Sstevel@tonic-gate * Update the pi_state of this CPU.
This function provides the CPU status for 31567c478bd9Sstevel@tonic-gate * the information returned by processor_info(2). 31577c478bd9Sstevel@tonic-gate */ 31587c478bd9Sstevel@tonic-gate void 31597c478bd9Sstevel@tonic-gate cpu_set_state(cpu_t *cpu) 31607c478bd9Sstevel@tonic-gate { 31617c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 31627c478bd9Sstevel@tonic-gate cpu->cpu_type_info.pi_state = cpu_get_state(cpu); 31637c478bd9Sstevel@tonic-gate cpu->cpu_state_begin = gethrestime_sec(); 31647c478bd9Sstevel@tonic-gate pool_cpu_mod = gethrtime(); 31657c478bd9Sstevel@tonic-gate } 31667c478bd9Sstevel@tonic-gate 31677c478bd9Sstevel@tonic-gate /* 31687c478bd9Sstevel@tonic-gate * Return offline/online/other status for the indicated CPU. Use only for 31697c478bd9Sstevel@tonic-gate * communication with user applications; cpu_flags provides the in-kernel 31707c478bd9Sstevel@tonic-gate * interface. 31717c478bd9Sstevel@tonic-gate */ 3172*c3377ee9SJohn Levon static int 3173*c3377ee9SJohn Levon cpu_flags_to_state(cpu_flag_t flags) 3174*c3377ee9SJohn Levon { 3175*c3377ee9SJohn Levon if (flags & CPU_DISABLED) 3176*c3377ee9SJohn Levon return (P_DISABLED); 3177*c3377ee9SJohn Levon else if (flags & CPU_POWEROFF) 3178*c3377ee9SJohn Levon return (P_POWEROFF); 3179*c3377ee9SJohn Levon else if (flags & CPU_FAULTED) 3180*c3377ee9SJohn Levon return (P_FAULTED); 3181*c3377ee9SJohn Levon else if (flags & CPU_SPARE) 3182*c3377ee9SJohn Levon return (P_SPARE); 3183*c3377ee9SJohn Levon else if ((flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY) 3184*c3377ee9SJohn Levon return (P_OFFLINE); 3185*c3377ee9SJohn Levon else if (flags & CPU_ENABLE) 3186*c3377ee9SJohn Levon return (P_ONLINE); 3187*c3377ee9SJohn Levon else 3188*c3377ee9SJohn Levon return (P_NOINTR); 3189*c3377ee9SJohn Levon } 3190*c3377ee9SJohn Levon 31917c478bd9Sstevel@tonic-gate int 31927c478bd9Sstevel@tonic-gate cpu_get_state(cpu_t *cpu) 31937c478bd9Sstevel@tonic-gate { 31947c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 3195*c3377ee9SJohn Levon return (cpu_flags_to_state(cpu->cpu_flags)); 31967c478bd9Sstevel@tonic-gate } 31977c478bd9Sstevel@tonic-gate 31987c478bd9Sstevel@tonic-gate /* 31997c478bd9Sstevel@tonic-gate * Return processor_info(2) state as a string. 
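 *
 * e.g. cpu_info_kstat_update() above fills the kstat "state" field
 * with cpu_get_state_str(cp->cpu_flags), yielding strings such as
 * PS_ONLINE for a running, interrupt-enabled CPU.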
32007c478bd9Sstevel@tonic-gate */ 32017c478bd9Sstevel@tonic-gate const char * 3202*c3377ee9SJohn Levon cpu_get_state_str(cpu_flag_t flags) 32037c478bd9Sstevel@tonic-gate { 32047c478bd9Sstevel@tonic-gate const char *string; 32057c478bd9Sstevel@tonic-gate 3206*c3377ee9SJohn Levon switch (cpu_flags_to_state(flags)) { 32077c478bd9Sstevel@tonic-gate case P_ONLINE: 32087c478bd9Sstevel@tonic-gate string = PS_ONLINE; 32097c478bd9Sstevel@tonic-gate break; 32107c478bd9Sstevel@tonic-gate case P_POWEROFF: 32117c478bd9Sstevel@tonic-gate string = PS_POWEROFF; 32127c478bd9Sstevel@tonic-gate break; 32137c478bd9Sstevel@tonic-gate case P_NOINTR: 32147c478bd9Sstevel@tonic-gate string = PS_NOINTR; 32157c478bd9Sstevel@tonic-gate break; 32167c478bd9Sstevel@tonic-gate case P_SPARE: 32177c478bd9Sstevel@tonic-gate string = PS_SPARE; 32187c478bd9Sstevel@tonic-gate break; 32197c478bd9Sstevel@tonic-gate case P_FAULTED: 32207c478bd9Sstevel@tonic-gate string = PS_FAULTED; 32217c478bd9Sstevel@tonic-gate break; 32227c478bd9Sstevel@tonic-gate case P_OFFLINE: 32237c478bd9Sstevel@tonic-gate string = PS_OFFLINE; 32247c478bd9Sstevel@tonic-gate break; 3225*c3377ee9SJohn Levon case P_DISABLED: 3226*c3377ee9SJohn Levon string = PS_DISABLED; 3227*c3377ee9SJohn Levon break; 32287c478bd9Sstevel@tonic-gate default: 32297c478bd9Sstevel@tonic-gate string = "unknown"; 32307c478bd9Sstevel@tonic-gate break; 32317c478bd9Sstevel@tonic-gate } 32327c478bd9Sstevel@tonic-gate return (string); 32337c478bd9Sstevel@tonic-gate } 32347c478bd9Sstevel@tonic-gate 32357c478bd9Sstevel@tonic-gate /* 32367c478bd9Sstevel@tonic-gate * Export this CPU's statistics (cpu_stat_t and cpu_stats_t) as raw and named 32377c478bd9Sstevel@tonic-gate * kstats, respectively. This is done when a CPU is initialized or placed 32387c478bd9Sstevel@tonic-gate * online via p_online(2). 
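 *
 * The kstats created below can be read from userland with kstat(1M),
 * e.g. (illustrative invocations):
 *
 *	$ kstat -m cpu -i 0 -n sys
 *	$ kstat -m cpu_stat -i 0 -n cpu_stat0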
32397c478bd9Sstevel@tonic-gate */ 32407c478bd9Sstevel@tonic-gate static void 32417c478bd9Sstevel@tonic-gate cpu_stats_kstat_create(cpu_t *cp) 32427c478bd9Sstevel@tonic-gate { 32437c478bd9Sstevel@tonic-gate int instance = cp->cpu_id; 32447c478bd9Sstevel@tonic-gate char *module = "cpu"; 32457c478bd9Sstevel@tonic-gate char *class = "misc"; 32467c478bd9Sstevel@tonic-gate kstat_t *ksp; 32477c478bd9Sstevel@tonic-gate zoneid_t zoneid; 32487c478bd9Sstevel@tonic-gate 32497c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 32507c478bd9Sstevel@tonic-gate 32517c478bd9Sstevel@tonic-gate if (pool_pset_enabled()) 32527c478bd9Sstevel@tonic-gate zoneid = GLOBAL_ZONEID; 32537c478bd9Sstevel@tonic-gate else 32547c478bd9Sstevel@tonic-gate zoneid = ALL_ZONES; 32557c478bd9Sstevel@tonic-gate /* 32567c478bd9Sstevel@tonic-gate * Create named kstats 32577c478bd9Sstevel@tonic-gate */ 32587c478bd9Sstevel@tonic-gate #define CPU_STATS_KS_CREATE(name, tsize, update_func) \ 32597c478bd9Sstevel@tonic-gate ksp = kstat_create_zone(module, instance, (name), class, \ 32607c478bd9Sstevel@tonic-gate KSTAT_TYPE_NAMED, (tsize) / sizeof (kstat_named_t), 0, \ 32617c478bd9Sstevel@tonic-gate zoneid); \ 32627c478bd9Sstevel@tonic-gate if (ksp != NULL) { \ 32637c478bd9Sstevel@tonic-gate ksp->ks_private = cp; \ 32647c478bd9Sstevel@tonic-gate ksp->ks_update = (update_func); \ 32657c478bd9Sstevel@tonic-gate kstat_install(ksp); \ 32667c478bd9Sstevel@tonic-gate } else \ 32677c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "cpu: unable to create %s:%d:%s kstat", \ 32687c478bd9Sstevel@tonic-gate module, instance, (name)); 32697c478bd9Sstevel@tonic-gate 32707c478bd9Sstevel@tonic-gate CPU_STATS_KS_CREATE("sys", sizeof (cpu_sys_stats_ks_data_template), 32717c478bd9Sstevel@tonic-gate cpu_sys_stats_ks_update); 32727c478bd9Sstevel@tonic-gate CPU_STATS_KS_CREATE("vm", sizeof (cpu_vm_stats_ks_data_template), 32737c478bd9Sstevel@tonic-gate cpu_vm_stats_ks_update); 32747c478bd9Sstevel@tonic-gate 32757c478bd9Sstevel@tonic-gate /* 32767c478bd9Sstevel@tonic-gate * Export the familiar cpu_stat_t KSTAT_TYPE_RAW kstat. 

	/*
	 * Export the familiar cpu_stat_t KSTAT_TYPE_RAW kstat.
	 */
	ksp = kstat_create_zone("cpu_stat", cp->cpu_id, NULL,
	    "misc", KSTAT_TYPE_RAW, sizeof (cpu_stat_t), 0, zoneid);
	if (ksp != NULL) {
		ksp->ks_update = cpu_stat_ks_update;
		ksp->ks_private = cp;
		kstat_install(ksp);
	}
}

static void
cpu_stats_kstat_destroy(cpu_t *cp)
{
	char ks_name[KSTAT_STRLEN];

	(void) sprintf(ks_name, "cpu_stat%d", cp->cpu_id);
	kstat_delete_byname("cpu_stat", cp->cpu_id, ks_name);

	kstat_delete_byname("cpu", cp->cpu_id, "sys");
	kstat_delete_byname("cpu", cp->cpu_id, "vm");
}

static int
cpu_sys_stats_ks_update(kstat_t *ksp, int rw)
{
	cpu_t *cp = (cpu_t *)ksp->ks_private;
	struct cpu_sys_stats_ks_data *csskd;
	cpu_sys_stats_t *css;
	hrtime_t msnsecs[NCMSTATES];
	int i;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	csskd = ksp->ks_data;
	css = &cp->cpu_stats.sys;

	/*
	 * Read CPU mstate, but compare with the last values we
	 * received to make sure that the returned kstats never
	 * decrease.
	 */

	get_cpu_mstate(cp, msnsecs);
	if (csskd->cpu_nsec_idle.value.ui64 > msnsecs[CMS_IDLE])
		msnsecs[CMS_IDLE] = csskd->cpu_nsec_idle.value.ui64;
	if (csskd->cpu_nsec_user.value.ui64 > msnsecs[CMS_USER])
		msnsecs[CMS_USER] = csskd->cpu_nsec_user.value.ui64;
	if (csskd->cpu_nsec_kernel.value.ui64 > msnsecs[CMS_SYSTEM])
		msnsecs[CMS_SYSTEM] = csskd->cpu_nsec_kernel.value.ui64;
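
	/*
	 * Worked example of the clamping above (the numbers are
	 * hypothetical): suppose the previously exported cpu_nsec_idle
	 * was 1000000050, but a fresh get_cpu_mstate() read returns
	 * 1000000000, e.g. because of small races in the microstate
	 * bookkeeping.  The clamp keeps the exported value at
	 * 1000000050, so consumers that compute deltas between samples
	 * never see a counter run backwards.
	 */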

	bcopy(&cpu_sys_stats_ks_data_template, ksp->ks_data,
	    sizeof (cpu_sys_stats_ks_data_template));

	csskd->cpu_ticks_wait.value.ui64 = 0;
	csskd->wait_ticks_io.value.ui64 = 0;

	csskd->cpu_nsec_idle.value.ui64 = msnsecs[CMS_IDLE];
	csskd->cpu_nsec_user.value.ui64 = msnsecs[CMS_USER];
	csskd->cpu_nsec_kernel.value.ui64 = msnsecs[CMS_SYSTEM];
	csskd->cpu_ticks_idle.value.ui64 =
	    NSEC_TO_TICK(csskd->cpu_nsec_idle.value.ui64);
	csskd->cpu_ticks_user.value.ui64 =
	    NSEC_TO_TICK(csskd->cpu_nsec_user.value.ui64);
	csskd->cpu_ticks_kernel.value.ui64 =
	    NSEC_TO_TICK(csskd->cpu_nsec_kernel.value.ui64);
	csskd->cpu_nsec_dtrace.value.ui64 = cp->cpu_dtrace_nsec;
	csskd->dtrace_probes.value.ui64 = cp->cpu_dtrace_probes;
	csskd->cpu_nsec_intr.value.ui64 = cp->cpu_intrlast;
	csskd->cpu_load_intr.value.ui64 = cp->cpu_intrload;
	csskd->bread.value.ui64 = css->bread;
	csskd->bwrite.value.ui64 = css->bwrite;
	csskd->lread.value.ui64 = css->lread;
	csskd->lwrite.value.ui64 = css->lwrite;
	csskd->phread.value.ui64 = css->phread;
	csskd->phwrite.value.ui64 = css->phwrite;
	csskd->pswitch.value.ui64 = css->pswitch;
	csskd->trap.value.ui64 = css->trap;
	csskd->intr.value.ui64 = 0;
	for (i = 0; i < PIL_MAX; i++)
		csskd->intr.value.ui64 += css->intr[i];
	csskd->syscall.value.ui64 = css->syscall;
	csskd->sysread.value.ui64 = css->sysread;
	csskd->syswrite.value.ui64 = css->syswrite;
	csskd->sysfork.value.ui64 = css->sysfork;
	csskd->sysvfork.value.ui64 = css->sysvfork;
	csskd->sysexec.value.ui64 = css->sysexec;
	csskd->readch.value.ui64 = css->readch;
	csskd->writech.value.ui64 = css->writech;
	csskd->rcvint.value.ui64 = css->rcvint;
	csskd->xmtint.value.ui64 = css->xmtint;
	csskd->mdmint.value.ui64 = css->mdmint;
	csskd->rawch.value.ui64 = css->rawch;
	csskd->canch.value.ui64 = css->canch;
	csskd->outch.value.ui64 = css->outch;
	csskd->msg.value.ui64 = css->msg;
	csskd->sema.value.ui64 = css->sema;
	csskd->namei.value.ui64 = css->namei;
	csskd->ufsiget.value.ui64 = css->ufsiget;
	csskd->ufsdirblk.value.ui64 = css->ufsdirblk;
	csskd->ufsipage.value.ui64 = css->ufsipage;
	csskd->ufsinopage.value.ui64 = css->ufsinopage;
	csskd->procovf.value.ui64 = css->procovf;
	csskd->intrthread.value.ui64 = 0;
	for (i = 0; i < LOCK_LEVEL - 1; i++)
		csskd->intrthread.value.ui64 += css->intr[i];
	csskd->intrblk.value.ui64 = css->intrblk;
	csskd->intrunpin.value.ui64 = css->intrunpin;
	csskd->idlethread.value.ui64 = css->idlethread;
	csskd->inv_swtch.value.ui64 = css->inv_swtch;
	csskd->nthreads.value.ui64 = css->nthreads;
	csskd->cpumigrate.value.ui64 = css->cpumigrate;
	csskd->xcalls.value.ui64 = css->xcalls;
	csskd->mutex_adenters.value.ui64 = css->mutex_adenters;
	csskd->rw_rdfails.value.ui64 = css->rw_rdfails;
	csskd->rw_wrfails.value.ui64 = css->rw_wrfails;
	csskd->modload.value.ui64 = css->modload;
	csskd->modunload.value.ui64 = css->modunload;
	csskd->bawrite.value.ui64 = css->bawrite;
	csskd->iowait.value.ui64 = css->iowait;

	return (0);
}
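
/*
 * Illustrative sketch (hypothetical; error handling omitted and the
 * instance number is an assumption): reading the cpu:<id>:vm kstat
 * published above from userland with libkstat(3LIB).
 *
 *	#include <kstat.h>
 *
 *	kstat_ctl_t *kc = kstat_open();
 *	kstat_t *ksp = kstat_lookup(kc, "cpu", 0, "vm");
 *	(void) kstat_read(kc, ksp, NULL);
 *	kstat_named_t *kn = kstat_data_lookup(ksp, "maj_fault");
 *	(void) printf("major faults: %llu\n",
 *	    (u_longlong_t)kn->value.ui64);
 *	(void) kstat_close(kc);
 */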

static int
cpu_vm_stats_ks_update(kstat_t *ksp, int rw)
{
	cpu_t *cp = (cpu_t *)ksp->ks_private;
	struct cpu_vm_stats_ks_data *cvskd;
	cpu_vm_stats_t *cvs;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	cvs = &cp->cpu_stats.vm;
	cvskd = ksp->ks_data;

	bcopy(&cpu_vm_stats_ks_data_template, ksp->ks_data,
	    sizeof (cpu_vm_stats_ks_data_template));
	cvskd->pgrec.value.ui64 = cvs->pgrec;
	cvskd->pgfrec.value.ui64 = cvs->pgfrec;
	cvskd->pgin.value.ui64 = cvs->pgin;
	cvskd->pgpgin.value.ui64 = cvs->pgpgin;
	cvskd->pgout.value.ui64 = cvs->pgout;
	cvskd->pgpgout.value.ui64 = cvs->pgpgout;
	cvskd->swapin.value.ui64 = cvs->swapin;
	cvskd->pgswapin.value.ui64 = cvs->pgswapin;
	cvskd->swapout.value.ui64 = cvs->swapout;
	cvskd->pgswapout.value.ui64 = cvs->pgswapout;
	cvskd->zfod.value.ui64 = cvs->zfod;
	cvskd->dfree.value.ui64 = cvs->dfree;
	cvskd->scan.value.ui64 = cvs->scan;
	cvskd->rev.value.ui64 = cvs->rev;
	cvskd->hat_fault.value.ui64 = cvs->hat_fault;
	cvskd->as_fault.value.ui64 = cvs->as_fault;
	cvskd->maj_fault.value.ui64 = cvs->maj_fault;
	cvskd->cow_fault.value.ui64 = cvs->cow_fault;
	cvskd->prot_fault.value.ui64 = cvs->prot_fault;
	cvskd->softlock.value.ui64 = cvs->softlock;
	cvskd->kernel_asflt.value.ui64 = cvs->kernel_asflt;
	cvskd->pgrrun.value.ui64 = cvs->pgrrun;
	cvskd->execpgin.value.ui64 = cvs->execpgin;
	cvskd->execpgout.value.ui64 = cvs->execpgout;
	cvskd->execfree.value.ui64 = cvs->execfree;
	cvskd->anonpgin.value.ui64 = cvs->anonpgin;
	cvskd->anonpgout.value.ui64 = cvs->anonpgout;
	cvskd->anonfree.value.ui64 = cvs->anonfree;
	cvskd->fspgin.value.ui64 = cvs->fspgin;
	cvskd->fspgout.value.ui64 = cvs->fspgout;
	cvskd->fsfree.value.ui64 = cvs->fsfree;

	return (0);
}
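
/*
 * Clarifying note (an observation, not original commentary): unlike the
 * named "sys"/"vm" kstats above, whose counters are 64-bit, the
 * cpu_stat_t filled in below is the legacy KSTAT_TYPE_RAW format whose
 * cpu_sysinfo counters are 32-bit and can wrap on long-running systems.
 * The NSEC_TO_TICK() conversions below also scale the microstate times
 * down to clock ticks (nanoseconds divided by nsec_per_tick) to fit
 * that format.
 */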

static int
cpu_stat_ks_update(kstat_t *ksp, int rw)
{
	cpu_stat_t *cso;
	cpu_t *cp;
	int i;
	hrtime_t msnsecs[NCMSTATES];

	cso = (cpu_stat_t *)ksp->ks_data;
	cp = (cpu_t *)ksp->ks_private;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	/*
	 * Read CPU mstate, but compare with the last values we
	 * received to make sure that the returned kstats never
	 * decrease.
	 */

	get_cpu_mstate(cp, msnsecs);
	msnsecs[CMS_IDLE] = NSEC_TO_TICK(msnsecs[CMS_IDLE]);
	msnsecs[CMS_USER] = NSEC_TO_TICK(msnsecs[CMS_USER]);
	msnsecs[CMS_SYSTEM] = NSEC_TO_TICK(msnsecs[CMS_SYSTEM]);
	if (cso->cpu_sysinfo.cpu[CPU_IDLE] < msnsecs[CMS_IDLE])
		cso->cpu_sysinfo.cpu[CPU_IDLE] = msnsecs[CMS_IDLE];
	if (cso->cpu_sysinfo.cpu[CPU_USER] < msnsecs[CMS_USER])
		cso->cpu_sysinfo.cpu[CPU_USER] = msnsecs[CMS_USER];
	if (cso->cpu_sysinfo.cpu[CPU_KERNEL] < msnsecs[CMS_SYSTEM])
		cso->cpu_sysinfo.cpu[CPU_KERNEL] = msnsecs[CMS_SYSTEM];
	cso->cpu_sysinfo.cpu[CPU_WAIT] = 0;
	cso->cpu_sysinfo.wait[W_IO] = 0;
	cso->cpu_sysinfo.wait[W_SWAP] = 0;
	cso->cpu_sysinfo.wait[W_PIO] = 0;
	cso->cpu_sysinfo.bread = CPU_STATS(cp, sys.bread);
	cso->cpu_sysinfo.bwrite = CPU_STATS(cp, sys.bwrite);
	cso->cpu_sysinfo.lread = CPU_STATS(cp, sys.lread);
	cso->cpu_sysinfo.lwrite = CPU_STATS(cp, sys.lwrite);
	cso->cpu_sysinfo.phread = CPU_STATS(cp, sys.phread);
	cso->cpu_sysinfo.phwrite = CPU_STATS(cp, sys.phwrite);
	cso->cpu_sysinfo.pswitch = CPU_STATS(cp, sys.pswitch);
	cso->cpu_sysinfo.trap = CPU_STATS(cp, sys.trap);
	cso->cpu_sysinfo.intr = 0;
	for (i = 0; i < PIL_MAX; i++)
		cso->cpu_sysinfo.intr += CPU_STATS(cp, sys.intr[i]);
	cso->cpu_sysinfo.syscall = CPU_STATS(cp, sys.syscall);
	cso->cpu_sysinfo.sysread = CPU_STATS(cp, sys.sysread);
	cso->cpu_sysinfo.syswrite = CPU_STATS(cp, sys.syswrite);
	cso->cpu_sysinfo.sysfork = CPU_STATS(cp, sys.sysfork);
	cso->cpu_sysinfo.sysvfork = CPU_STATS(cp, sys.sysvfork);
	cso->cpu_sysinfo.sysexec = CPU_STATS(cp, sys.sysexec);
	cso->cpu_sysinfo.readch = CPU_STATS(cp, sys.readch);
	cso->cpu_sysinfo.writech = CPU_STATS(cp, sys.writech);
	cso->cpu_sysinfo.rcvint = CPU_STATS(cp, sys.rcvint);
	cso->cpu_sysinfo.xmtint = CPU_STATS(cp, sys.xmtint);
	cso->cpu_sysinfo.mdmint = CPU_STATS(cp, sys.mdmint);
	cso->cpu_sysinfo.rawch = CPU_STATS(cp, sys.rawch);
	cso->cpu_sysinfo.canch = CPU_STATS(cp, sys.canch);
	cso->cpu_sysinfo.outch = CPU_STATS(cp, sys.outch);
	cso->cpu_sysinfo.msg = CPU_STATS(cp, sys.msg);
	cso->cpu_sysinfo.sema = CPU_STATS(cp, sys.sema);
	cso->cpu_sysinfo.namei = CPU_STATS(cp, sys.namei);
	cso->cpu_sysinfo.ufsiget = CPU_STATS(cp, sys.ufsiget);
	cso->cpu_sysinfo.ufsdirblk = CPU_STATS(cp, sys.ufsdirblk);
	cso->cpu_sysinfo.ufsipage = CPU_STATS(cp, sys.ufsipage);
	cso->cpu_sysinfo.ufsinopage = CPU_STATS(cp, sys.ufsinopage);
	cso->cpu_sysinfo.inodeovf = 0;
	cso->cpu_sysinfo.fileovf = 0;
	cso->cpu_sysinfo.procovf = CPU_STATS(cp, sys.procovf);
	cso->cpu_sysinfo.intrthread = 0;
	for (i = 0; i < LOCK_LEVEL - 1; i++)
		cso->cpu_sysinfo.intrthread += CPU_STATS(cp, sys.intr[i]);
	cso->cpu_sysinfo.intrblk = CPU_STATS(cp, sys.intrblk);
	cso->cpu_sysinfo.idlethread = CPU_STATS(cp, sys.idlethread);
	cso->cpu_sysinfo.inv_swtch = CPU_STATS(cp, sys.inv_swtch);
	cso->cpu_sysinfo.nthreads = CPU_STATS(cp, sys.nthreads);
	cso->cpu_sysinfo.cpumigrate = CPU_STATS(cp, sys.cpumigrate);
	cso->cpu_sysinfo.xcalls = CPU_STATS(cp, sys.xcalls);
	cso->cpu_sysinfo.mutex_adenters = CPU_STATS(cp, sys.mutex_adenters);
	cso->cpu_sysinfo.rw_rdfails = CPU_STATS(cp, sys.rw_rdfails);
	cso->cpu_sysinfo.rw_wrfails = CPU_STATS(cp, sys.rw_wrfails);
	cso->cpu_sysinfo.modload = CPU_STATS(cp, sys.modload);
	cso->cpu_sysinfo.modunload = CPU_STATS(cp, sys.modunload);
	cso->cpu_sysinfo.bawrite = CPU_STATS(cp, sys.bawrite);
	cso->cpu_sysinfo.rw_enters = 0;
	cso->cpu_sysinfo.win_uo_cnt = 0;
	cso->cpu_sysinfo.win_uu_cnt = 0;
	cso->cpu_sysinfo.win_so_cnt = 0;
	cso->cpu_sysinfo.win_su_cnt = 0;
	cso->cpu_sysinfo.win_suo_cnt = 0;

	cso->cpu_syswait.iowait = CPU_STATS(cp, sys.iowait);
	cso->cpu_syswait.swap = 0;
	cso->cpu_syswait.physio = 0;

	cso->cpu_vminfo.pgrec = CPU_STATS(cp, vm.pgrec);
	cso->cpu_vminfo.pgfrec = CPU_STATS(cp, vm.pgfrec);
	cso->cpu_vminfo.pgin = CPU_STATS(cp, vm.pgin);
	cso->cpu_vminfo.pgpgin = CPU_STATS(cp, vm.pgpgin);
	cso->cpu_vminfo.pgout = CPU_STATS(cp, vm.pgout);
	cso->cpu_vminfo.pgpgout = CPU_STATS(cp, vm.pgpgout);
	cso->cpu_vminfo.swapin = CPU_STATS(cp, vm.swapin);
	cso->cpu_vminfo.pgswapin = CPU_STATS(cp, vm.pgswapin);
	cso->cpu_vminfo.swapout = CPU_STATS(cp, vm.swapout);
	cso->cpu_vminfo.pgswapout = CPU_STATS(cp, vm.pgswapout);
	cso->cpu_vminfo.zfod = CPU_STATS(cp, vm.zfod);
	cso->cpu_vminfo.dfree = CPU_STATS(cp, vm.dfree);
	cso->cpu_vminfo.scan = CPU_STATS(cp, vm.scan);
	cso->cpu_vminfo.rev = CPU_STATS(cp, vm.rev);
	cso->cpu_vminfo.hat_fault = CPU_STATS(cp, vm.hat_fault);
	cso->cpu_vminfo.as_fault = CPU_STATS(cp, vm.as_fault);
	cso->cpu_vminfo.maj_fault = CPU_STATS(cp, vm.maj_fault);
	cso->cpu_vminfo.cow_fault = CPU_STATS(cp, vm.cow_fault);
	cso->cpu_vminfo.prot_fault = CPU_STATS(cp, vm.prot_fault);
	cso->cpu_vminfo.softlock = CPU_STATS(cp, vm.softlock);
	cso->cpu_vminfo.kernel_asflt = CPU_STATS(cp, vm.kernel_asflt);
	cso->cpu_vminfo.pgrrun = CPU_STATS(cp, vm.pgrrun);
	cso->cpu_vminfo.execpgin = CPU_STATS(cp, vm.execpgin);
	cso->cpu_vminfo.execpgout = CPU_STATS(cp, vm.execpgout);
	cso->cpu_vminfo.execfree = CPU_STATS(cp, vm.execfree);
	cso->cpu_vminfo.anonpgin = CPU_STATS(cp, vm.anonpgin);
	cso->cpu_vminfo.anonpgout = CPU_STATS(cp, vm.anonpgout);
	cso->cpu_vminfo.anonfree = CPU_STATS(cp, vm.anonfree);
	cso->cpu_vminfo.fspgin = CPU_STATS(cp, vm.fspgin);
	cso->cpu_vminfo.fspgout = CPU_STATS(cp, vm.fspgout);
	cso->cpu_vminfo.fsfree = CPU_STATS(cp, vm.fsfree);

	return (0);
}