/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * Architecture-independent CPU control functions.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/var.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/kstat.h>
#include <sys/uadmin.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/procset.h>
#include <sys/processor.h>
#include <sys/debug.h>
#include <sys/cpupart.h>
#include <sys/lgrp.h>
#include <sys/pset.h>
#include <sys/pghw.h>
#include <sys/kmem.h>
#include <sys/kmem_impl.h>	/* to set per-cpu kmem_cache offset */
#include <sys/atomic.h>
#include <sys/callb.h>
#include <sys/vtrace.h>
#include <sys/cyclic.h>
#include <sys/bitmap.h>
#include <sys/nvpair.h>
#include <sys/pool_pset.h>
#include <sys/msacct.h>
#include <sys/time.h>
#include <sys/archsystm.h>
#if defined(__x86) || defined(__amd64)
#include <sys/x86_archext.h>
#endif

extern int	mp_cpu_start(cpu_t *);
extern int	mp_cpu_stop(cpu_t *);
extern int	mp_cpu_poweron(cpu_t *);
extern int	mp_cpu_poweroff(cpu_t *);
extern int	mp_cpu_configure(int);
extern int	mp_cpu_unconfigure(int);
extern void	mp_cpu_faulted_enter(cpu_t *);
extern void	mp_cpu_faulted_exit(cpu_t *);

extern int cmp_cpu_to_chip(processorid_t cpuid);
#ifdef __sparcv9
extern char *cpu_fru_fmri(cpu_t *cp);
#endif

static void cpu_add_active_internal(cpu_t *cp);
static void cpu_remove_active(cpu_t *cp);
static void cpu_info_kstat_create(cpu_t *cp);
static void cpu_info_kstat_destroy(cpu_t *cp);
static void cpu_stats_kstat_create(cpu_t *cp);
static void cpu_stats_kstat_destroy(cpu_t *cp);

static int cpu_sys_stats_ks_update(kstat_t *ksp, int rw);
static int cpu_vm_stats_ks_update(kstat_t *ksp, int rw);
static int cpu_stat_ks_update(kstat_t *ksp, int rw);
static int cpu_state_change_hooks(int, cpu_setup_t, cpu_setup_t);

/*
 * cpu_lock protects ncpus, ncpus_online, cpu_flag, cpu_list, cpu_active,
 * and dispatch queue reallocations.  The lock ordering with respect to
 * related locks is:
 *
 *	cpu_lock --> thread_free_lock  --->  p_lock  --->  thread_lock()
 *
 * Warning:  Certain sections of code do not use the cpu_lock when
 * traversing the cpu_list (e.g. mutex_vector_enter(), clock()).
 * Since all cpus are paused during modifications to this list, a solution
 * to protect the list is to either disable kernel preemption while
 * walking the list, *or* recheck the cpu_next pointer at each
 * iteration in the loop.  Note that in no cases can any cached
 * copies of the cpu pointers be kept as they may become invalid.
 */
kmutex_t cpu_lock;
cpu_t *cpu_list;		/* list of all CPUs */
cpu_t *clock_cpu_list;		/* used by clock to walk CPUs */
cpu_t *cpu_active;		/* list of active CPUs */
static cpuset_t cpu_available;	/* set of available CPUs */
cpuset_t cpu_seqid_inuse;	/* which cpu_seqids are in use */

/*
 * max_ncpus keeps the max cpus the system can have.  Initially
 * it's NCPU, but since most archs scan the devtree for cpus
 * fairly early on during boot, the real max can be known before
 * ncpus is set (useful for early NCPU based allocations).
 */
int max_ncpus = NCPU;
/*
 * platforms that set max_ncpus to maximum number of cpus that can be
 * dynamically added will set boot_max_ncpus to the number of cpus found
 * at device tree scan time during boot.
 */
int boot_max_ncpus = -1;
/*
 * Maximum possible CPU id.  This can never be >= NCPU since NCPU is
 * used to size arrays that are indexed by CPU id.
 */
processorid_t max_cpuid = NCPU - 1;

int ncpus = 1;
int ncpus_online = 1;

/*
 * CPU that we're trying to offline.  Protected by cpu_lock.
 */
cpu_t *cpu_inmotion;

/*
 * Can be raised to suppress further weakbindings, which are instead
 * satisfied by disabling preemption.  Must be raised/lowered under cpu_lock,
 * while individual thread weakbinding synchronisation is done under thread
 * lock.
 */
int weakbindingbarrier;
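
/*
 * For illustration only: a lock-free cpu_list walker that follows the
 * rules described above might look roughly like the sketch below.
 * use_cpu() is a hypothetical per-cpu operation; real callers either
 * hold cpu_lock or disable kernel preemption as shown.
 *
 *	kpreempt_disable();		(pin the walker to one cpu)
 *	cp = cpu_list;
 *	do {
 *		use_cpu(cp);
 *		cp = cp->cpu_next;	(re-read, never cache, cpu_next)
 *	} while (cp != cpu_list);
 *	kpreempt_enable();
 */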

/*
 * Variables used in pause_cpus().
 */
static volatile char safe_list[NCPU];

static struct _cpu_pause_info {
	int		cp_spl;		/* spl saved in pause_cpus() */
	volatile int	cp_go;		/* Go signal sent after all ready */
	int		cp_count;	/* # of CPUs to pause */
	ksema_t		cp_sem;		/* synch pause_cpus & cpu_pause */
	kthread_id_t	cp_paused;
} cpu_pause_info;

static kmutex_t pause_free_mutex;
static kcondvar_t pause_free_cv;

void *(*cpu_pause_func)(void *) = NULL;


static struct cpu_sys_stats_ks_data {
	kstat_named_t cpu_ticks_idle;
	kstat_named_t cpu_ticks_user;
	kstat_named_t cpu_ticks_kernel;
	kstat_named_t cpu_ticks_wait;
	kstat_named_t cpu_nsec_idle;
	kstat_named_t cpu_nsec_user;
	kstat_named_t cpu_nsec_kernel;
	kstat_named_t cpu_nsec_intr;
	kstat_named_t cpu_load_intr;
	kstat_named_t wait_ticks_io;
	kstat_named_t bread;
	kstat_named_t bwrite;
	kstat_named_t lread;
	kstat_named_t lwrite;
	kstat_named_t phread;
	kstat_named_t phwrite;
	kstat_named_t pswitch;
	kstat_named_t trap;
	kstat_named_t intr;
	kstat_named_t syscall;
	kstat_named_t sysread;
	kstat_named_t syswrite;
	kstat_named_t sysfork;
	kstat_named_t sysvfork;
	kstat_named_t sysexec;
	kstat_named_t readch;
	kstat_named_t writech;
	kstat_named_t rcvint;
	kstat_named_t xmtint;
	kstat_named_t mdmint;
	kstat_named_t rawch;
	kstat_named_t canch;
	kstat_named_t outch;
	kstat_named_t msg;
	kstat_named_t sema;
	kstat_named_t namei;
	kstat_named_t ufsiget;
	kstat_named_t ufsdirblk;
	kstat_named_t ufsipage;
	kstat_named_t ufsinopage;
	kstat_named_t procovf;
	kstat_named_t intrthread;
	kstat_named_t intrblk;
	kstat_named_t intrunpin;
	kstat_named_t idlethread;
	kstat_named_t inv_swtch;
	kstat_named_t nthreads;
	kstat_named_t cpumigrate;
	kstat_named_t xcalls;
	kstat_named_t mutex_adenters;
	kstat_named_t rw_rdfails;
	kstat_named_t rw_wrfails;
	kstat_named_t modload;
	kstat_named_t modunload;
	kstat_named_t bawrite;
	kstat_named_t iowait;
} cpu_sys_stats_ks_data_template = {
	{ "cpu_ticks_idle",	KSTAT_DATA_UINT64 },
	{ "cpu_ticks_user",	KSTAT_DATA_UINT64 },
	{ "cpu_ticks_kernel",	KSTAT_DATA_UINT64 },
	{ "cpu_ticks_wait",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_idle",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_user",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_kernel",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_intr",	KSTAT_DATA_UINT64 },
	{ "cpu_load_intr",	KSTAT_DATA_UINT64 },
	{ "wait_ticks_io",	KSTAT_DATA_UINT64 },
	{ "bread",		KSTAT_DATA_UINT64 },
	{ "bwrite",		KSTAT_DATA_UINT64 },
	{ "lread",		KSTAT_DATA_UINT64 },
	{ "lwrite",		KSTAT_DATA_UINT64 },
	{ "phread",		KSTAT_DATA_UINT64 },
	{ "phwrite",		KSTAT_DATA_UINT64 },
	{ "pswitch",		KSTAT_DATA_UINT64 },
	{ "trap",		KSTAT_DATA_UINT64 },
	{ "intr",		KSTAT_DATA_UINT64 },
	{ "syscall",		KSTAT_DATA_UINT64 },
	{ "sysread",		KSTAT_DATA_UINT64 },
	{ "syswrite",		KSTAT_DATA_UINT64 },
	{ "sysfork",		KSTAT_DATA_UINT64 },
	{ "sysvfork",		KSTAT_DATA_UINT64 },
	{ "sysexec",		KSTAT_DATA_UINT64 },
	{ "readch",		KSTAT_DATA_UINT64 },
	{ "writech",		KSTAT_DATA_UINT64 },
	{ "rcvint",		KSTAT_DATA_UINT64 },
	{ "xmtint",		KSTAT_DATA_UINT64 },
	{ "mdmint",		KSTAT_DATA_UINT64 },
	{ "rawch",		KSTAT_DATA_UINT64 },
	{ "canch",		KSTAT_DATA_UINT64 },
	{ "outch",		KSTAT_DATA_UINT64 },
	{ "msg",		KSTAT_DATA_UINT64 },
	{ "sema",		KSTAT_DATA_UINT64 },
	{ "namei",		KSTAT_DATA_UINT64 },
	{ "ufsiget",		KSTAT_DATA_UINT64 },
	{ "ufsdirblk",		KSTAT_DATA_UINT64 },
	{ "ufsipage",		KSTAT_DATA_UINT64 },
	{ "ufsinopage",		KSTAT_DATA_UINT64 },
	{ "procovf",		KSTAT_DATA_UINT64 },
	{ "intrthread",		KSTAT_DATA_UINT64 },
	{ "intrblk",		KSTAT_DATA_UINT64 },
	{ "intrunpin",		KSTAT_DATA_UINT64 },
	{ "idlethread",		KSTAT_DATA_UINT64 },
	{ "inv_swtch",		KSTAT_DATA_UINT64 },
	{ "nthreads",		KSTAT_DATA_UINT64 },
	{ "cpumigrate",		KSTAT_DATA_UINT64 },
	{ "xcalls",		KSTAT_DATA_UINT64 },
	{ "mutex_adenters",	KSTAT_DATA_UINT64 },
	{ "rw_rdfails",		KSTAT_DATA_UINT64 },
	{ "rw_wrfails",		KSTAT_DATA_UINT64 },
	{ "modload",		KSTAT_DATA_UINT64 },
	{ "modunload",		KSTAT_DATA_UINT64 },
	{ "bawrite",		KSTAT_DATA_UINT64 },
	{ "iowait",		KSTAT_DATA_UINT64 },
};

static struct cpu_vm_stats_ks_data {
	kstat_named_t pgrec;
	kstat_named_t pgfrec;
	kstat_named_t pgin;
	kstat_named_t pgpgin;
	kstat_named_t pgout;
	kstat_named_t pgpgout;
	kstat_named_t swapin;
	kstat_named_t pgswapin;
	kstat_named_t swapout;
	kstat_named_t pgswapout;
	kstat_named_t zfod;
	kstat_named_t dfree;
	kstat_named_t scan;
	kstat_named_t rev;
	kstat_named_t hat_fault;
	kstat_named_t as_fault;
	kstat_named_t maj_fault;
	kstat_named_t cow_fault;
	kstat_named_t prot_fault;
	kstat_named_t softlock;
	kstat_named_t kernel_asflt;
	kstat_named_t pgrrun;
	kstat_named_t execpgin;
	kstat_named_t execpgout;
	kstat_named_t execfree;
	kstat_named_t anonpgin;
	kstat_named_t anonpgout;
	kstat_named_t anonfree;
	kstat_named_t fspgin;
	kstat_named_t fspgout;
	kstat_named_t fsfree;
} cpu_vm_stats_ks_data_template = {
	{ "pgrec",		KSTAT_DATA_UINT64 },
	{ "pgfrec",		KSTAT_DATA_UINT64 },
	{ "pgin",		KSTAT_DATA_UINT64 },
	{ "pgpgin",		KSTAT_DATA_UINT64 },
	{ "pgout",		KSTAT_DATA_UINT64 },
	{ "pgpgout",		KSTAT_DATA_UINT64 },
	{ "swapin",		KSTAT_DATA_UINT64 },
	{ "pgswapin",		KSTAT_DATA_UINT64 },
	{ "swapout",		KSTAT_DATA_UINT64 },
	{ "pgswapout",		KSTAT_DATA_UINT64 },
	{ "zfod",		KSTAT_DATA_UINT64 },
	{ "dfree",		KSTAT_DATA_UINT64 },
	{ "scan",		KSTAT_DATA_UINT64 },
	{ "rev",		KSTAT_DATA_UINT64 },
	{ "hat_fault",		KSTAT_DATA_UINT64 },
	{ "as_fault",		KSTAT_DATA_UINT64 },
	{ "maj_fault",		KSTAT_DATA_UINT64 },
	{ "cow_fault",		KSTAT_DATA_UINT64 },
	{ "prot_fault",		KSTAT_DATA_UINT64 },
	{ "softlock",		KSTAT_DATA_UINT64 },
	{ "kernel_asflt",	KSTAT_DATA_UINT64 },
	{ "pgrrun",		KSTAT_DATA_UINT64 },
	{ "execpgin",		KSTAT_DATA_UINT64 },
	{ "execpgout",		KSTAT_DATA_UINT64 },
	{ "execfree",		KSTAT_DATA_UINT64 },
	{ "anonpgin",		KSTAT_DATA_UINT64 },
	{ "anonpgout",		KSTAT_DATA_UINT64 },
	{ "anonfree",		KSTAT_DATA_UINT64 },
	{ "fspgin",		KSTAT_DATA_UINT64 },
	{ "fspgout",		KSTAT_DATA_UINT64 },
	{ "fsfree",		KSTAT_DATA_UINT64 },
};

/*
 * Force the specified thread to migrate to the appropriate processor.
 * Called with thread lock held, returns with it dropped.
 */
static void
force_thread_migrate(kthread_id_t tp)
{
	ASSERT(THREAD_LOCK_HELD(tp));
	if (tp == curthread) {
		THREAD_TRANSITION(tp);
		CL_SETRUN(tp);
		thread_unlock_nopreempt(tp);
		swtch();
	} else {
		if (tp->t_state == TS_ONPROC) {
			cpu_surrender(tp);
		} else if (tp->t_state == TS_RUN) {
			(void) dispdeq(tp);
			setbackdq(tp);
		}
		thread_unlock(tp);
	}
}

/*
 * Set affinity for a specified CPU.
 * A reference count is incremented and the affinity is held until the
 * reference count is decremented to zero by thread_affinity_clear().
 * This is so regions of code requiring affinity can be nested.
 * Caller needs to ensure that cpu_id remains valid, which can be
 * done by holding cpu_lock across this call, unless the caller
 * specifies CPU_CURRENT in which case the cpu_lock will be acquired
 * by thread_affinity_set and CPU->cpu_id will be the target CPU.
 */
void
thread_affinity_set(kthread_id_t t, int cpu_id)
{
	cpu_t		*cp;
	int		c;

	ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));

	if ((c = cpu_id) == CPU_CURRENT) {
		mutex_enter(&cpu_lock);
		cpu_id = CPU->cpu_id;
	}
	/*
	 * We should be asserting that cpu_lock is held here, but
	 * the NCA code doesn't acquire it.  The following assert
	 * should be uncommented when the NCA code is fixed.
	 *
	 * ASSERT(MUTEX_HELD(&cpu_lock));
	 */
	ASSERT((cpu_id >= 0) && (cpu_id < NCPU));
	cp = cpu[cpu_id];
	ASSERT(cp != NULL);		/* user must provide a good cpu_id */
	/*
	 * If there is already a hard affinity requested, and this affinity
	 * conflicts with that, panic.
	 */
	thread_lock(t);
	if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
		panic("affinity_set: setting %p but already bound to %p",
		    (void *)cp, (void *)t->t_bound_cpu);
	}
	t->t_affinitycnt++;
	t->t_bound_cpu = cp;

	/*
	 * Make sure we're running on the right CPU.
	 */
	if (cp != t->t_cpu || t != curthread) {
		force_thread_migrate(t);	/* drops thread lock */
	} else {
		thread_unlock(t);
	}

	if (c == CPU_CURRENT)
		mutex_exit(&cpu_lock);
}

/*
 * Wrapper for backward compatibility.
 */
void
affinity_set(int cpu_id)
{
	thread_affinity_set(curthread, cpu_id);
}

/*
 * Decrement the affinity reservation count and if it becomes zero,
 * clear the CPU affinity for the current thread, or set it to the user's
 * software binding request.
 */
void
thread_affinity_clear(kthread_id_t t)
{
	register processorid_t binding;

	thread_lock(t);
	if (--t->t_affinitycnt == 0) {
		if ((binding = t->t_bind_cpu) == PBIND_NONE) {
			/*
			 * Adjust disp_max_unbound_pri if necessary.
			 */
			disp_adjust_unbound_pri(t);
			t->t_bound_cpu = NULL;
			if (t->t_cpu->cpu_part != t->t_cpupart) {
				force_thread_migrate(t);
				return;
			}
		} else {
			t->t_bound_cpu = cpu[binding];
			/*
			 * Make sure the thread is running on the bound CPU.
			 */
			if (t->t_cpu != t->t_bound_cpu) {
				force_thread_migrate(t);
				return;		/* already dropped lock */
			}
		}
	}
	thread_unlock(t);
}

/*
 * Wrapper for backward compatibility.
 */
void
affinity_clear(void)
{
	thread_affinity_clear(curthread);
}
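
/*
 * For illustration only: a caller that needs to stay on a specific CPU
 * for a short region of code might use the interfaces above roughly as
 * sketched here (do_percpu_work() is a hypothetical helper):
 *
 *	mutex_enter(&cpu_lock);			(keeps cp->cpu_id valid)
 *	thread_affinity_set(curthread, cp->cpu_id);
 *	mutex_exit(&cpu_lock);
 *	do_percpu_work(cp);			(may nest further set/clear pairs)
 *	thread_affinity_clear(curthread);
 *
 * Passing CPU_CURRENT instead of an explicit id lets thread_affinity_set()
 * take cpu_lock itself and bind to whatever CPU the caller happens to be
 * running on.
 */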

/*
 * Weak cpu affinity.  Bind to the "current" cpu for short periods
 * of time during which the thread must not block (but may be preempted).
 * Use this instead of kpreempt_disable() when it is only "no migration"
 * rather than "no preemption" semantics that are required - disabling
 * preemption holds higher priority threads off of cpu and if the
 * operation that is protected is more than momentary this is not good
 * for realtime etc.
 *
 * Weakly bound threads will not prevent a cpu from being offlined -
 * we'll only run them on the cpu to which they are weakly bound but
 * (because they do not block) we'll always be able to move them on to
 * another cpu at offline time if we give them just a short moment to
 * run during which they will unbind.  To give a cpu a chance of offlining,
 * however, we require a barrier to weak bindings that may be raised for a
 * given cpu (offline/move code may set this and then wait a short time for
 * existing weak bindings to drop); the cpu_inmotion pointer is that barrier.
 *
 * There are few restrictions on the calling context of thread_nomigrate.
 * The caller must not hold the thread lock.  Calls may be nested.
 *
 * After weakbinding a thread must not perform actions that may block.
 * In particular it must not call thread_affinity_set; calling that when
 * already weakbound is nonsensical anyway.
 *
 * If curthread is prevented from migrating for other reasons
 * (kernel preemption disabled; high pil; strongly bound; interrupt thread)
 * then the weak binding will succeed even if this cpu is the target of an
 * offline/move request.
 */
void
thread_nomigrate(void)
{
	cpu_t *cp;
	kthread_id_t t = curthread;

again:
	kpreempt_disable();
	cp = CPU;

	/*
	 * A highlevel interrupt must not modify t_nomigrate or
	 * t_weakbound_cpu of the thread it has interrupted.  A lowlevel
	 * interrupt thread cannot migrate and we can avoid the
	 * thread_lock call below by short-circuiting here.  In either
	 * case we can just return since no migration is possible and
	 * the condition will persist (ie, when we test for these again
	 * in thread_allowmigrate they can't have changed).  Migration
	 * is also impossible if we're at or above DISP_LEVEL pil.
	 */
	if (CPU_ON_INTR(cp) || t->t_flag & T_INTR_THREAD ||
	    getpil() >= DISP_LEVEL) {
		kpreempt_enable();
		return;
	}

	/*
	 * We must be consistent with existing weak bindings.  Since we
	 * may be interrupted between the increment of t_nomigrate and
	 * the store to t_weakbound_cpu below we cannot assume that
	 * t_weakbound_cpu will be set if t_nomigrate is.  Note that we
	 * cannot assert t_weakbound_cpu == t_bind_cpu since that is not
	 * always the case.
	 */
	if (t->t_nomigrate && t->t_weakbound_cpu && t->t_weakbound_cpu != cp) {
		if (!panicstr)
			panic("thread_nomigrate: binding to %p but already "
			    "bound to %p", (void *)cp,
			    (void *)t->t_weakbound_cpu);
	}

	/*
	 * At this point we have preemption disabled and we don't yet hold
	 * the thread lock.  So it's possible that somebody else could
	 * set t_bind_cpu here and not be able to force us across to the
	 * new cpu (since we have preemption disabled).
	 */
	thread_lock(curthread);

	/*
	 * If further weak bindings are being (temporarily) suppressed then
	 * we'll settle for disabling kernel preemption (which assures
	 * no migration provided the thread does not block which it is
	 * not allowed to if using thread_nomigrate).  We must remember
	 * this disposition so we can take appropriate action in
	 * thread_allowmigrate.  If this is a nested call and the
	 * thread is already weakbound then fall through as normal.
	 * We remember the decision to settle for kpreempt_disable through
	 * negative nesting counting in t_nomigrate.
	 * Once a thread has had one weakbinding request satisfied in this
	 * way any further (nested) requests will continue to be satisfied
	 * in the same way, even if weak bindings have recommenced.
	 */
	if (t->t_nomigrate < 0 || weakbindingbarrier && t->t_nomigrate == 0) {
		--t->t_nomigrate;
		thread_unlock(curthread);
		return;		/* with kpreempt_disable still active */
	}

	/*
	 * We hold thread_lock so t_bind_cpu cannot change.  We could,
	 * however, be running on a different cpu to which we are t_bound_cpu
	 * to (as explained above).  If we grant the weak binding request
	 * in that case then the dispatcher must favour our weak binding
	 * over our strong (in which case, just as when preemption is
	 * disabled, we can continue to run on a cpu other than the one to
	 * which we are strongbound; the difference in this case is that
	 * this thread can be preempted and so can appear on the dispatch
	 * queues of a cpu other than the one it is strongbound to).
	 *
	 * If the cpu we are running on does not appear to be a current
	 * offline target (we check cpu_inmotion to determine this - since
	 * we don't hold cpu_lock we may not see a recent store to that,
	 * so it's possible that we at times can grant a weak binding to a
	 * cpu that is an offline target, but that one request will not
	 * prevent the offline from succeeding) then we will always grant
	 * the weak binding request.  This includes the case above where
	 * we grant a weakbinding not commensurate with our strong binding.
	 *
	 * If our cpu does appear to be an offline target then we're inclined
	 * not to grant the weakbinding request just yet - we'd prefer to
	 * migrate to another cpu and grant the request there.  The
	 * exceptions are those cases where going through preemption code
	 * will not result in us changing cpu:
	 *
	 *	. interrupts have already bypassed this case (see above)
	 *	. we are already weakbound to this cpu (dispatcher code will
	 *	  always return us to the weakbound cpu)
	 *	. preemption was disabled even before we disabled it above
	 *	. we are strongbound to this cpu (if we're strongbound to
	 *	  another and not yet running there the trip through the
	 *	  dispatcher will move us to the strongbound cpu and we
	 *	  will grant the weak binding there)
	 */
	if (cp != cpu_inmotion || t->t_nomigrate > 0 || t->t_preempt > 1 ||
	    t->t_bound_cpu == cp) {
		/*
		 * Don't be tempted to store to t_weakbound_cpu only on
		 * the first nested bind request - if we're interrupted
		 * after the increment of t_nomigrate and before the
		 * store to t_weakbound_cpu and the interrupt calls
		 * thread_nomigrate then the assertion in thread_allowmigrate
		 * would fail.
		 */
		t->t_nomigrate++;
		t->t_weakbound_cpu = cp;
		membar_producer();
		thread_unlock(curthread);
		/*
		 * Now that we have dropped the thread_lock another thread
		 * can set our t_bind_cpu, and will try to migrate us
		 * to the strongbound cpu (which will not be prevented by
		 * preemption being disabled since we're about to enable
		 * preemption).  We have granted the weakbinding to the current
		 * cpu, so again we are in the position that it is possible
		 * that our weak and strong bindings differ.  Again this
		 * is catered for by dispatcher code which will favour our
		 * weak binding.
		 */
		kpreempt_enable();
	} else {
		/*
		 * Move to another cpu before granting the request by
		 * forcing this thread through preemption code.  When we
		 * get to set{front,back}dq called from CL_PREEMPT()
		 * cpu_choose() will be used to select a cpu to queue
		 * us on - that will see cpu_inmotion and take
		 * steps to avoid returning us to this cpu.
		 */
		cp->cpu_kprunrun = 1;
		thread_unlock(curthread);
		kpreempt_enable();	/* will call preempt() */
		goto again;
	}
}

void
thread_allowmigrate(void)
{
	kthread_id_t t = curthread;

	ASSERT(t->t_weakbound_cpu == CPU ||
	    (t->t_nomigrate < 0 && t->t_preempt > 0) ||
	    CPU_ON_INTR(CPU) || t->t_flag & T_INTR_THREAD ||
	    getpil() >= DISP_LEVEL);

	if (CPU_ON_INTR(CPU) || (t->t_flag & T_INTR_THREAD) ||
	    getpil() >= DISP_LEVEL)
		return;

	if (t->t_nomigrate < 0) {
		/*
		 * This thread was granted "weak binding" in the
		 * stronger form of kernel preemption disabling.
		 * Undo a level of nesting for both t_nomigrate
		 * and t_preempt.
		 */
		++t->t_nomigrate;
		kpreempt_enable();
	} else if (--t->t_nomigrate == 0) {
		/*
		 * Time to drop the weak binding.  We need to cater
		 * for the case where we're weakbound to a different
		 * cpu than that to which we're strongbound (a very
		 * temporary arrangement that must only persist until
		 * weak binding drops).  We don't acquire thread_lock
		 * here so even as this code executes t_bound_cpu
		 * may be changing.  So we disable preemption and
		 * a) in the case that t_bound_cpu changes while we
		 * have preemption disabled kprunrun will be set
		 * asynchronously, and b) if before disabling
		 * preemption we were already on a different cpu to
		 * our t_bound_cpu then we set kprunrun ourselves
		 * to force a trip through the dispatcher when
		 * preemption is enabled.
		 */
		kpreempt_disable();
		if (t->t_bound_cpu &&
		    t->t_weakbound_cpu != t->t_bound_cpu)
			CPU->cpu_kprunrun = 1;
		t->t_weakbound_cpu = NULL;
		membar_producer();
		kpreempt_enable();
	}
}

/*
 * weakbinding_stop can be used to temporarily cause weakbindings made
 * with thread_nomigrate to be satisfied through the stronger action of
 * kpreempt_disable.  weakbinding_start recommences normal weakbinding.
 */

void
weakbinding_stop(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	weakbindingbarrier = 1;
	membar_producer();	/* make visible before subsequent thread_lock */
}

void
weakbinding_start(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	weakbindingbarrier = 0;
}

void
null_xcall(void)
{
}
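
/*
 * For illustration only: a typical weak-binding consumer brackets a short,
 * non-blocking access to per-cpu state roughly as sketched below
 * (update_local_counter() is a hypothetical, non-blocking helper):
 *
 *	thread_nomigrate();		(stay on this cpu; preemption still ok)
 *	update_local_counter(CPU);	(must not block while weakbound)
 *	thread_allowmigrate();
 *
 * Unlike a kpreempt_disable()/kpreempt_enable() pair, higher priority
 * threads may still preempt the caller inside the bracketed region; the
 * caller is only guaranteed to resume on the same cpu.
 */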

/*
 * This routine is called to place the CPUs in a safe place so that
 * one of them can be taken off line or placed on line.  What we are
 * trying to do here is prevent a thread from traversing the list
 * of active CPUs while we are changing it or from getting placed on
 * the run queue of a CPU that has just gone off line.  We do this by
 * creating a thread with the highest possible prio for each CPU and
 * having it call this routine.  The advantage of this method is that
 * we can eliminate all checks for CPU_ACTIVE in the disp routines.
 * This makes disp faster at the expense of making p_online() slower
 * which is a good trade off.
 */
static void
cpu_pause(int index)
{
	int s;
	struct _cpu_pause_info *cpi = &cpu_pause_info;
	volatile char *safe = &safe_list[index];
	long lindex = index;

	ASSERT((curthread->t_bound_cpu != NULL) || (*safe == PAUSE_DIE));

	while (*safe != PAUSE_DIE) {
		*safe = PAUSE_READY;
		membar_enter();		/* make sure stores are flushed */
		sema_v(&cpi->cp_sem);	/* signal requesting thread */

		/*
		 * Wait here until all pause threads are running.  That
		 * indicates that it's safe to do the spl.  Until
		 * cpu_pause_info.cp_go is set, we don't want to spl
		 * because that might block clock interrupts needed
		 * to preempt threads on other CPUs.
		 */
		while (cpi->cp_go == 0)
			;
		/*
		 * Even though we are at the highest disp prio, we need
		 * to block out all interrupts below LOCK_LEVEL so that
		 * an intr doesn't come in, wake up a thread, and call
		 * setbackdq/setfrontdq.
		 */
		s = splhigh();
		/*
		 * if cpu_pause_func() has been set then call it using
		 * index as the argument, currently only used by
		 * cpr_suspend_cpus().  This function is used as the
		 * code to execute on the "paused" cpu's when a machine
		 * comes out of a sleep state and CPU's were powered off.
		 * (could also be used for hotplugging CPU's).
		 */
		if (cpu_pause_func != NULL)
			(*cpu_pause_func)((void *)lindex);

		mach_cpu_pause(safe);

		splx(s);
		/*
		 * Waiting is at an end.  Switch out of cpu_pause
		 * loop and resume useful work.
		 */
		swtch();
	}

	mutex_enter(&pause_free_mutex);
	*safe = PAUSE_DEAD;
	cv_broadcast(&pause_free_cv);
	mutex_exit(&pause_free_mutex);
}

/*
 * Allow the cpus to start running again.
 */
void
start_cpus()
{
	int i;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_pause_info.cp_paused);
	cpu_pause_info.cp_paused = NULL;
	for (i = 0; i < NCPU; i++)
		safe_list[i] = PAUSE_IDLE;
	membar_enter();			/* make sure stores are flushed */
	affinity_clear();
	splx(cpu_pause_info.cp_spl);
	kpreempt_enable();
}

/*
 * Allocate a pause thread for a CPU.
 */
static void
cpu_pause_alloc(cpu_t *cp)
{
	kthread_id_t	t;
	long		cpun = cp->cpu_id;

	/*
	 * Note, v.v_nglobpris will not change value as long as I hold
	 * cpu_lock.
	 */
	t = thread_create(NULL, 0, cpu_pause, (void *)cpun,
	    0, &p0, TS_STOPPED, v.v_nglobpris - 1);
	thread_lock(t);
	t->t_bound_cpu = cp;
	t->t_disp_queue = cp->cpu_disp;
	t->t_affinitycnt = 1;
	t->t_preempt = 1;
	thread_unlock(t);
	cp->cpu_pause_thread = t;
	/*
	 * Registering a thread in the callback table is usually done
	 * in the initialization code of the thread.  In this
	 * case, we do it right after thread creation because the
	 * thread itself may never run, and we need to register the
	 * fact that it is safe for cpr suspend.
	 */
	CALLB_CPR_INIT_SAFE(t, "cpu_pause");
}

/*
 * Free a pause thread for a CPU.
 */
static void
cpu_pause_free(cpu_t *cp)
{
	kthread_id_t	t;
	int		cpun = cp->cpu_id;

	ASSERT(MUTEX_HELD(&cpu_lock));
	/*
	 * We have to get the thread and tell him to die.
	 */
	if ((t = cp->cpu_pause_thread) == NULL) {
		ASSERT(safe_list[cpun] == PAUSE_IDLE);
		return;
	}
	thread_lock(t);
	t->t_cpu = CPU;		/* disp gets upset if last cpu is quiesced. */
	t->t_bound_cpu = NULL;	/* Must un-bind; cpu may not be running. */
	t->t_pri = v.v_nglobpris - 1;
	ASSERT(safe_list[cpun] == PAUSE_IDLE);
	safe_list[cpun] = PAUSE_DIE;
	THREAD_TRANSITION(t);
	setbackdq(t);
	thread_unlock_nopreempt(t);

	/*
	 * If we don't wait for the thread to actually die, it may try to
	 * run on the wrong cpu as part of an actual call to pause_cpus().
	 */
	mutex_enter(&pause_free_mutex);
	while (safe_list[cpun] != PAUSE_DEAD) {
		cv_wait(&pause_free_cv, &pause_free_mutex);
	}
	mutex_exit(&pause_free_mutex);
	safe_list[cpun] = PAUSE_IDLE;

	cp->cpu_pause_thread = NULL;
}

/*
 * Initialize basic structures for pausing CPUs.
 */
void
cpu_pause_init()
{
	sema_init(&cpu_pause_info.cp_sem, 0, NULL, SEMA_DEFAULT, NULL);
	/*
	 * Create initial CPU pause thread.
	 */
	cpu_pause_alloc(CPU);
}

/*
 * Start the threads used to pause another CPU.
 */
static int
cpu_pause_start(processorid_t cpu_id)
{
	int	i;
	int	cpu_count = 0;

	for (i = 0; i < NCPU; i++) {
		cpu_t		*cp;
		kthread_id_t	t;

		cp = cpu[i];
		if (!CPU_IN_SET(cpu_available, i) || (i == cpu_id)) {
			safe_list[i] = PAUSE_WAIT;
			continue;
		}

		/*
		 * Skip CPU if it is quiesced or not yet started.
		 */
		if ((cp->cpu_flags & (CPU_QUIESCED | CPU_READY)) != CPU_READY) {
			safe_list[i] = PAUSE_WAIT;
			continue;
		}

		/*
		 * Start this CPU's pause thread.
		 */
		t = cp->cpu_pause_thread;
		thread_lock(t);
		/*
		 * Reset the priority, since nglobpris may have
		 * changed since the thread was created, if someone
		 * has loaded the RT (or some other) scheduling
		 * class.
		 */
		t->t_pri = v.v_nglobpris - 1;
		THREAD_TRANSITION(t);
		setbackdq(t);
		thread_unlock_nopreempt(t);
		++cpu_count;
	}
	return (cpu_count);
}


/*
 * Pause all of the CPUs except the one we are on by creating a high
 * priority thread bound to those CPUs.
 *
 * Note that one must be extremely careful regarding code
 * executed while CPUs are paused.  Since a CPU may be paused
 * while a thread scheduling on that CPU is holding an adaptive
 * lock, code executed with CPUs paused must not acquire adaptive
 * (or low-level spin) locks.  Also, such code must not block,
 * since the thread that is supposed to initiate the wakeup may
 * never run.
 *
 * With a few exceptions, the restrictions on code executed with CPUs
 * paused match those for code executed at high-level interrupt
 * context.
 */
void
pause_cpus(cpu_t *off_cp)
{
	processorid_t	cpu_id;
	int		i;
	struct _cpu_pause_info	*cpi = &cpu_pause_info;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpi->cp_paused == NULL);
	cpi->cp_count = 0;
	cpi->cp_go = 0;
	for (i = 0; i < NCPU; i++)
		safe_list[i] = PAUSE_IDLE;
	kpreempt_disable();

	/*
	 * If running on the cpu that is going offline, get off it.
	 * This is so that it won't be necessary to rechoose a CPU
	 * when done.
	 */
	if (CPU == off_cp)
		cpu_id = off_cp->cpu_next_part->cpu_id;
	else
		cpu_id = CPU->cpu_id;
	affinity_set(cpu_id);

	/*
	 * Start the pause threads and record how many were started
	 */
	cpi->cp_count = cpu_pause_start(cpu_id);

	/*
	 * Now wait for all CPUs to be running the pause thread.
	 */
	while (cpi->cp_count > 0) {
		/*
		 * Spin reading the count without grabbing the disp
		 * lock to make sure we don't prevent the pause
		 * threads from getting the lock.
		 */
		while (sema_held(&cpi->cp_sem))
			;
		if (sema_tryp(&cpi->cp_sem))
			--cpi->cp_count;
	}
	cpi->cp_go = 1;			/* all have reached cpu_pause */

	/*
	 * Now wait for all CPUs to spl.  (Transition from PAUSE_READY
	 * to PAUSE_WAIT.)
	 */
	for (i = 0; i < NCPU; i++) {
		while (safe_list[i] != PAUSE_WAIT)
			;
	}
	cpi->cp_spl = splhigh();	/* block dispatcher on this CPU */
	cpi->cp_paused = curthread;
}

/*
 * Check whether the current thread has CPUs paused
 */
int
cpus_paused(void)
{
	if (cpu_pause_info.cp_paused != NULL) {
		ASSERT(cpu_pause_info.cp_paused == curthread);
		return (1);
	}
	return (0);
}

static cpu_t *
cpu_get_all(processorid_t cpun)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cpun >= NCPU || cpun < 0 || !CPU_IN_SET(cpu_available, cpun))
		return (NULL);
	return (cpu[cpun]);
}

/*
 * Check whether cpun is a valid processor id and whether it should be
 * visible from the current zone.  If it is, return a pointer to the
 * associated CPU structure.
 */
cpu_t *
cpu_get(processorid_t cpun)
{
	cpu_t *c;

	ASSERT(MUTEX_HELD(&cpu_lock));
	c = cpu_get_all(cpun);
	if (c != NULL && !INGLOBALZONE(curproc) && pool_pset_enabled() &&
	    zone_pset_get(curproc->p_zone) != cpupart_query_cpu(c))
		return (NULL);
	return (c);
}

/*
 * The following functions should be used to check CPU states in the kernel.
 * They should be invoked with cpu_lock held.  Kernel subsystems interested
 * in CPU states should *not* use cpu_get_state() and various P_ONLINE/etc
 * states.  Those are for user-land (and system call) use only.
 */

/*
 * Determine whether the CPU is online and handling interrupts.
 */
int
cpu_is_online(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_online(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is offline (this includes spare and faulted).
 */
int
cpu_is_offline(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_offline(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is powered off.
 */
int
cpu_is_poweredoff(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_poweredoff(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is active but not handling interrupts.
 */
int
cpu_is_nointr(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_nointr(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is active (scheduling threads).
 */
int
cpu_is_active(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_active(cpu->cpu_flags));
}
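
/*
 * For illustration only: callers normally combine cpu_get() with the
 * predicates above while holding cpu_lock, roughly as sketched here:
 *
 *	mutex_enter(&cpu_lock);
 *	if ((cp = cpu_get(id)) != NULL && cpu_is_online(cp))
 *		... operate on the online cpu ...
 *	mutex_exit(&cpu_lock);
 *
 * Kernel code should key off these predicates rather than the P_ONLINE
 * style states, which are reserved for user-level consumers.
 */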

/*
 * Same as above, but these require cpu_flags instead of cpu_t pointers.
 */
int
cpu_flagged_online(cpu_flag_t cpu_flags)
{
	return (cpu_flagged_active(cpu_flags) &&
	    (cpu_flags & CPU_ENABLE));
}

int
cpu_flagged_offline(cpu_flag_t cpu_flags)
{
	return (((cpu_flags & CPU_POWEROFF) == 0) &&
	    ((cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY));
}

int
cpu_flagged_poweredoff(cpu_flag_t cpu_flags)
{
	return ((cpu_flags & CPU_POWEROFF) == CPU_POWEROFF);
}

int
cpu_flagged_nointr(cpu_flag_t cpu_flags)
{
	return (cpu_flagged_active(cpu_flags) &&
	    (cpu_flags & CPU_ENABLE) == 0);
}

int
cpu_flagged_active(cpu_flag_t cpu_flags)
{
	return (((cpu_flags & (CPU_POWEROFF | CPU_FAULTED | CPU_SPARE)) == 0) &&
	    ((cpu_flags & (CPU_READY | CPU_OFFLINE)) == CPU_READY));
}

/*
 * Bring the indicated CPU online.
 */
int
cpu_online(cpu_t *cp)
{
	int	error = 0;

	/*
	 * Handle on-line request.
	 *	This code must put the new CPU on the active list before
	 *	starting it because it will not be paused, and will start
	 *	using the active list immediately.  The real start occurs
	 *	when the CPU_QUIESCED flag is turned off.
	 */

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Put all the cpus into a known safe place.
	 * No mutexes can be entered while CPUs are paused.
	 */
	error = mp_cpu_start(cp);	/* arch-dep hook */
	if (error == 0) {
		pg_cpupart_in(cp, cp->cpu_part);
		pause_cpus(NULL);
		cpu_add_active_internal(cp);
		if (cp->cpu_flags & CPU_FAULTED) {
			cp->cpu_flags &= ~CPU_FAULTED;
			mp_cpu_faulted_exit(cp);
		}
		cp->cpu_flags &= ~(CPU_QUIESCED | CPU_OFFLINE | CPU_FROZEN |
		    CPU_SPARE);
		start_cpus();
		cpu_stats_kstat_create(cp);
		cpu_create_intrstat(cp);
		lgrp_kstat_create(cp);
		cpu_state_change_notify(cp->cpu_id, CPU_ON);
		cpu_intr_enable(cp);	/* arch-dep hook */
		cpu_set_state(cp);
		cyclic_online(cp);
		poke_cpu(cp->cpu_id);
	}

	return (error);
}

/*
 * Take the indicated CPU offline.
 */
int
cpu_offline(cpu_t *cp, int flags)
{
	cpupart_t *pp;
	int	error = 0;
	cpu_t	*ncp;
	int	intr_enable;
	int	cyclic_off = 0;
	int	loop_count;
	int	no_quiesce = 0;
	int	(*bound_func)(struct cpu *, int);
	kthread_t *t;
	lpl_t	*cpu_lpl;
	proc_t	*p;
	int	lgrp_diff_lpl;
	boolean_t unbind_all_threads = (flags & CPU_FORCED) != 0;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * If we're going from faulted or spare to offline, just
	 * clear these flags and update CPU state.
	 */
	if (cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) {
		if (cp->cpu_flags & CPU_FAULTED) {
			cp->cpu_flags &= ~CPU_FAULTED;
			mp_cpu_faulted_exit(cp);
		}
		cp->cpu_flags &= ~CPU_SPARE;
		cpu_set_state(cp);
		return (0);
	}

	/*
	 * Handle off-line request.
	 */
	pp = cp->cpu_part;
	/*
	 * Don't offline last online CPU in partition
	 */
	if (ncpus_online <= 1 || pp->cp_ncpus <= 1 || cpu_intr_count(cp) < 2)
		return (EBUSY);
	/*
	 * Unbind all soft-bound threads bound to our CPU and hard bound threads
	 * if we were asked to.
	 */
	error = cpu_unbind(cp->cpu_id, unbind_all_threads);
	if (error != 0)
		return (error);
	/*
	 * We shouldn't be bound to this CPU ourselves.
	 */
	if (curthread->t_bound_cpu == cp)
		return (EBUSY);

	/*
	 * Tell interested parties that this CPU is going offline.
	 */
	cpu_state_change_notify(cp->cpu_id, CPU_OFF);

	/*
	 * Tell the PG subsystem that the CPU is leaving the partition
	 */
	pg_cpupart_out(cp, pp);

	/*
	 * Take the CPU out of interrupt participation so we won't find
	 * bound kernel threads.  If the architecture cannot completely
	 * shut off interrupts on the CPU, don't quiesce it, but don't
	 * run anything but interrupt thread... this is indicated by
	 * the CPU_OFFLINE flag being on but the CPU_QUIESCED flag being
	 * off.
	 */
	intr_enable = cp->cpu_flags & CPU_ENABLE;
	if (intr_enable)
		no_quiesce = cpu_intr_disable(cp);

	/*
	 * Record that we are aiming to offline this cpu.  This acts as
	 * a barrier to further weak binding requests in thread_nomigrate
	 * and also causes cpu_choose, disp_lowpri_cpu and setfrontdq to
	 * lean away from this cpu.  Further strong bindings are already
	 * avoided since we hold cpu_lock.  Since threads that are set
	 * runnable around now and others coming off the target cpu are
	 * directed away from the target, existing strong and weak bindings
	 * (especially the latter) to the target cpu stand maximum chance of
	 * being able to unbind during the short delay loop below (if other
	 * unbound threads compete they may not see cpu in time to unbind
	 * even if they would do so immediately).
	 */
	cpu_inmotion = cp;
	membar_enter();

	/*
	 * Check for kernel threads (strong or weak) bound to that CPU.
	 * Strongly bound threads may not unbind, and we'll have to return
	 * EBUSY.  Weakly bound threads should always disappear - we've
	 * stopped more weak binding with cpu_inmotion and existing
	 * bindings will drain imminently (they may not block).  Nonetheless
	 * we will wait for a fixed period for all bound threads to disappear.
	 * Inactive interrupt threads are OK (they'll be in TS_FREE
	 * state).  If test finds some bound threads, wait a few ticks
	 * to give short-lived threads (such as interrupts) chance to
	 * complete.  Note that if no_quiesce is set, i.e. this cpu
	 * is required to service interrupts, then we take the route
	 * that permits interrupt threads to be active (or bypassed).
	 */
	bound_func = no_quiesce ? disp_bound_threads : disp_bound_anythreads;

again:	for (loop_count = 0; (*bound_func)(cp, 0); loop_count++) {
		if (loop_count >= 5) {
			error = EBUSY;	/* some threads still bound */
			break;
		}

		/*
		 * If some threads were assigned, give them
		 * a chance to complete or move.
		 *
		 * This assumes that the clock_thread is not bound
		 * to any CPU, because the clock_thread is needed to
		 * do the delay(hz/100).
		 *
		 * Note: we still hold the cpu_lock while waiting for
		 * the next clock tick.  This is OK since it isn't
		 * needed for anything else except processor_bind(2),
		 * and system initialization.  If we drop the lock,
		 * we would risk another p_online disabling the last
		 * processor.
		 */
		delay(hz/100);
	}

	if (error == 0 && cyclic_off == 0) {
		if (!cyclic_offline(cp)) {
			/*
			 * We must have bound cyclics...
			 */
			error = EBUSY;
			goto out;
		}
		cyclic_off = 1;
	}

	/*
	 * Call mp_cpu_stop() to perform any special operations
	 * needed for this machine architecture to offline a CPU.
	 */
	if (error == 0)
		error = mp_cpu_stop(cp);	/* arch-dep hook */

	/*
	 * If that all worked, take the CPU offline and decrement
	 * ncpus_online.
	 */
	if (error == 0) {
		/*
		 * Put all the cpus into a known safe place.
		 * No mutexes can be entered while CPUs are paused.
		 */
		pause_cpus(cp);
		/*
		 * Repeat the operation, if necessary, to make sure that
		 * all outstanding low-level interrupts run to completion
		 * before we set the CPU_QUIESCED flag.  It's also possible
		 * that a thread has weak bound to the cpu despite our raising
		 * cpu_inmotion above since it may have loaded that
		 * value before the barrier became visible (this would have
		 * to be the thread that was on the target cpu at the time
		 * we raised the barrier).
		 */
		if ((!no_quiesce && cp->cpu_intr_actv != 0) ||
		    (*bound_func)(cp, 1)) {
			start_cpus();
			(void) mp_cpu_start(cp);
			goto again;
		}
		ncp = cp->cpu_next_part;
		cpu_lpl = cp->cpu_lpl;
		ASSERT(cpu_lpl != NULL);

		/*
		 * Remove the CPU from the list of active CPUs.
		 */
		cpu_remove_active(cp);

		/*
		 * Walk the active process list and look for threads
		 * whose home lgroup needs to be updated, or
		 * the last CPU they run on is the one being offlined now.
		 */

		ASSERT(curthread->t_cpu != cp);
		for (p = practive; p != NULL; p = p->p_next) {

			t = p->p_tlist;

			if (t == NULL)
				continue;

			lgrp_diff_lpl = 0;

			do {
				ASSERT(t->t_lpl != NULL);
				/*
				 * Taking last CPU in lpl offline
				 * Rehome thread if it is in this lpl
				 * Otherwise, update the count of how many
				 * threads are in this CPU's lgroup but have
				 * a different lpl.
				 */

				if (cpu_lpl->lpl_ncpu == 0) {
					if (t->t_lpl == cpu_lpl)
						lgrp_move_thread(t,
						    lgrp_choose(t,
						    t->t_cpupart), 0);
					else if (t->t_lpl->lpl_lgrpid ==
					    cpu_lpl->lpl_lgrpid)
						lgrp_diff_lpl++;
				}
				ASSERT(t->t_lpl->lpl_ncpu > 0);

				/*
				 * Update CPU last ran on if it was this CPU
				 */
				if (t->t_cpu == cp && t->t_bound_cpu != cp)
					t->t_cpu = disp_lowpri_cpu(ncp,
					    t->t_lpl, t->t_pri, NULL);
				ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
				    t->t_weakbound_cpu == cp);

				t = t->t_forw;
			} while (t != p->p_tlist);

			/*
			 * Didn't find any threads in the same lgroup as this
			 * CPU with a different lpl, so remove the lgroup from
			 * the process lgroup bitmask.
			 */

			if (lgrp_diff_lpl == 0)
				klgrpset_del(p->p_lgrpset, cpu_lpl->lpl_lgrpid);
		}

		/*
		 * Walk thread list looking for threads that need to be
		 * rehomed, since there are some threads that are not in
		 * their process's p_tlist.
		 */

		t = curthread;
		do {
			ASSERT(t != NULL && t->t_lpl != NULL);

			/*
			 * Rehome threads with same lpl as this CPU when this
			 * is the last CPU in the lpl.
			 */

			if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl))
				lgrp_move_thread(t,
				    lgrp_choose(t, t->t_cpupart), 1);

			ASSERT(t->t_lpl->lpl_ncpu > 0);

			/*
			 * Update CPU last ran on if it was this CPU
			 */

			if (t->t_cpu == cp && t->t_bound_cpu != cp) {
				t->t_cpu = disp_lowpri_cpu(ncp,
				    t->t_lpl, t->t_pri, NULL);
			}
			ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
			    t->t_weakbound_cpu == cp);
			t = t->t_next;

		} while (t != curthread);
		ASSERT((cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) == 0);
		cp->cpu_flags |= CPU_OFFLINE;
		disp_cpu_inactive(cp);
		if (!no_quiesce)
			cp->cpu_flags |= CPU_QUIESCED;
		ncpus_online--;
		cpu_set_state(cp);
		cpu_inmotion = NULL;
		start_cpus();
		cpu_stats_kstat_destroy(cp);
		cpu_delete_intrstat(cp);
		lgrp_kstat_destroy(cp);
	}

out:
	cpu_inmotion = NULL;

	/*
	 * If we failed, re-enable interrupts.
	 * Do this even if cpu_intr_disable returned an error, because
	 * it may have partially disabled interrupts.
	 */
	if (error && intr_enable)
		cpu_intr_enable(cp);

	/*
	 * If we failed, but managed to offline the cyclic subsystem on this
	 * CPU, bring it back online.
	 */
	if (error && cyclic_off)
		cyclic_online(cp);

	/*
	 * If we failed, tell the PG subsystem that the CPU is back
	 */
	pg_cpupart_in(cp, pp);

	/*
	 * If we failed, we need to notify everyone that this CPU is back on.
	 */
	if (error != 0)
		cpu_state_change_notify(cp->cpu_id, CPU_ON);

	return (error);
}

/*
 * Mark the indicated CPU as faulted, taking it offline.
 */
int
cpu_faulted(cpu_t *cp, int flags)
{
	int	error = 0;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(!cpu_is_poweredoff(cp));

	if (cpu_is_offline(cp)) {
		cp->cpu_flags &= ~CPU_SPARE;
		cp->cpu_flags |= CPU_FAULTED;
		mp_cpu_faulted_enter(cp);
		cpu_set_state(cp);
		return (0);
	}

	if ((error = cpu_offline(cp, flags)) == 0) {
		cp->cpu_flags |= CPU_FAULTED;
		mp_cpu_faulted_enter(cp);
		cpu_set_state(cp);
	}

	return (error);
}

/*
 * Mark the indicated CPU as a spare, taking it offline.
 */
int
cpu_spare(cpu_t *cp, int flags)
{
	int	error = 0;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(!cpu_is_poweredoff(cp));

	if (cpu_is_offline(cp)) {
		if (cp->cpu_flags & CPU_FAULTED) {
			cp->cpu_flags &= ~CPU_FAULTED;
			mp_cpu_faulted_exit(cp);
		}
		cp->cpu_flags |= CPU_SPARE;
		cpu_set_state(cp);
		return (0);
	}

	if ((error = cpu_offline(cp, flags)) == 0) {
		cp->cpu_flags |= CPU_SPARE;
		cpu_set_state(cp);
	}

	return (error);
}

/*
 * Take the indicated CPU from poweroff to offline.
 */
int
cpu_poweron(cpu_t *cp)
{
	int	error = ENOTSUP;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_is_poweredoff(cp));

	error = mp_cpu_poweron(cp);	/* arch-dep hook */
	if (error == 0)
		cpu_set_state(cp);

	return (error);
}
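
/*
 * For illustration only: a dynamic-reconfiguration style caller that wants
 * to bring a powered-off CPU all the way to the online state could chain
 * the routines above roughly as sketched here:
 *
 *	mutex_enter(&cpu_lock);
 *	if ((error = cpu_poweron(cp)) == 0)	(poweroff -> offline)
 *		error = cpu_online(cp);		(offline -> online)
 *	mutex_exit(&cpu_lock);
 *
 * The reverse path is cpu_offline() followed by cpu_poweroff(), with
 * cpu_faulted()/cpu_spare() available for the intermediate states.
 */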

/*
 * Take the indicated CPU from any inactive state to powered off.
 */
int
cpu_poweroff(cpu_t *cp)
{
	int	error = ENOTSUP;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_is_offline(cp));

	if (!(cp->cpu_flags & CPU_QUIESCED))
		return (EBUSY);		/* not completely idle */

	error = mp_cpu_poweroff(cp);	/* arch-dep hook */
	if (error == 0)
		cpu_set_state(cp);

	return (error);
}

/*
 * Initialize the CPU lists for the first CPU.
 */
void
cpu_list_init(cpu_t *cp)
{
	cp->cpu_next = cp;
	cp->cpu_prev = cp;
	cpu_list = cp;
	clock_cpu_list = cp;

	cp->cpu_next_onln = cp;
	cp->cpu_prev_onln = cp;
	cpu_active = cp;

	cp->cpu_seqid = 0;
	CPUSET_ADD(cpu_seqid_inuse, 0);
	cp->cpu_cache_offset = KMEM_CACHE_SIZE(cp->cpu_seqid);
	cp_default.cp_mach = &cp_default_mach;
	cp_default.cp_cpulist = cp;
	cp_default.cp_ncpus = 1;
	cp->cpu_next_part = cp;
	cp->cpu_prev_part = cp;
	cp->cpu_part = &cp_default;

	CPUSET_ADD(cpu_available, cp->cpu_id);
}

/*
 * Insert a CPU into the list of available CPUs.
 */
void
cpu_add_unit(cpu_t *cp)
{
	int seqid;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_list != NULL);	/* list started in cpu_list_init */

	lgrp_config(LGRP_CONFIG_CPU_ADD, (uintptr_t)cp, 0);

	/*
	 * Note: most users of the cpu_list will grab the
	 * cpu_lock to ensure that it isn't modified.  However,
	 * certain users can't or won't do that.  To allow this
	 * we pause the other cpus.  Users who walk the list
	 * without cpu_lock must disable kernel preemption
	 * to ensure that the list isn't modified underneath
	 * them.  Also, any cached pointers to cpu structures
	 * must be revalidated by checking to see if the
	 * cpu_next pointer points to itself.  This check must
	 * be done with the cpu_lock held or kernel preemption
	 * disabled.  This check relies upon the fact that
	 * old cpu structures are not free'ed or cleared after
	 * they are removed from the cpu_list.
	 *
	 * Note that the clock code walks the cpu list dereferencing
	 * the cpu_part pointer, so we need to initialize it before
	 * adding the cpu to the list.
	 */
	cp->cpu_part = &cp_default;
	(void) pause_cpus(NULL);
	cp->cpu_next = cpu_list;
	cp->cpu_prev = cpu_list->cpu_prev;
	cpu_list->cpu_prev->cpu_next = cp;
	cpu_list->cpu_prev = cp;
	start_cpus();

	for (seqid = 0; CPU_IN_SET(cpu_seqid_inuse, seqid); seqid++)
		continue;
	CPUSET_ADD(cpu_seqid_inuse, seqid);
	cp->cpu_seqid = seqid;
	ASSERT(ncpus < max_ncpus);
	ncpus++;
	cp->cpu_cache_offset = KMEM_CACHE_SIZE(cp->cpu_seqid);
	cpu[cp->cpu_id] = cp;
	CPUSET_ADD(cpu_available, cp->cpu_id);

	/*
	 * allocate a pause thread for this CPU.
	 */
	cpu_pause_alloc(cp);

	/*
	 * So that new CPUs won't have NULL prev_onln and next_onln pointers,
	 * link them into a list of just that CPU.
	 * This is so that disp_lowpri_cpu will work for thread_create in
	 * pause_cpus() when called from the startup thread in a new CPU.
	 */
	cp->cpu_next_onln = cp;
	cp->cpu_prev_onln = cp;
	cpu_info_kstat_create(cp);
	cp->cpu_next_part = cp;
	cp->cpu_prev_part = cp;

	init_cpu_mstate(cp, CMS_SYSTEM);

	pool_pset_mod = gethrtime();
}

/*
 * Do the opposite of cpu_add_unit().
 */
void
cpu_del_unit(int cpuid)
{
	struct cpu	*cp, *cpnext;

	ASSERT(MUTEX_HELD(&cpu_lock));
	cp = cpu[cpuid];
	ASSERT(cp != NULL);

	ASSERT(cp->cpu_next_onln == cp);
	ASSERT(cp->cpu_prev_onln == cp);
	ASSERT(cp->cpu_next_part == cp);
	ASSERT(cp->cpu_prev_part == cp);

	/*
	 * Tear down the CPU's physical ID cache, and update any
	 * processor groups
	 */
	pg_cpu_fini(cp);
	pghw_physid_destroy(cp);

	/*
	 * Destroy kstat stuff.
	 */
	cpu_info_kstat_destroy(cp);
	term_cpu_mstate(cp);
	/*
	 * Free up pause thread.
	 */
	cpu_pause_free(cp);
	CPUSET_DEL(cpu_available, cp->cpu_id);
	cpu[cp->cpu_id] = NULL;
	/*
	 * The clock thread and mutex_vector_enter cannot hold the
	 * cpu_lock while traversing the cpu list, therefore we pause
	 * all other threads by pausing the other cpus.  These, and any
	 * other routines holding cpu pointers while possibly sleeping
	 * must be sure to call kpreempt_disable before processing the
	 * list and be sure to check that the cpu has not been deleted
	 * after any sleeps (check cp->cpu_next != NULL).  We guarantee
	 * to keep the deleted cpu structure around.
	 *
	 * Note that this MUST be done AFTER cpu_available
	 * has been updated so that we don't waste time
	 * trying to pause the cpu we're trying to delete.
	 */
	(void) pause_cpus(NULL);

	cpnext = cp->cpu_next;
	cp->cpu_prev->cpu_next = cp->cpu_next;
	cp->cpu_next->cpu_prev = cp->cpu_prev;
	if (cp == cpu_list)
		cpu_list = cpnext;

	/*
	 * Signals that the cpu has been deleted (see above).
	 */
	cp->cpu_next = NULL;
	cp->cpu_prev = NULL;

	start_cpus();

	CPUSET_DEL(cpu_seqid_inuse, cp->cpu_seqid);
	ncpus--;
	lgrp_config(LGRP_CONFIG_CPU_DEL, (uintptr_t)cp, 0);

	pool_pset_mod = gethrtime();
}

/*
 * Add a CPU to the list of active CPUs.
 *	This routine must not get any locks, because other CPUs are paused.
 */
static void
cpu_add_active_internal(cpu_t *cp)
{
	cpupart_t	*pp = cp->cpu_part;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_list != NULL);	/* list started in cpu_list_init */

	ncpus_online++;
	cpu_set_state(cp);
	cp->cpu_next_onln = cpu_active;
	cp->cpu_prev_onln = cpu_active->cpu_prev_onln;
	cpu_active->cpu_prev_onln->cpu_next_onln = cp;
	cpu_active->cpu_prev_onln = cp;

	if (pp->cp_cpulist) {
		cp->cpu_next_part = pp->cp_cpulist;
		cp->cpu_prev_part = pp->cp_cpulist->cpu_prev_part;
		pp->cp_cpulist->cpu_prev_part->cpu_next_part = cp;
		pp->cp_cpulist->cpu_prev_part = cp;
	} else {
		ASSERT(pp->cp_ncpus == 0);
		pp->cp_cpulist = cp->cpu_next_part = cp->cpu_prev_part = cp;
	}
	pp->cp_ncpus++;
	if (pp->cp_ncpus == 1) {
		cp_numparts_nonempty++;
		ASSERT(cp_numparts_nonempty != 0);
	}

	pg_cpu_active(cp);
	lgrp_config(LGRP_CONFIG_CPU_ONLINE, (uintptr_t)cp, 0);

	bzero(&cp->cpu_loadavg, sizeof (cp->cpu_loadavg));
}

/*
 * Add a CPU to the list of active CPUs.
 *	This is called from machine-dependent layers when a new CPU is started.
 */
void
cpu_add_active(cpu_t *cp)
{
	pg_cpupart_in(cp, cp->cpu_part);

	pause_cpus(NULL);
	cpu_add_active_internal(cp);
	start_cpus();

	cpu_stats_kstat_create(cp);
	cpu_create_intrstat(cp);
	lgrp_kstat_create(cp);
	cpu_state_change_notify(cp->cpu_id, CPU_INIT);
}


/*
 * Remove a CPU from the list of active CPUs.
 *	This routine must not get any locks, because other CPUs are paused.
 */
/* ARGSUSED */
static void
cpu_remove_active(cpu_t *cp)
{
	cpupart_t	*pp = cp->cpu_part;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cp->cpu_next_onln != cp);	/* not the last one */
	ASSERT(cp->cpu_prev_onln != cp);	/* not the last one */

	pg_cpu_inactive(cp);

	lgrp_config(LGRP_CONFIG_CPU_OFFLINE, (uintptr_t)cp, 0);

	if (cp == clock_cpu_list)
		clock_cpu_list = cp->cpu_next_onln;

	cp->cpu_prev_onln->cpu_next_onln = cp->cpu_next_onln;
	cp->cpu_next_onln->cpu_prev_onln = cp->cpu_prev_onln;
	if (cpu_active == cp) {
		cpu_active = cp->cpu_next_onln;
	}
	cp->cpu_next_onln = cp;
	cp->cpu_prev_onln = cp;

	cp->cpu_prev_part->cpu_next_part = cp->cpu_next_part;
	cp->cpu_next_part->cpu_prev_part = cp->cpu_prev_part;
	if (pp->cp_cpulist == cp) {
		pp->cp_cpulist = cp->cpu_next_part;
		ASSERT(pp->cp_cpulist != cp);
	}
	cp->cpu_next_part = cp;
	cp->cpu_prev_part = cp;
	pp->cp_ncpus--;
	if (pp->cp_ncpus == 0) {
		cp_numparts_nonempty--;
		ASSERT(cp_numparts_nonempty != 0);
	}
}

/*
 * Routine used to setup a newly inserted CPU in preparation for starting
 * it running code.
 */
int
cpu_configure(int cpuid)
{
	int retval = 0;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Some structures are statically allocated based upon
	 * the maximum number of cpus the system supports.  Do not
	 * try to add anything beyond this limit.
	 */
	if (cpuid < 0 || cpuid >= NCPU) {
		return (EINVAL);
	}

	if ((cpu[cpuid] != NULL) && (cpu[cpuid]->cpu_flags != 0)) {
		return (EALREADY);
	}

	if ((retval = mp_cpu_configure(cpuid)) != 0) {
		return (retval);
	}

	cpu[cpuid]->cpu_flags = CPU_QUIESCED | CPU_OFFLINE | CPU_POWEROFF;
	cpu_set_state(cpu[cpuid]);
	retval = cpu_state_change_hooks(cpuid, CPU_CONFIG, CPU_UNCONFIG);
	if (retval != 0)
		(void) mp_cpu_unconfigure(cpuid);

	return (retval);
}

/*
 * Routine used to cleanup a CPU that has been powered off.  This will
 * destroy all per-cpu information related to this cpu.
 */
int
cpu_unconfigure(int cpuid)
{
	int error;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cpu[cpuid] == NULL) {
		return (ENODEV);
	}

	if (cpu[cpuid]->cpu_flags == 0) {
		return (EALREADY);
	}

	if ((cpu[cpuid]->cpu_flags & CPU_POWEROFF) == 0) {
		return (EBUSY);
	}

	if (cpu[cpuid]->cpu_props != NULL) {
		(void) nvlist_free(cpu[cpuid]->cpu_props);
		cpu[cpuid]->cpu_props = NULL;
	}

	error = cpu_state_change_hooks(cpuid, CPU_UNCONFIG, CPU_CONFIG);

	if (error != 0)
		return (error);

	return (mp_cpu_unconfigure(cpuid));
}

/*
 * Routines for registering and de-registering cpu_setup callback functions.
1991 * 1992 * Caller's context 1993 * These routines must not be called from a driver's attach(9E) or 1994 * detach(9E) entry point. 1995 * 1996 * NOTE: CPU callbacks should not block. They are called with cpu_lock held. 1997 */ 1998 1999 /* 2000 * Ideally, these would be dynamically allocated and put into a linked 2001 * list; however that is not feasible because the registration routine 2002 * has to be available before the kmem allocator is working (in fact, 2003 * it is called by the kmem allocator init code). In any case, there 2004 * are quite a few extra entries for future users. 2005 */ 2006 #define NCPU_SETUPS 20 2007 2008 struct cpu_setup { 2009 cpu_setup_func_t *func; 2010 void *arg; 2011 } cpu_setups[NCPU_SETUPS]; 2012 2013 void 2014 register_cpu_setup_func(cpu_setup_func_t *func, void *arg) 2015 { 2016 int i; 2017 2018 ASSERT(MUTEX_HELD(&cpu_lock)); 2019 2020 for (i = 0; i < NCPU_SETUPS; i++) 2021 if (cpu_setups[i].func == NULL) 2022 break; 2023 if (i >= NCPU_SETUPS) 2024 cmn_err(CE_PANIC, "Ran out of cpu_setup callback entries"); 2025 2026 cpu_setups[i].func = func; 2027 cpu_setups[i].arg = arg; 2028 } 2029 2030 void 2031 unregister_cpu_setup_func(cpu_setup_func_t *func, void *arg) 2032 { 2033 int i; 2034 2035 ASSERT(MUTEX_HELD(&cpu_lock)); 2036 2037 for (i = 0; i < NCPU_SETUPS; i++) 2038 if ((cpu_setups[i].func == func) && 2039 (cpu_setups[i].arg == arg)) 2040 break; 2041 if (i >= NCPU_SETUPS) 2042 cmn_err(CE_PANIC, "Could not find cpu_setup callback to " 2043 "deregister"); 2044 2045 cpu_setups[i].func = NULL; 2046 cpu_setups[i].arg = 0; 2047 } 2048 2049 /* 2050 * Call any state change hooks for this CPU, ignore any errors. 2051 */ 2052 void 2053 cpu_state_change_notify(int id, cpu_setup_t what) 2054 { 2055 int i; 2056 2057 ASSERT(MUTEX_HELD(&cpu_lock)); 2058 2059 for (i = 0; i < NCPU_SETUPS; i++) { 2060 if (cpu_setups[i].func != NULL) { 2061 cpu_setups[i].func(what, id, cpu_setups[i].arg); 2062 } 2063 } 2064 } 2065 2066 /* 2067 * Call any state change hooks for this CPU, undo it if error found. 2068 */ 2069 static int 2070 cpu_state_change_hooks(int id, cpu_setup_t what, cpu_setup_t undo) 2071 { 2072 int i; 2073 int retval = 0; 2074 2075 ASSERT(MUTEX_HELD(&cpu_lock)); 2076 2077 for (i = 0; i < NCPU_SETUPS; i++) { 2078 if (cpu_setups[i].func != NULL) { 2079 retval = cpu_setups[i].func(what, id, 2080 cpu_setups[i].arg); 2081 if (retval) { 2082 for (i--; i >= 0; i--) { 2083 if (cpu_setups[i].func != NULL) 2084 cpu_setups[i].func(undo, 2085 id, cpu_setups[i].arg); 2086 } 2087 break; 2088 } 2089 } 2090 } 2091 return (retval); 2092 } 2093 2094 /* 2095 * Export information about this CPU via the kstat mechanism. 
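 * (From userland this appears as the cpu_info:<id> kstat; for example
 * "kstat -m cpu_info -i 0" with kstat(1M), or via libkstat roughly as
 * in the sketch below, where the statistic name "brand" comes from the
 * template that follows. This is an illustrative sketch, assuming a
 * program that includes <kstat.h> and links with libkstat; it is not
 * code from this file:
 *
 *	kstat_ctl_t *kc = kstat_open();
 *	kstat_t *ksp = kstat_lookup(kc, "cpu_info", 0, NULL);
 *	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1) {
 *		kstat_named_t *kn = kstat_data_lookup(ksp, "brand");
 *		... use KSTAT_NAMED_STR_PTR(kn) ...
 *	}
 *	kstat_close(kc);)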
2096 */ 2097 static struct { 2098 kstat_named_t ci_state; 2099 kstat_named_t ci_state_begin; 2100 kstat_named_t ci_cpu_type; 2101 kstat_named_t ci_fpu_type; 2102 kstat_named_t ci_clock_MHz; 2103 kstat_named_t ci_chip_id; 2104 kstat_named_t ci_implementation; 2105 kstat_named_t ci_brandstr; 2106 kstat_named_t ci_core_id; 2107 kstat_named_t ci_curr_clock_Hz; 2108 kstat_named_t ci_supp_freq_Hz; 2109 #if defined(__sparcv9) 2110 kstat_named_t ci_device_ID; 2111 kstat_named_t ci_cpu_fru; 2112 #endif 2113 #if defined(__x86) 2114 kstat_named_t ci_vendorstr; 2115 kstat_named_t ci_family; 2116 kstat_named_t ci_model; 2117 kstat_named_t ci_step; 2118 kstat_named_t ci_clogid; 2119 kstat_named_t ci_pkg_core_id; 2120 kstat_named_t ci_ncpuperchip; 2121 kstat_named_t ci_ncoreperchip; 2122 #endif 2123 } cpu_info_template = { 2124 { "state", KSTAT_DATA_CHAR }, 2125 { "state_begin", KSTAT_DATA_LONG }, 2126 { "cpu_type", KSTAT_DATA_CHAR }, 2127 { "fpu_type", KSTAT_DATA_CHAR }, 2128 { "clock_MHz", KSTAT_DATA_LONG }, 2129 { "chip_id", KSTAT_DATA_LONG }, 2130 { "implementation", KSTAT_DATA_STRING }, 2131 { "brand", KSTAT_DATA_STRING }, 2132 { "core_id", KSTAT_DATA_LONG }, 2133 { "current_clock_Hz", KSTAT_DATA_UINT64 }, 2134 { "supported_frequencies_Hz", KSTAT_DATA_STRING }, 2135 #if defined(__sparcv9) 2136 { "device_ID", KSTAT_DATA_UINT64 }, 2137 { "cpu_fru", KSTAT_DATA_STRING }, 2138 #endif 2139 #if defined(__x86) 2140 { "vendor_id", KSTAT_DATA_STRING }, 2141 { "family", KSTAT_DATA_INT32 }, 2142 { "model", KSTAT_DATA_INT32 }, 2143 { "stepping", KSTAT_DATA_INT32 }, 2144 { "clog_id", KSTAT_DATA_INT32 }, 2145 { "pkg_core_id", KSTAT_DATA_LONG }, 2146 { "ncpu_per_chip", KSTAT_DATA_INT32 }, 2147 { "ncore_per_chip", KSTAT_DATA_INT32 }, 2148 #endif 2149 }; 2150 2151 static kmutex_t cpu_info_template_lock; 2152 2153 static int 2154 cpu_info_kstat_update(kstat_t *ksp, int rw) 2155 { 2156 cpu_t *cp = ksp->ks_private; 2157 const char *pi_state; 2158 2159 if (rw == KSTAT_WRITE) 2160 return (EACCES); 2161 2162 switch (cp->cpu_type_info.pi_state) { 2163 case P_ONLINE: 2164 pi_state = PS_ONLINE; 2165 break; 2166 case P_POWEROFF: 2167 pi_state = PS_POWEROFF; 2168 break; 2169 case P_NOINTR: 2170 pi_state = PS_NOINTR; 2171 break; 2172 case P_FAULTED: 2173 pi_state = PS_FAULTED; 2174 break; 2175 case P_SPARE: 2176 pi_state = PS_SPARE; 2177 break; 2178 case P_OFFLINE: 2179 pi_state = PS_OFFLINE; 2180 break; 2181 default: 2182 pi_state = "unknown"; 2183 } 2184 (void) strcpy(cpu_info_template.ci_state.value.c, pi_state); 2185 cpu_info_template.ci_state_begin.value.l = cp->cpu_state_begin; 2186 (void) strncpy(cpu_info_template.ci_cpu_type.value.c, 2187 cp->cpu_type_info.pi_processor_type, 15); 2188 (void) strncpy(cpu_info_template.ci_fpu_type.value.c, 2189 cp->cpu_type_info.pi_fputypes, 15); 2190 cpu_info_template.ci_clock_MHz.value.l = cp->cpu_type_info.pi_clock; 2191 cpu_info_template.ci_chip_id.value.l = 2192 pg_plat_hw_instance_id(cp, PGHW_CHIP); 2193 kstat_named_setstr(&cpu_info_template.ci_implementation, 2194 cp->cpu_idstr); 2195 kstat_named_setstr(&cpu_info_template.ci_brandstr, cp->cpu_brandstr); 2196 cpu_info_template.ci_core_id.value.l = pg_plat_get_core_id(cp); 2197 cpu_info_template.ci_curr_clock_Hz.value.ui64 = 2198 cp->cpu_curr_clock; 2199 kstat_named_setstr(&cpu_info_template.ci_supp_freq_Hz, 2200 cp->cpu_supp_freqs); 2201 #if defined(__sparcv9) 2202 cpu_info_template.ci_device_ID.value.ui64 = 2203 cpunodes[cp->cpu_id].device_id; 2204 kstat_named_setstr(&cpu_info_template.ci_cpu_fru, cpu_fru_fmri(cp)); 2205 #endif 2206 
#if defined(__x86) 2207 kstat_named_setstr(&cpu_info_template.ci_vendorstr, 2208 cpuid_getvendorstr(cp)); 2209 cpu_info_template.ci_family.value.l = cpuid_getfamily(cp); 2210 cpu_info_template.ci_model.value.l = cpuid_getmodel(cp); 2211 cpu_info_template.ci_step.value.l = cpuid_getstep(cp); 2212 cpu_info_template.ci_clogid.value.l = cpuid_get_clogid(cp); 2213 cpu_info_template.ci_ncpuperchip.value.l = cpuid_get_ncpu_per_chip(cp); 2214 cpu_info_template.ci_ncoreperchip.value.l = 2215 cpuid_get_ncore_per_chip(cp); 2216 cpu_info_template.ci_pkg_core_id.value.l = cpuid_get_pkgcoreid(cp); 2217 #endif 2218 2219 return (0); 2220 } 2221 2222 static void 2223 cpu_info_kstat_create(cpu_t *cp) 2224 { 2225 zoneid_t zoneid; 2226 2227 ASSERT(MUTEX_HELD(&cpu_lock)); 2228 2229 if (pool_pset_enabled()) 2230 zoneid = GLOBAL_ZONEID; 2231 else 2232 zoneid = ALL_ZONES; 2233 if ((cp->cpu_info_kstat = kstat_create_zone("cpu_info", cp->cpu_id, 2234 NULL, "misc", KSTAT_TYPE_NAMED, 2235 sizeof (cpu_info_template) / sizeof (kstat_named_t), 2236 KSTAT_FLAG_VIRTUAL, zoneid)) != NULL) { 2237 cp->cpu_info_kstat->ks_data_size += 2 * CPU_IDSTRLEN; 2238 #if defined(__sparcv9) 2239 cp->cpu_info_kstat->ks_data_size += 2240 strlen(cpu_fru_fmri(cp)) + 1; 2241 #endif 2242 #if defined(__x86) 2243 cp->cpu_info_kstat->ks_data_size += X86_VENDOR_STRLEN; 2244 #endif 2245 if (cp->cpu_supp_freqs != NULL) 2246 cp->cpu_info_kstat->ks_data_size += 2247 strlen(cp->cpu_supp_freqs) + 1; 2248 cp->cpu_info_kstat->ks_lock = &cpu_info_template_lock; 2249 cp->cpu_info_kstat->ks_data = &cpu_info_template; 2250 cp->cpu_info_kstat->ks_private = cp; 2251 cp->cpu_info_kstat->ks_update = cpu_info_kstat_update; 2252 kstat_install(cp->cpu_info_kstat); 2253 } 2254 } 2255 2256 static void 2257 cpu_info_kstat_destroy(cpu_t *cp) 2258 { 2259 ASSERT(MUTEX_HELD(&cpu_lock)); 2260 2261 kstat_delete(cp->cpu_info_kstat); 2262 cp->cpu_info_kstat = NULL; 2263 } 2264 2265 /* 2266 * Create and install kstats for the boot CPU. 2267 */ 2268 void 2269 cpu_kstat_init(cpu_t *cp) 2270 { 2271 mutex_enter(&cpu_lock); 2272 cpu_info_kstat_create(cp); 2273 cpu_stats_kstat_create(cp); 2274 cpu_create_intrstat(cp); 2275 cpu_set_state(cp); 2276 mutex_exit(&cpu_lock); 2277 } 2278 2279 /* 2280 * Make visible to the zone that subset of the cpu information that would be 2281 * initialized when a cpu is configured (but still offline). 2282 */ 2283 void 2284 cpu_visibility_configure(cpu_t *cp, zone_t *zone) 2285 { 2286 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES; 2287 2288 ASSERT(MUTEX_HELD(&cpu_lock)); 2289 ASSERT(pool_pset_enabled()); 2290 ASSERT(cp != NULL); 2291 2292 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2293 zone->zone_ncpus++; 2294 ASSERT(zone->zone_ncpus <= ncpus); 2295 } 2296 if (cp->cpu_info_kstat != NULL) 2297 kstat_zone_add(cp->cpu_info_kstat, zoneid); 2298 } 2299 2300 /* 2301 * Make visible to the zone that subset of the cpu information that would be 2302 * initialized when a previously configured cpu is onlined. 2303 */ 2304 void 2305 cpu_visibility_online(cpu_t *cp, zone_t *zone) 2306 { 2307 kstat_t *ksp; 2308 char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */ 2309 zoneid_t zoneid = zone ? 
zone->zone_id : ALL_ZONES; 2310 processorid_t cpun; 2311 2312 ASSERT(MUTEX_HELD(&cpu_lock)); 2313 ASSERT(pool_pset_enabled()); 2314 ASSERT(cp != NULL); 2315 ASSERT(cpu_is_active(cp)); 2316 2317 cpun = cp->cpu_id; 2318 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2319 zone->zone_ncpus_online++; 2320 ASSERT(zone->zone_ncpus_online <= ncpus_online); 2321 } 2322 (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun); 2323 if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES)) 2324 != NULL) { 2325 kstat_zone_add(ksp, zoneid); 2326 kstat_rele(ksp); 2327 } 2328 if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) { 2329 kstat_zone_add(ksp, zoneid); 2330 kstat_rele(ksp); 2331 } 2332 if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) { 2333 kstat_zone_add(ksp, zoneid); 2334 kstat_rele(ksp); 2335 } 2336 if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) != 2337 NULL) { 2338 kstat_zone_add(ksp, zoneid); 2339 kstat_rele(ksp); 2340 } 2341 } 2342 2343 /* 2344 * Update relevant kstats such that cpu is now visible to processes 2345 * executing in specified zone. 2346 */ 2347 void 2348 cpu_visibility_add(cpu_t *cp, zone_t *zone) 2349 { 2350 cpu_visibility_configure(cp, zone); 2351 if (cpu_is_active(cp)) 2352 cpu_visibility_online(cp, zone); 2353 } 2354 2355 /* 2356 * Make invisible to the zone that subset of the cpu information that would be 2357 * torn down when a previously offlined cpu is unconfigured. 2358 */ 2359 void 2360 cpu_visibility_unconfigure(cpu_t *cp, zone_t *zone) 2361 { 2362 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES; 2363 2364 ASSERT(MUTEX_HELD(&cpu_lock)); 2365 ASSERT(pool_pset_enabled()); 2366 ASSERT(cp != NULL); 2367 2368 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2369 ASSERT(zone->zone_ncpus != 0); 2370 zone->zone_ncpus--; 2371 } 2372 if (cp->cpu_info_kstat) 2373 kstat_zone_remove(cp->cpu_info_kstat, zoneid); 2374 } 2375 2376 /* 2377 * Make invisible to the zone that subset of the cpu information that would be 2378 * torn down when a cpu is offlined (but still configured). 2379 */ 2380 void 2381 cpu_visibility_offline(cpu_t *cp, zone_t *zone) 2382 { 2383 kstat_t *ksp; 2384 char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */ 2385 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES; 2386 processorid_t cpun; 2387 2388 ASSERT(MUTEX_HELD(&cpu_lock)); 2389 ASSERT(pool_pset_enabled()); 2390 ASSERT(cp != NULL); 2391 ASSERT(cpu_is_active(cp)); 2392 2393 cpun = cp->cpu_id; 2394 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2395 ASSERT(zone->zone_ncpus_online != 0); 2396 zone->zone_ncpus_online--; 2397 } 2398 2399 if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) != 2400 NULL) { 2401 kstat_zone_remove(ksp, zoneid); 2402 kstat_rele(ksp); 2403 } 2404 if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) { 2405 kstat_zone_remove(ksp, zoneid); 2406 kstat_rele(ksp); 2407 } 2408 if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) { 2409 kstat_zone_remove(ksp, zoneid); 2410 kstat_rele(ksp); 2411 } 2412 (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun); 2413 if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES)) 2414 != NULL) { 2415 kstat_zone_remove(ksp, zoneid); 2416 kstat_rele(ksp); 2417 } 2418 } 2419 2420 /* 2421 * Update relevant kstats such that cpu is no longer visible to processes 2422 * executing in specified zone. 
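 * (This is the inverse of cpu_visibility_add() above: the online-level
 * kstat visibility is torn down first, then the configure-level state,
 * mirroring the configure-then-online order used when adding.)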
2423 */ 2424 void 2425 cpu_visibility_remove(cpu_t *cp, zone_t *zone) 2426 { 2427 if (cpu_is_active(cp)) 2428 cpu_visibility_offline(cp, zone); 2429 cpu_visibility_unconfigure(cp, zone); 2430 } 2431 2432 /* 2433 * Bind a thread to a CPU as requested. 2434 */ 2435 int 2436 cpu_bind_thread(kthread_id_t tp, processorid_t bind, processorid_t *obind, 2437 int *error) 2438 { 2439 processorid_t binding; 2440 cpu_t *cp = NULL; 2441 2442 ASSERT(MUTEX_HELD(&cpu_lock)); 2443 ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock)); 2444 2445 thread_lock(tp); 2446 2447 /* 2448 * Record old binding, but change the obind, which was initialized 2449 * to PBIND_NONE, only if this thread has a binding. This avoids 2450 * reporting PBIND_NONE for a process when some LWPs are bound. 2451 */ 2452 binding = tp->t_bind_cpu; 2453 2454 switch (bind) { 2455 case PBIND_QUERY: 2456 /* Just return the old binding */ 2457 *obind = binding; 2458 thread_unlock(tp); 2459 return (0); 2460 2461 case PBIND_QUERY_TYPE: 2462 /* Return the binding type */ 2463 *obind = TB_CPU_IS_SOFT(tp) ? PBIND_SOFT : PBIND_HARD; 2464 thread_unlock(tp); 2465 return (0); 2466 2467 case PBIND_SOFT: 2468 /* 2469 * Set soft binding for this thread and return the actual 2470 * binding 2471 */ 2472 TB_CPU_SOFT_SET(tp); 2473 *obind = binding; 2474 thread_unlock(tp); 2475 return (0); 2476 2477 case PBIND_HARD: 2478 /* 2479 * Set hard binding for this thread and return the actual 2480 * binding 2481 */ 2482 TB_CPU_HARD_SET(tp); 2483 *obind = binding; 2484 thread_unlock(tp); 2485 return (0); 2486 2487 case PBIND_NONE: 2488 break; 2489 2490 default: 2491 /* record old binding */ 2492 *obind = binding; 2493 break; 2494 } 2495 2496 /* 2497 * If this thread/LWP cannot be bound because of permission 2498 * problems, just note that and return success so that the 2499 * other threads/LWPs will be bound. This is the way 2500 * processor_bind() is defined to work. 2501 * 2502 * Binding will get EPERM if the thread is of system class 2503 * or hasprocperm() fails. 2504 */ 2505 if (tp->t_cid == 0 || !hasprocperm(tp->t_cred, CRED())) { 2506 *error = EPERM; 2507 thread_unlock(tp); 2508 return (0); 2509 } 2510 2511 binding = bind; 2512 if (binding != PBIND_NONE) { 2513 cp = cpu_get((processorid_t)binding); 2514 /* 2515 * Make sure binding is valid and is in right partition. 2516 */ 2517 if (cp == NULL || tp->t_cpupart != cp->cpu_part) { 2518 *error = EINVAL; 2519 thread_unlock(tp); 2520 return (0); 2521 } 2522 } 2523 tp->t_bind_cpu = binding; /* set new binding */ 2524 2525 /* 2526 * If there is no system-set reason for affinity, set 2527 * the t_bound_cpu field to reflect the binding. 2528 */ 2529 if (tp->t_affinitycnt == 0) { 2530 if (binding == PBIND_NONE) { 2531 /* 2532 * We may need to adjust disp_max_unbound_pri 2533 * since we're becoming unbound. 
2534 */ 2535 disp_adjust_unbound_pri(tp); 2536 2537 tp->t_bound_cpu = NULL; /* set new binding */ 2538 2539 /* 2540 * Move thread to lgroup with strongest affinity 2541 * after unbinding 2542 */ 2543 if (tp->t_lgrp_affinity) 2544 lgrp_move_thread(tp, 2545 lgrp_choose(tp, tp->t_cpupart), 1); 2546 2547 if (tp->t_state == TS_ONPROC && 2548 tp->t_cpu->cpu_part != tp->t_cpupart) 2549 cpu_surrender(tp); 2550 } else { 2551 lpl_t *lpl; 2552 2553 tp->t_bound_cpu = cp; 2554 ASSERT(cp->cpu_lpl != NULL); 2555 2556 /* 2557 * Set home to lgroup with most affinity containing CPU 2558 * that thread is being bound or minimum bounding 2559 * lgroup if no affinities set 2560 */ 2561 if (tp->t_lgrp_affinity) 2562 lpl = lgrp_affinity_best(tp, tp->t_cpupart, 2563 LGRP_NONE, B_FALSE); 2564 else 2565 lpl = cp->cpu_lpl; 2566 2567 if (tp->t_lpl != lpl) { 2568 /* can't grab cpu_lock */ 2569 lgrp_move_thread(tp, lpl, 1); 2570 } 2571 2572 /* 2573 * Make the thread switch to the bound CPU. 2574 * If the thread is runnable, we need to 2575 * requeue it even if t_cpu is already set 2576 * to the right CPU, since it may be on a 2577 * kpreempt queue and need to move to a local 2578 * queue. We could check t_disp_queue to 2579 * avoid unnecessary overhead if it's already 2580 * on the right queue, but since this isn't 2581 * a performance-critical operation it doesn't 2582 * seem worth the extra code and complexity. 2583 * 2584 * If the thread is weakbound to the cpu then it will 2585 * resist the new binding request until the weak 2586 * binding drops. The cpu_surrender or requeueing 2587 * below could be skipped in such cases (since it 2588 * will have no effect), but that would require 2589 * thread_allowmigrate to acquire thread_lock so 2590 * we'll take the very occasional hit here instead. 2591 */ 2592 if (tp->t_state == TS_ONPROC) { 2593 cpu_surrender(tp); 2594 } else if (tp->t_state == TS_RUN) { 2595 cpu_t *ocp = tp->t_cpu; 2596 2597 (void) dispdeq(tp); 2598 setbackdq(tp); 2599 /* 2600 * Either on the bound CPU's disp queue now, 2601 * or swapped out or on the swap queue. 2602 */ 2603 ASSERT(tp->t_disp_queue == cp->cpu_disp || 2604 tp->t_weakbound_cpu == ocp || 2605 (tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) 2606 != TS_LOAD); 2607 } 2608 } 2609 } 2610 2611 /* 2612 * Our binding has changed; set TP_CHANGEBIND. 2613 */ 2614 tp->t_proc_flag |= TP_CHANGEBIND; 2615 aston(tp); 2616 2617 thread_unlock(tp); 2618 2619 return (0); 2620 } 2621 2622 #if CPUSET_WORDS > 1 2623 2624 /* 2625 * Functions for implementing cpuset operations when a cpuset is more 2626 * than one word. On platforms where a cpuset is a single word these 2627 * are implemented as macros in cpuvar.h. 
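 * (The layout assumed here is that CPU id n maps to bit (n % BT_NBIPUL)
 * of word cpub[n / BT_NBIPUL]; e.g. with 64-bit words, CPU 70 is bit 6
 * of cpub[1]. cpuset_find() and cpuset_bounds() below reverse that
 * mapping using lowbit() and highbit().)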
2628 */ 2629 2630 void 2631 cpuset_all(cpuset_t *s) 2632 { 2633 int i; 2634 2635 for (i = 0; i < CPUSET_WORDS; i++) 2636 s->cpub[i] = ~0UL; 2637 } 2638 2639 void 2640 cpuset_all_but(cpuset_t *s, uint_t cpu) 2641 { 2642 cpuset_all(s); 2643 CPUSET_DEL(*s, cpu); 2644 } 2645 2646 void 2647 cpuset_only(cpuset_t *s, uint_t cpu) 2648 { 2649 CPUSET_ZERO(*s); 2650 CPUSET_ADD(*s, cpu); 2651 } 2652 2653 int 2654 cpuset_isnull(cpuset_t *s) 2655 { 2656 int i; 2657 2658 for (i = 0; i < CPUSET_WORDS; i++) 2659 if (s->cpub[i] != 0) 2660 return (0); 2661 return (1); 2662 } 2663 2664 int 2665 cpuset_cmp(cpuset_t *s1, cpuset_t *s2) 2666 { 2667 int i; 2668 2669 for (i = 0; i < CPUSET_WORDS; i++) 2670 if (s1->cpub[i] != s2->cpub[i]) 2671 return (0); 2672 return (1); 2673 } 2674 2675 uint_t 2676 cpuset_find(cpuset_t *s) 2677 { 2678 2679 uint_t i; 2680 uint_t cpu = (uint_t)-1; 2681 2682 /* 2683 * Find a cpu in the cpuset 2684 */ 2685 for (i = 0; i < CPUSET_WORDS; i++) { 2686 cpu = (uint_t)(lowbit(s->cpub[i]) - 1); 2687 if (cpu != (uint_t)-1) { 2688 cpu += i * BT_NBIPUL; 2689 break; 2690 } 2691 } 2692 return (cpu); 2693 } 2694 2695 void 2696 cpuset_bounds(cpuset_t *s, uint_t *smallestid, uint_t *largestid) 2697 { 2698 int i, j; 2699 uint_t bit; 2700 2701 /* 2702 * First, find the smallest cpu id in the set. 2703 */ 2704 for (i = 0; i < CPUSET_WORDS; i++) { 2705 if (s->cpub[i] != 0) { 2706 bit = (uint_t)(lowbit(s->cpub[i]) - 1); 2707 ASSERT(bit != (uint_t)-1); 2708 *smallestid = bit + (i * BT_NBIPUL); 2709 2710 /* 2711 * Now find the largest cpu id in 2712 * the set and return immediately. 2713 * Done in an inner loop to avoid 2714 * having to break out of the first 2715 * loop. 2716 */ 2717 for (j = CPUSET_WORDS - 1; j >= i; j--) { 2718 if (s->cpub[j] != 0) { 2719 bit = (uint_t)(highbit(s->cpub[j]) - 1); 2720 ASSERT(bit != (uint_t)-1); 2721 *largestid = bit + (j * BT_NBIPUL); 2722 ASSERT(*largestid >= *smallestid); 2723 return; 2724 } 2725 } 2726 2727 /* 2728 * If this code is reached, a 2729 * smallestid was found, but not a 2730 * largestid. The cpuset must have 2731 * been changed during the course 2732 * of this function call. 2733 */ 2734 ASSERT(0); 2735 } 2736 } 2737 *smallestid = *largestid = CPUSET_NOTINSET; 2738 } 2739 2740 #endif /* CPUSET_WORDS */ 2741 2742 /* 2743 * Unbind threads bound to specified CPU. 2744 * 2745 * If `unbind_all_threads' is true, unbind all user threads bound to a given 2746 * CPU. Otherwise unbind all soft-bound user threads. 2747 */ 2748 int 2749 cpu_unbind(processorid_t cpu, boolean_t unbind_all_threads) 2750 { 2751 processorid_t obind; 2752 kthread_t *tp; 2753 int ret = 0; 2754 proc_t *pp; 2755 int err, berr = 0; 2756 2757 ASSERT(MUTEX_HELD(&cpu_lock)); 2758 2759 mutex_enter(&pidlock); 2760 for (pp = practive; pp != NULL; pp = pp->p_next) { 2761 mutex_enter(&pp->p_lock); 2762 tp = pp->p_tlist; 2763 /* 2764 * Skip zombies, kernel processes, and processes in 2765 * other zones, if called from a non-global zone. 2766 */ 2767 if (tp == NULL || (pp->p_flag & SSYS) || 2768 !HASZONEACCESS(curproc, pp->p_zone->zone_id)) { 2769 mutex_exit(&pp->p_lock); 2770 continue; 2771 } 2772 do { 2773 if (tp->t_bind_cpu != cpu) 2774 continue; 2775 /* 2776 * Skip threads with hard binding when 2777 * `unbind_all_threads' is not specified. 
if (!unbind_all_threads && TB_CPU_IS_HARD(tp)) 2780 continue; 2781 err = cpu_bind_thread(tp, PBIND_NONE, &obind, &berr); 2782 if (ret == 0) 2783 ret = err; 2784 } while ((tp = tp->t_forw) != pp->p_tlist); 2785 mutex_exit(&pp->p_lock); 2786 } 2787 mutex_exit(&pidlock); 2788 if (ret == 0) 2789 ret = berr; 2790 return (ret); 2791 } 2792 2793 2794 /* 2795 * Destroy all remaining bound threads on a cpu. 2796 */ 2797 void 2798 cpu_destroy_bound_threads(cpu_t *cp) 2799 { 2800 extern id_t syscid; 2801 register kthread_id_t t, tlist, tnext; 2802 2803 /* 2804 * Destroy all remaining bound threads on the cpu. This 2805 * should include both the interrupt threads and the idle thread. 2806 * This requires some care, since we need to traverse the 2807 * thread list with the pidlock mutex locked, but thread_free 2808 * also locks the pidlock mutex. So, we collect the threads 2809 * we're going to reap in a list headed by "tlist", then we 2810 * unlock the pidlock mutex and traverse the tlist list, 2811 * doing thread_free's on the threads. Simple, n'est-ce pas? 2812 * Also, this depends on thread_free not mucking with the 2813 * t_next and t_prev links of the thread. 2814 */ 2815 2816 if ((t = curthread) != NULL) { 2817 2818 tlist = NULL; 2819 mutex_enter(&pidlock); 2820 do { 2821 tnext = t->t_next; 2822 if (t->t_bound_cpu == cp) { 2823 2824 /* 2825 * We've found a bound thread, carefully unlink 2826 * it from the thread list, and add it to 2827 * our "tlist". We "know" we don't have to 2828 * worry about unlinking curthread (the thread 2829 * that is executing this code). 2830 */ 2831 t->t_next->t_prev = t->t_prev; 2832 t->t_prev->t_next = t->t_next; 2833 t->t_next = tlist; 2834 tlist = t; 2835 ASSERT(t->t_cid == syscid); 2836 /* wake up anyone blocked in thread_join */ 2837 cv_broadcast(&t->t_joincv); 2838 /* 2839 * t_lwp set by interrupt threads and not 2840 * cleared. 2841 */ 2842 t->t_lwp = NULL; 2843 /* 2844 * Pause and idle threads always have 2845 * t_state set to TS_ONPROC. 2846 */ 2847 t->t_state = TS_FREE; 2848 t->t_prev = NULL; /* Just in case */ 2849 } 2850 2851 } while ((t = tnext) != curthread); 2852 2853 mutex_exit(&pidlock); 2854 2855 mutex_sync(); 2856 for (t = tlist; t != NULL; t = tnext) { 2857 tnext = t->t_next; 2858 thread_free(t); 2859 } 2860 } 2861 } 2862 2863 /* 2864 * Update the cpu_supp_freqs of this cpu. This information is returned 2865 * as part of cpu_info kstats. If the cpu_info_kstat exists already, then 2866 * maintain the kstat data size. 2867 */ 2868 void 2869 cpu_set_supp_freqs(cpu_t *cp, const char *freqs) 2870 { 2871 char clkstr[sizeof ("18446744073709551615") + 1]; /* ui64 MAX */ 2872 const char *lfreqs = clkstr; 2873 boolean_t kstat_exists = B_FALSE; 2874 kstat_t *ksp; 2875 size_t len; 2876 2877 /* 2878 * A NULL pointer means we only support one speed. 2879 */ 2880 if (freqs == NULL) 2881 (void) snprintf(clkstr, sizeof (clkstr), "%"PRIu64, 2882 cp->cpu_curr_clock); 2883 else 2884 lfreqs = freqs; 2885 2886 /* 2887 * Make sure the frequency doesn't change while a snapshot is 2888 * going on. Of course, we only need to worry about this if 2889 * the kstat exists. 2890 */ 2891 if ((ksp = cp->cpu_info_kstat) != NULL) { 2892 mutex_enter(ksp->ks_lock); 2893 kstat_exists = B_TRUE; 2894 } 2895 2896 /* 2897 * Free any previously allocated string and, if the kstat 2898 * already exists, update its data size.
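 * (ks_data_size is adjusted because the cpu_info kstat is virtual and
 * its ci_supp_freq_Hz entry is a KSTAT_DATA_STRING pointing at this
 * buffer; the kstat framework sizes reader snapshots from ks_data_size,
 * so it must grow and shrink along with the string.)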
2899 */ 2900 if (cp->cpu_supp_freqs != NULL) { 2901 len = strlen(cp->cpu_supp_freqs) + 1; 2902 kmem_free(cp->cpu_supp_freqs, len); 2903 if (kstat_exists) 2904 ksp->ks_data_size -= len; 2905 } 2906 2907 /* 2908 * Allocate the new string and set the pointer. 2909 */ 2910 len = strlen(lfreqs) + 1; 2911 cp->cpu_supp_freqs = kmem_alloc(len, KM_SLEEP); 2912 (void) strcpy(cp->cpu_supp_freqs, lfreqs); 2913 2914 /* 2915 * If the kstat already exists then update the data size and 2916 * free the lock. 2917 */ 2918 if (kstat_exists) { 2919 ksp->ks_data_size += len; 2920 mutex_exit(ksp->ks_lock); 2921 } 2922 } 2923 2924 /* 2925 * processor_info(2) and p_online(2) status support functions 2926 * The constants returned by the cpu_get_state() and cpu_get_state_str() are 2927 * for use in communicating processor state information to userland. Kernel 2928 * subsystems should only be using the cpu_flags value directly. Subsystems 2929 * modifying cpu_flags should record the state change via a call to the 2930 * cpu_set_state(). 2931 */ 2932 2933 /* 2934 * Update the pi_state of this CPU. This function provides the CPU status for 2935 * the information returned by processor_info(2). 2936 */ 2937 void 2938 cpu_set_state(cpu_t *cpu) 2939 { 2940 ASSERT(MUTEX_HELD(&cpu_lock)); 2941 cpu->cpu_type_info.pi_state = cpu_get_state(cpu); 2942 cpu->cpu_state_begin = gethrestime_sec(); 2943 pool_cpu_mod = gethrtime(); 2944 } 2945 2946 /* 2947 * Return offline/online/other status for the indicated CPU. Use only for 2948 * communication with user applications; cpu_flags provides the in-kernel 2949 * interface. 2950 */ 2951 int 2952 cpu_get_state(cpu_t *cpu) 2953 { 2954 ASSERT(MUTEX_HELD(&cpu_lock)); 2955 if (cpu->cpu_flags & CPU_POWEROFF) 2956 return (P_POWEROFF); 2957 else if (cpu->cpu_flags & CPU_FAULTED) 2958 return (P_FAULTED); 2959 else if (cpu->cpu_flags & CPU_SPARE) 2960 return (P_SPARE); 2961 else if ((cpu->cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY) 2962 return (P_OFFLINE); 2963 else if (cpu->cpu_flags & CPU_ENABLE) 2964 return (P_ONLINE); 2965 else 2966 return (P_NOINTR); 2967 } 2968 2969 /* 2970 * Return processor_info(2) state as a string. 2971 */ 2972 const char * 2973 cpu_get_state_str(cpu_t *cpu) 2974 { 2975 const char *string; 2976 2977 switch (cpu_get_state(cpu)) { 2978 case P_ONLINE: 2979 string = PS_ONLINE; 2980 break; 2981 case P_POWEROFF: 2982 string = PS_POWEROFF; 2983 break; 2984 case P_NOINTR: 2985 string = PS_NOINTR; 2986 break; 2987 case P_SPARE: 2988 string = PS_SPARE; 2989 break; 2990 case P_FAULTED: 2991 string = PS_FAULTED; 2992 break; 2993 case P_OFFLINE: 2994 string = PS_OFFLINE; 2995 break; 2996 default: 2997 string = "unknown"; 2998 break; 2999 } 3000 return (string); 3001 } 3002 3003 /* 3004 * Export this CPU's statistics (cpu_stat_t and cpu_stats_t) as raw and named 3005 * kstats, respectively. This is done when a CPU is initialized or placed 3006 * online via p_online(2). 
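 * (The kstats created below are the named cpu:<id>:sys and cpu:<id>:vm
 * kstats plus the legacy raw cpu_stat:<id>:cpu_stat<id> kstat; e.g.
 * "kstat -p cpu:0:sys" with kstat(1M) dumps the per-CPU sys counters.
 * The command is an illustrative example, not part of this file.)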
3007 */ 3008 static void 3009 cpu_stats_kstat_create(cpu_t *cp) 3010 { 3011 int instance = cp->cpu_id; 3012 char *module = "cpu"; 3013 char *class = "misc"; 3014 kstat_t *ksp; 3015 zoneid_t zoneid; 3016 3017 ASSERT(MUTEX_HELD(&cpu_lock)); 3018 3019 if (pool_pset_enabled()) 3020 zoneid = GLOBAL_ZONEID; 3021 else 3022 zoneid = ALL_ZONES; 3023 /* 3024 * Create named kstats 3025 */ 3026 #define CPU_STATS_KS_CREATE(name, tsize, update_func) \ 3027 ksp = kstat_create_zone(module, instance, (name), class, \ 3028 KSTAT_TYPE_NAMED, (tsize) / sizeof (kstat_named_t), 0, \ 3029 zoneid); \ 3030 if (ksp != NULL) { \ 3031 ksp->ks_private = cp; \ 3032 ksp->ks_update = (update_func); \ 3033 kstat_install(ksp); \ 3034 } else \ 3035 cmn_err(CE_WARN, "cpu: unable to create %s:%d:%s kstat", \ 3036 module, instance, (name)); 3037 3038 CPU_STATS_KS_CREATE("sys", sizeof (cpu_sys_stats_ks_data_template), 3039 cpu_sys_stats_ks_update); 3040 CPU_STATS_KS_CREATE("vm", sizeof (cpu_vm_stats_ks_data_template), 3041 cpu_vm_stats_ks_update); 3042 3043 /* 3044 * Export the familiar cpu_stat_t KSTAT_TYPE_RAW kstat. 3045 */ 3046 ksp = kstat_create_zone("cpu_stat", cp->cpu_id, NULL, 3047 "misc", KSTAT_TYPE_RAW, sizeof (cpu_stat_t), 0, zoneid); 3048 if (ksp != NULL) { 3049 ksp->ks_update = cpu_stat_ks_update; 3050 ksp->ks_private = cp; 3051 kstat_install(ksp); 3052 } 3053 } 3054 3055 static void 3056 cpu_stats_kstat_destroy(cpu_t *cp) 3057 { 3058 char ks_name[KSTAT_STRLEN]; 3059 3060 (void) sprintf(ks_name, "cpu_stat%d", cp->cpu_id); 3061 kstat_delete_byname("cpu_stat", cp->cpu_id, ks_name); 3062 3063 kstat_delete_byname("cpu", cp->cpu_id, "sys"); 3064 kstat_delete_byname("cpu", cp->cpu_id, "vm"); 3065 } 3066 3067 static int 3068 cpu_sys_stats_ks_update(kstat_t *ksp, int rw) 3069 { 3070 cpu_t *cp = (cpu_t *)ksp->ks_private; 3071 struct cpu_sys_stats_ks_data *csskd; 3072 cpu_sys_stats_t *css; 3073 hrtime_t msnsecs[NCMSTATES]; 3074 int i; 3075 3076 if (rw == KSTAT_WRITE) 3077 return (EACCES); 3078 3079 csskd = ksp->ks_data; 3080 css = &cp->cpu_stats.sys; 3081 3082 /* 3083 * Read CPU mstate, but compare with the last values we 3084 * received to make sure that the returned kstats never 3085 * decrease. 
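 * (That is, each of the idle/user/kernel microstate times reported is
 * the maximum of the freshly read value and the value last exported,
 * so consumers polling this kstat never see cpu_nsec_* move backwards.)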
3086 */ 3087 3088 get_cpu_mstate(cp, msnsecs); 3089 if (csskd->cpu_nsec_idle.value.ui64 > msnsecs[CMS_IDLE]) 3090 msnsecs[CMS_IDLE] = csskd->cpu_nsec_idle.value.ui64; 3091 if (csskd->cpu_nsec_user.value.ui64 > msnsecs[CMS_USER]) 3092 msnsecs[CMS_USER] = csskd->cpu_nsec_user.value.ui64; 3093 if (csskd->cpu_nsec_kernel.value.ui64 > msnsecs[CMS_SYSTEM]) 3094 msnsecs[CMS_SYSTEM] = csskd->cpu_nsec_kernel.value.ui64; 3095 3096 bcopy(&cpu_sys_stats_ks_data_template, ksp->ks_data, 3097 sizeof (cpu_sys_stats_ks_data_template)); 3098 3099 csskd->cpu_ticks_wait.value.ui64 = 0; 3100 csskd->wait_ticks_io.value.ui64 = 0; 3101 3102 csskd->cpu_nsec_idle.value.ui64 = msnsecs[CMS_IDLE]; 3103 csskd->cpu_nsec_user.value.ui64 = msnsecs[CMS_USER]; 3104 csskd->cpu_nsec_kernel.value.ui64 = msnsecs[CMS_SYSTEM]; 3105 csskd->cpu_ticks_idle.value.ui64 = 3106 NSEC_TO_TICK(csskd->cpu_nsec_idle.value.ui64); 3107 csskd->cpu_ticks_user.value.ui64 = 3108 NSEC_TO_TICK(csskd->cpu_nsec_user.value.ui64); 3109 csskd->cpu_ticks_kernel.value.ui64 = 3110 NSEC_TO_TICK(csskd->cpu_nsec_kernel.value.ui64); 3111 csskd->cpu_nsec_intr.value.ui64 = cp->cpu_intrlast; 3112 csskd->cpu_load_intr.value.ui64 = cp->cpu_intrload; 3113 csskd->bread.value.ui64 = css->bread; 3114 csskd->bwrite.value.ui64 = css->bwrite; 3115 csskd->lread.value.ui64 = css->lread; 3116 csskd->lwrite.value.ui64 = css->lwrite; 3117 csskd->phread.value.ui64 = css->phread; 3118 csskd->phwrite.value.ui64 = css->phwrite; 3119 csskd->pswitch.value.ui64 = css->pswitch; 3120 csskd->trap.value.ui64 = css->trap; 3121 csskd->intr.value.ui64 = 0; 3122 for (i = 0; i < PIL_MAX; i++) 3123 csskd->intr.value.ui64 += css->intr[i]; 3124 csskd->syscall.value.ui64 = css->syscall; 3125 csskd->sysread.value.ui64 = css->sysread; 3126 csskd->syswrite.value.ui64 = css->syswrite; 3127 csskd->sysfork.value.ui64 = css->sysfork; 3128 csskd->sysvfork.value.ui64 = css->sysvfork; 3129 csskd->sysexec.value.ui64 = css->sysexec; 3130 csskd->readch.value.ui64 = css->readch; 3131 csskd->writech.value.ui64 = css->writech; 3132 csskd->rcvint.value.ui64 = css->rcvint; 3133 csskd->xmtint.value.ui64 = css->xmtint; 3134 csskd->mdmint.value.ui64 = css->mdmint; 3135 csskd->rawch.value.ui64 = css->rawch; 3136 csskd->canch.value.ui64 = css->canch; 3137 csskd->outch.value.ui64 = css->outch; 3138 csskd->msg.value.ui64 = css->msg; 3139 csskd->sema.value.ui64 = css->sema; 3140 csskd->namei.value.ui64 = css->namei; 3141 csskd->ufsiget.value.ui64 = css->ufsiget; 3142 csskd->ufsdirblk.value.ui64 = css->ufsdirblk; 3143 csskd->ufsipage.value.ui64 = css->ufsipage; 3144 csskd->ufsinopage.value.ui64 = css->ufsinopage; 3145 csskd->procovf.value.ui64 = css->procovf; 3146 csskd->intrthread.value.ui64 = 0; 3147 for (i = 0; i < LOCK_LEVEL - 1; i++) 3148 csskd->intrthread.value.ui64 += css->intr[i]; 3149 csskd->intrblk.value.ui64 = css->intrblk; 3150 csskd->intrunpin.value.ui64 = css->intrunpin; 3151 csskd->idlethread.value.ui64 = css->idlethread; 3152 csskd->inv_swtch.value.ui64 = css->inv_swtch; 3153 csskd->nthreads.value.ui64 = css->nthreads; 3154 csskd->cpumigrate.value.ui64 = css->cpumigrate; 3155 csskd->xcalls.value.ui64 = css->xcalls; 3156 csskd->mutex_adenters.value.ui64 = css->mutex_adenters; 3157 csskd->rw_rdfails.value.ui64 = css->rw_rdfails; 3158 csskd->rw_wrfails.value.ui64 = css->rw_wrfails; 3159 csskd->modload.value.ui64 = css->modload; 3160 csskd->modunload.value.ui64 = css->modunload; 3161 csskd->bawrite.value.ui64 = css->bawrite; 3162 csskd->iowait.value.ui64 = css->iowait; 3163 3164 return (0); 3165 } 3166 3167 
static int 3168 cpu_vm_stats_ks_update(kstat_t *ksp, int rw) 3169 { 3170 cpu_t *cp = (cpu_t *)ksp->ks_private; 3171 struct cpu_vm_stats_ks_data *cvskd; 3172 cpu_vm_stats_t *cvs; 3173 3174 if (rw == KSTAT_WRITE) 3175 return (EACCES); 3176 3177 cvs = &cp->cpu_stats.vm; 3178 cvskd = ksp->ks_data; 3179 3180 bcopy(&cpu_vm_stats_ks_data_template, ksp->ks_data, 3181 sizeof (cpu_vm_stats_ks_data_template)); 3182 cvskd->pgrec.value.ui64 = cvs->pgrec; 3183 cvskd->pgfrec.value.ui64 = cvs->pgfrec; 3184 cvskd->pgin.value.ui64 = cvs->pgin; 3185 cvskd->pgpgin.value.ui64 = cvs->pgpgin; 3186 cvskd->pgout.value.ui64 = cvs->pgout; 3187 cvskd->pgpgout.value.ui64 = cvs->pgpgout; 3188 cvskd->swapin.value.ui64 = cvs->swapin; 3189 cvskd->pgswapin.value.ui64 = cvs->pgswapin; 3190 cvskd->swapout.value.ui64 = cvs->swapout; 3191 cvskd->pgswapout.value.ui64 = cvs->pgswapout; 3192 cvskd->zfod.value.ui64 = cvs->zfod; 3193 cvskd->dfree.value.ui64 = cvs->dfree; 3194 cvskd->scan.value.ui64 = cvs->scan; 3195 cvskd->rev.value.ui64 = cvs->rev; 3196 cvskd->hat_fault.value.ui64 = cvs->hat_fault; 3197 cvskd->as_fault.value.ui64 = cvs->as_fault; 3198 cvskd->maj_fault.value.ui64 = cvs->maj_fault; 3199 cvskd->cow_fault.value.ui64 = cvs->cow_fault; 3200 cvskd->prot_fault.value.ui64 = cvs->prot_fault; 3201 cvskd->softlock.value.ui64 = cvs->softlock; 3202 cvskd->kernel_asflt.value.ui64 = cvs->kernel_asflt; 3203 cvskd->pgrrun.value.ui64 = cvs->pgrrun; 3204 cvskd->execpgin.value.ui64 = cvs->execpgin; 3205 cvskd->execpgout.value.ui64 = cvs->execpgout; 3206 cvskd->execfree.value.ui64 = cvs->execfree; 3207 cvskd->anonpgin.value.ui64 = cvs->anonpgin; 3208 cvskd->anonpgout.value.ui64 = cvs->anonpgout; 3209 cvskd->anonfree.value.ui64 = cvs->anonfree; 3210 cvskd->fspgin.value.ui64 = cvs->fspgin; 3211 cvskd->fspgout.value.ui64 = cvs->fspgout; 3212 cvskd->fsfree.value.ui64 = cvs->fsfree; 3213 3214 return (0); 3215 } 3216 3217 static int 3218 cpu_stat_ks_update(kstat_t *ksp, int rw) 3219 { 3220 cpu_stat_t *cso; 3221 cpu_t *cp; 3222 int i; 3223 hrtime_t msnsecs[NCMSTATES]; 3224 3225 cso = (cpu_stat_t *)ksp->ks_data; 3226 cp = (cpu_t *)ksp->ks_private; 3227 3228 if (rw == KSTAT_WRITE) 3229 return (EACCES); 3230 3231 /* 3232 * Read CPU mstate, but compare with the last values we 3233 * received to make sure that the returned kstats never 3234 * decrease. 
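 * (Same never-decrease clamp as in cpu_sys_stats_ks_update(), except
 * that the microstate times are first converted from nanoseconds to
 * clock ticks with NSEC_TO_TICK(), since the legacy cpu_stat_t layout
 * reports CPU_IDLE/CPU_USER/CPU_KERNEL in ticks.)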
3235 */ 3236 3237 get_cpu_mstate(cp, msnsecs); 3238 msnsecs[CMS_IDLE] = NSEC_TO_TICK(msnsecs[CMS_IDLE]); 3239 msnsecs[CMS_USER] = NSEC_TO_TICK(msnsecs[CMS_USER]); 3240 msnsecs[CMS_SYSTEM] = NSEC_TO_TICK(msnsecs[CMS_SYSTEM]); 3241 if (cso->cpu_sysinfo.cpu[CPU_IDLE] < msnsecs[CMS_IDLE]) 3242 cso->cpu_sysinfo.cpu[CPU_IDLE] = msnsecs[CMS_IDLE]; 3243 if (cso->cpu_sysinfo.cpu[CPU_USER] < msnsecs[CMS_USER]) 3244 cso->cpu_sysinfo.cpu[CPU_USER] = msnsecs[CMS_USER]; 3245 if (cso->cpu_sysinfo.cpu[CPU_KERNEL] < msnsecs[CMS_SYSTEM]) 3246 cso->cpu_sysinfo.cpu[CPU_KERNEL] = msnsecs[CMS_SYSTEM]; 3247 cso->cpu_sysinfo.cpu[CPU_WAIT] = 0; 3248 cso->cpu_sysinfo.wait[W_IO] = 0; 3249 cso->cpu_sysinfo.wait[W_SWAP] = 0; 3250 cso->cpu_sysinfo.wait[W_PIO] = 0; 3251 cso->cpu_sysinfo.bread = CPU_STATS(cp, sys.bread); 3252 cso->cpu_sysinfo.bwrite = CPU_STATS(cp, sys.bwrite); 3253 cso->cpu_sysinfo.lread = CPU_STATS(cp, sys.lread); 3254 cso->cpu_sysinfo.lwrite = CPU_STATS(cp, sys.lwrite); 3255 cso->cpu_sysinfo.phread = CPU_STATS(cp, sys.phread); 3256 cso->cpu_sysinfo.phwrite = CPU_STATS(cp, sys.phwrite); 3257 cso->cpu_sysinfo.pswitch = CPU_STATS(cp, sys.pswitch); 3258 cso->cpu_sysinfo.trap = CPU_STATS(cp, sys.trap); 3259 cso->cpu_sysinfo.intr = 0; 3260 for (i = 0; i < PIL_MAX; i++) 3261 cso->cpu_sysinfo.intr += CPU_STATS(cp, sys.intr[i]); 3262 cso->cpu_sysinfo.syscall = CPU_STATS(cp, sys.syscall); 3263 cso->cpu_sysinfo.sysread = CPU_STATS(cp, sys.sysread); 3264 cso->cpu_sysinfo.syswrite = CPU_STATS(cp, sys.syswrite); 3265 cso->cpu_sysinfo.sysfork = CPU_STATS(cp, sys.sysfork); 3266 cso->cpu_sysinfo.sysvfork = CPU_STATS(cp, sys.sysvfork); 3267 cso->cpu_sysinfo.sysexec = CPU_STATS(cp, sys.sysexec); 3268 cso->cpu_sysinfo.readch = CPU_STATS(cp, sys.readch); 3269 cso->cpu_sysinfo.writech = CPU_STATS(cp, sys.writech); 3270 cso->cpu_sysinfo.rcvint = CPU_STATS(cp, sys.rcvint); 3271 cso->cpu_sysinfo.xmtint = CPU_STATS(cp, sys.xmtint); 3272 cso->cpu_sysinfo.mdmint = CPU_STATS(cp, sys.mdmint); 3273 cso->cpu_sysinfo.rawch = CPU_STATS(cp, sys.rawch); 3274 cso->cpu_sysinfo.canch = CPU_STATS(cp, sys.canch); 3275 cso->cpu_sysinfo.outch = CPU_STATS(cp, sys.outch); 3276 cso->cpu_sysinfo.msg = CPU_STATS(cp, sys.msg); 3277 cso->cpu_sysinfo.sema = CPU_STATS(cp, sys.sema); 3278 cso->cpu_sysinfo.namei = CPU_STATS(cp, sys.namei); 3279 cso->cpu_sysinfo.ufsiget = CPU_STATS(cp, sys.ufsiget); 3280 cso->cpu_sysinfo.ufsdirblk = CPU_STATS(cp, sys.ufsdirblk); 3281 cso->cpu_sysinfo.ufsipage = CPU_STATS(cp, sys.ufsipage); 3282 cso->cpu_sysinfo.ufsinopage = CPU_STATS(cp, sys.ufsinopage); 3283 cso->cpu_sysinfo.inodeovf = 0; 3284 cso->cpu_sysinfo.fileovf = 0; 3285 cso->cpu_sysinfo.procovf = CPU_STATS(cp, sys.procovf); 3286 cso->cpu_sysinfo.intrthread = 0; 3287 for (i = 0; i < LOCK_LEVEL - 1; i++) 3288 cso->cpu_sysinfo.intrthread += CPU_STATS(cp, sys.intr[i]); 3289 cso->cpu_sysinfo.intrblk = CPU_STATS(cp, sys.intrblk); 3290 cso->cpu_sysinfo.idlethread = CPU_STATS(cp, sys.idlethread); 3291 cso->cpu_sysinfo.inv_swtch = CPU_STATS(cp, sys.inv_swtch); 3292 cso->cpu_sysinfo.nthreads = CPU_STATS(cp, sys.nthreads); 3293 cso->cpu_sysinfo.cpumigrate = CPU_STATS(cp, sys.cpumigrate); 3294 cso->cpu_sysinfo.xcalls = CPU_STATS(cp, sys.xcalls); 3295 cso->cpu_sysinfo.mutex_adenters = CPU_STATS(cp, sys.mutex_adenters); 3296 cso->cpu_sysinfo.rw_rdfails = CPU_STATS(cp, sys.rw_rdfails); 3297 cso->cpu_sysinfo.rw_wrfails = CPU_STATS(cp, sys.rw_wrfails); 3298 cso->cpu_sysinfo.modload = CPU_STATS(cp, sys.modload); 3299 cso->cpu_sysinfo.modunload = CPU_STATS(cp, sys.modunload); 3300 
cso->cpu_sysinfo.bawrite = CPU_STATS(cp, sys.bawrite); 3301 cso->cpu_sysinfo.rw_enters = 0; 3302 cso->cpu_sysinfo.win_uo_cnt = 0; 3303 cso->cpu_sysinfo.win_uu_cnt = 0; 3304 cso->cpu_sysinfo.win_so_cnt = 0; 3305 cso->cpu_sysinfo.win_su_cnt = 0; 3306 cso->cpu_sysinfo.win_suo_cnt = 0; 3307 3308 cso->cpu_syswait.iowait = CPU_STATS(cp, sys.iowait); 3309 cso->cpu_syswait.swap = 0; 3310 cso->cpu_syswait.physio = 0; 3311 3312 cso->cpu_vminfo.pgrec = CPU_STATS(cp, vm.pgrec); 3313 cso->cpu_vminfo.pgfrec = CPU_STATS(cp, vm.pgfrec); 3314 cso->cpu_vminfo.pgin = CPU_STATS(cp, vm.pgin); 3315 cso->cpu_vminfo.pgpgin = CPU_STATS(cp, vm.pgpgin); 3316 cso->cpu_vminfo.pgout = CPU_STATS(cp, vm.pgout); 3317 cso->cpu_vminfo.pgpgout = CPU_STATS(cp, vm.pgpgout); 3318 cso->cpu_vminfo.swapin = CPU_STATS(cp, vm.swapin); 3319 cso->cpu_vminfo.pgswapin = CPU_STATS(cp, vm.pgswapin); 3320 cso->cpu_vminfo.swapout = CPU_STATS(cp, vm.swapout); 3321 cso->cpu_vminfo.pgswapout = CPU_STATS(cp, vm.pgswapout); 3322 cso->cpu_vminfo.zfod = CPU_STATS(cp, vm.zfod); 3323 cso->cpu_vminfo.dfree = CPU_STATS(cp, vm.dfree); 3324 cso->cpu_vminfo.scan = CPU_STATS(cp, vm.scan); 3325 cso->cpu_vminfo.rev = CPU_STATS(cp, vm.rev); 3326 cso->cpu_vminfo.hat_fault = CPU_STATS(cp, vm.hat_fault); 3327 cso->cpu_vminfo.as_fault = CPU_STATS(cp, vm.as_fault); 3328 cso->cpu_vminfo.maj_fault = CPU_STATS(cp, vm.maj_fault); 3329 cso->cpu_vminfo.cow_fault = CPU_STATS(cp, vm.cow_fault); 3330 cso->cpu_vminfo.prot_fault = CPU_STATS(cp, vm.prot_fault); 3331 cso->cpu_vminfo.softlock = CPU_STATS(cp, vm.softlock); 3332 cso->cpu_vminfo.kernel_asflt = CPU_STATS(cp, vm.kernel_asflt); 3333 cso->cpu_vminfo.pgrrun = CPU_STATS(cp, vm.pgrrun); 3334 cso->cpu_vminfo.execpgin = CPU_STATS(cp, vm.execpgin); 3335 cso->cpu_vminfo.execpgout = CPU_STATS(cp, vm.execpgout); 3336 cso->cpu_vminfo.execfree = CPU_STATS(cp, vm.execfree); 3337 cso->cpu_vminfo.anonpgin = CPU_STATS(cp, vm.anonpgin); 3338 cso->cpu_vminfo.anonpgout = CPU_STATS(cp, vm.anonpgout); 3339 cso->cpu_vminfo.anonfree = CPU_STATS(cp, vm.anonfree); 3340 cso->cpu_vminfo.fspgin = CPU_STATS(cp, vm.fspgin); 3341 cso->cpu_vminfo.fspgout = CPU_STATS(cp, vm.fspgout); 3342 cso->cpu_vminfo.fsfree = CPU_STATS(cp, vm.fsfree); 3343 3344 return (0); 3345 } 3346