/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Architecture-independent CPU control functions.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/var.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/cpu_event.h>
#include <sys/kstat.h>
#include <sys/uadmin.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/procset.h>
#include <sys/processor.h>
#include <sys/debug.h>
#include <sys/cpupart.h>
#include <sys/lgrp.h>
#include <sys/pset.h>
#include <sys/pghw.h>
#include <sys/kmem.h>
#include <sys/kmem_impl.h>	/* to set per-cpu kmem_cache offset */
#include <sys/atomic.h>
#include <sys/callb.h>
#include <sys/vtrace.h>
#include <sys/cyclic.h>
#include <sys/bitmap.h>
#include <sys/nvpair.h>
#include <sys/pool_pset.h>
#include <sys/msacct.h>
#include <sys/time.h>
#include <sys/archsystm.h>
#include <sys/sdt.h>
#if defined(__x86) || defined(__amd64)
#include <sys/x86_archext.h>
#endif
#include <sys/callo.h>

extern int	mp_cpu_start(cpu_t *);
extern int	mp_cpu_stop(cpu_t *);
extern int	mp_cpu_poweron(cpu_t *);
extern int	mp_cpu_poweroff(cpu_t *);
extern int	mp_cpu_configure(int);
extern int	mp_cpu_unconfigure(int);
extern void	mp_cpu_faulted_enter(cpu_t *);
extern void	mp_cpu_faulted_exit(cpu_t *);

extern int cmp_cpu_to_chip(processorid_t cpuid);
#ifdef __sparcv9
extern char *cpu_fru_fmri(cpu_t *cp);
#endif

static void cpu_add_active_internal(cpu_t *cp);
static void cpu_remove_active(cpu_t *cp);
static void cpu_info_kstat_create(cpu_t *cp);
static void cpu_info_kstat_destroy(cpu_t *cp);
static void cpu_stats_kstat_create(cpu_t *cp);
static void cpu_stats_kstat_destroy(cpu_t *cp);

static int cpu_sys_stats_ks_update(kstat_t *ksp, int rw);
static int cpu_vm_stats_ks_update(kstat_t *ksp, int rw);
static int cpu_stat_ks_update(kstat_t *ksp, int rw);
static int cpu_state_change_hooks(int, cpu_setup_t, cpu_setup_t);

/*
 * cpu_lock protects ncpus, ncpus_online, cpu_flag, cpu_list, cpu_active,
 * max_cpu_seqid_ever, and dispatch queue reallocations.  The lock ordering
 * with respect to related locks is:
 *
 *	cpu_lock --> thread_free_lock ---> p_lock ---> thread_lock()
 *
 * Warning:  Certain sections of code do not use the cpu_lock when
 * traversing the cpu_list (e.g. mutex_vector_enter(), clock()).
 * Since all cpus are paused during modifications to this list, a solution
 * to protect the list is to either disable kernel preemption while
 * walking the list, *or* recheck the cpu_next pointer at each
 * iteration in the loop.  Note that in no case can any cached
 * copies of the cpu pointers be kept, as they may become invalid.
 */
kmutex_t cpu_lock;
cpu_t *cpu_list;		/* list of all CPUs */
cpu_t *clock_cpu_list;		/* used by clock to walk CPUs */
cpu_t *cpu_active;		/* list of active CPUs */
static cpuset_t cpu_available;	/* set of available CPUs */
cpuset_t cpu_seqid_inuse;	/* which cpu_seqids are in use */

cpu_t **cpu_seq;		/* ptrs to CPUs, indexed by seq_id */

/*
 * max_ncpus keeps the max cpus the system can have.  Initially
 * it's NCPU, but since most archs scan the devtree for cpus
 * fairly early on during boot, the real max can be known before
 * ncpus is set (useful for early NCPU based allocations).
 */
int max_ncpus = NCPU;
/*
 * Platforms that set max_ncpus to the maximum number of cpus that can be
 * dynamically added will set boot_max_ncpus to the number of cpus found
 * at device tree scan time during boot.
 */
int boot_max_ncpus = -1;
int boot_ncpus = -1;
/*
 * Maximum possible CPU id.  This can never be >= NCPU since NCPU is
 * used to size arrays that are indexed by CPU id.
 */
processorid_t max_cpuid = NCPU - 1;

/*
 * Maximum cpu_seqid that was ever given out.  This number can only grow
 * and never shrink.  It can be used to optimize NCPU loops to avoid going
 * through CPUs which were never on-line.
 */
processorid_t max_cpu_seqid_ever = 0;

int ncpus = 1;
int ncpus_online = 1;

/*
 * CPU that we're trying to offline.  Protected by cpu_lock.
 */
cpu_t *cpu_inmotion;

/*
 * Can be raised to suppress further weakbindings, which are instead
 * satisfied by disabling preemption.  Must be raised/lowered under cpu_lock,
 * while individual thread weakbinding synchronization is done under thread
 * lock.
 */
int weakbindingbarrier;

/*
 * Variables used in pause_cpus().
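 *
 * Roughly, each entry of safe_list[] tracks the pause handshake for one
 * CPU: pause_cpus() resets entries to PAUSE_IDLE, a pause thread marks
 * its slot PAUSE_READY once it is running, PAUSE_WAIT means the CPU is
 * (or need not be) quiesced, and PAUSE_DIE/PAUSE_DEAD are the request
 * and acknowledgement used by cpu_pause_free() to retire a pause thread.
 * The authoritative transitions are in cpu_pause(), cpu_pause_start()
 * and pause_cpus() below.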
 */
static volatile char safe_list[NCPU];

static struct _cpu_pause_info {
	int		cp_spl;		/* spl saved in pause_cpus() */
	volatile int	cp_go;		/* Go signal sent after all ready */
	int		cp_count;	/* # of CPUs to pause */
	ksema_t		cp_sem;		/* synch pause_cpus & cpu_pause */
	kthread_id_t	cp_paused;
} cpu_pause_info;

static kmutex_t pause_free_mutex;
static kcondvar_t pause_free_cv;

void *(*cpu_pause_func)(void *) = NULL;


static struct cpu_sys_stats_ks_data {
	kstat_named_t cpu_ticks_idle;
	kstat_named_t cpu_ticks_user;
	kstat_named_t cpu_ticks_kernel;
	kstat_named_t cpu_ticks_wait;
	kstat_named_t cpu_nsec_idle;
	kstat_named_t cpu_nsec_user;
	kstat_named_t cpu_nsec_kernel;
	kstat_named_t cpu_nsec_intr;
	kstat_named_t cpu_load_intr;
	kstat_named_t wait_ticks_io;
	kstat_named_t bread;
	kstat_named_t bwrite;
	kstat_named_t lread;
	kstat_named_t lwrite;
	kstat_named_t phread;
	kstat_named_t phwrite;
	kstat_named_t pswitch;
	kstat_named_t trap;
	kstat_named_t intr;
	kstat_named_t syscall;
	kstat_named_t sysread;
	kstat_named_t syswrite;
	kstat_named_t sysfork;
	kstat_named_t sysvfork;
	kstat_named_t sysexec;
	kstat_named_t readch;
	kstat_named_t writech;
	kstat_named_t rcvint;
	kstat_named_t xmtint;
	kstat_named_t mdmint;
	kstat_named_t rawch;
	kstat_named_t canch;
	kstat_named_t outch;
	kstat_named_t msg;
	kstat_named_t sema;
	kstat_named_t namei;
	kstat_named_t ufsiget;
	kstat_named_t ufsdirblk;
	kstat_named_t ufsipage;
	kstat_named_t ufsinopage;
	kstat_named_t procovf;
	kstat_named_t intrthread;
	kstat_named_t intrblk;
	kstat_named_t intrunpin;
	kstat_named_t idlethread;
	kstat_named_t inv_swtch;
	kstat_named_t nthreads;
	kstat_named_t cpumigrate;
	kstat_named_t xcalls;
	kstat_named_t mutex_adenters;
	kstat_named_t rw_rdfails;
	kstat_named_t rw_wrfails;
	kstat_named_t modload;
	kstat_named_t modunload;
	kstat_named_t bawrite;
	kstat_named_t iowait;
} cpu_sys_stats_ks_data_template = {
	{ "cpu_ticks_idle",	KSTAT_DATA_UINT64 },
	{ "cpu_ticks_user",	KSTAT_DATA_UINT64 },
	{ "cpu_ticks_kernel",	KSTAT_DATA_UINT64 },
	{ "cpu_ticks_wait",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_idle",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_user",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_kernel",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_intr",	KSTAT_DATA_UINT64 },
	{ "cpu_load_intr",	KSTAT_DATA_UINT64 },
	{ "wait_ticks_io",	KSTAT_DATA_UINT64 },
	{ "bread",		KSTAT_DATA_UINT64 },
	{ "bwrite",		KSTAT_DATA_UINT64 },
	{ "lread",		KSTAT_DATA_UINT64 },
	{ "lwrite",		KSTAT_DATA_UINT64 },
	{ "phread",		KSTAT_DATA_UINT64 },
	{ "phwrite",		KSTAT_DATA_UINT64 },
	{ "pswitch",		KSTAT_DATA_UINT64 },
	{ "trap",		KSTAT_DATA_UINT64 },
	{ "intr",		KSTAT_DATA_UINT64 },
	{ "syscall",		KSTAT_DATA_UINT64 },
	{ "sysread",		KSTAT_DATA_UINT64 },
	{ "syswrite",		KSTAT_DATA_UINT64 },
	{ "sysfork",		KSTAT_DATA_UINT64 },
	{ "sysvfork",		KSTAT_DATA_UINT64 },
	{ "sysexec",		KSTAT_DATA_UINT64 },
	{ "readch",		KSTAT_DATA_UINT64 },
	{ "writech",		KSTAT_DATA_UINT64 },
	{ "rcvint",		KSTAT_DATA_UINT64 },
	{ "xmtint",		KSTAT_DATA_UINT64 },
	{ "mdmint",		KSTAT_DATA_UINT64 },
	{ "rawch",		KSTAT_DATA_UINT64 },
	{ "canch",		KSTAT_DATA_UINT64 },
	{ "outch",		KSTAT_DATA_UINT64 },
	{ "msg",		KSTAT_DATA_UINT64 },
	{ "sema",		KSTAT_DATA_UINT64 },
	{ "namei",		KSTAT_DATA_UINT64 },
271 { "ufsiget", KSTAT_DATA_UINT64 }, 272 { "ufsdirblk", KSTAT_DATA_UINT64 }, 273 { "ufsipage", KSTAT_DATA_UINT64 }, 274 { "ufsinopage", KSTAT_DATA_UINT64 }, 275 { "procovf", KSTAT_DATA_UINT64 }, 276 { "intrthread", KSTAT_DATA_UINT64 }, 277 { "intrblk", KSTAT_DATA_UINT64 }, 278 { "intrunpin", KSTAT_DATA_UINT64 }, 279 { "idlethread", KSTAT_DATA_UINT64 }, 280 { "inv_swtch", KSTAT_DATA_UINT64 }, 281 { "nthreads", KSTAT_DATA_UINT64 }, 282 { "cpumigrate", KSTAT_DATA_UINT64 }, 283 { "xcalls", KSTAT_DATA_UINT64 }, 284 { "mutex_adenters", KSTAT_DATA_UINT64 }, 285 { "rw_rdfails", KSTAT_DATA_UINT64 }, 286 { "rw_wrfails", KSTAT_DATA_UINT64 }, 287 { "modload", KSTAT_DATA_UINT64 }, 288 { "modunload", KSTAT_DATA_UINT64 }, 289 { "bawrite", KSTAT_DATA_UINT64 }, 290 { "iowait", KSTAT_DATA_UINT64 }, 291 }; 292 293 static struct cpu_vm_stats_ks_data { 294 kstat_named_t pgrec; 295 kstat_named_t pgfrec; 296 kstat_named_t pgin; 297 kstat_named_t pgpgin; 298 kstat_named_t pgout; 299 kstat_named_t pgpgout; 300 kstat_named_t swapin; 301 kstat_named_t pgswapin; 302 kstat_named_t swapout; 303 kstat_named_t pgswapout; 304 kstat_named_t zfod; 305 kstat_named_t dfree; 306 kstat_named_t scan; 307 kstat_named_t rev; 308 kstat_named_t hat_fault; 309 kstat_named_t as_fault; 310 kstat_named_t maj_fault; 311 kstat_named_t cow_fault; 312 kstat_named_t prot_fault; 313 kstat_named_t softlock; 314 kstat_named_t kernel_asflt; 315 kstat_named_t pgrrun; 316 kstat_named_t execpgin; 317 kstat_named_t execpgout; 318 kstat_named_t execfree; 319 kstat_named_t anonpgin; 320 kstat_named_t anonpgout; 321 kstat_named_t anonfree; 322 kstat_named_t fspgin; 323 kstat_named_t fspgout; 324 kstat_named_t fsfree; 325 } cpu_vm_stats_ks_data_template = { 326 { "pgrec", KSTAT_DATA_UINT64 }, 327 { "pgfrec", KSTAT_DATA_UINT64 }, 328 { "pgin", KSTAT_DATA_UINT64 }, 329 { "pgpgin", KSTAT_DATA_UINT64 }, 330 { "pgout", KSTAT_DATA_UINT64 }, 331 { "pgpgout", KSTAT_DATA_UINT64 }, 332 { "swapin", KSTAT_DATA_UINT64 }, 333 { "pgswapin", KSTAT_DATA_UINT64 }, 334 { "swapout", KSTAT_DATA_UINT64 }, 335 { "pgswapout", KSTAT_DATA_UINT64 }, 336 { "zfod", KSTAT_DATA_UINT64 }, 337 { "dfree", KSTAT_DATA_UINT64 }, 338 { "scan", KSTAT_DATA_UINT64 }, 339 { "rev", KSTAT_DATA_UINT64 }, 340 { "hat_fault", KSTAT_DATA_UINT64 }, 341 { "as_fault", KSTAT_DATA_UINT64 }, 342 { "maj_fault", KSTAT_DATA_UINT64 }, 343 { "cow_fault", KSTAT_DATA_UINT64 }, 344 { "prot_fault", KSTAT_DATA_UINT64 }, 345 { "softlock", KSTAT_DATA_UINT64 }, 346 { "kernel_asflt", KSTAT_DATA_UINT64 }, 347 { "pgrrun", KSTAT_DATA_UINT64 }, 348 { "execpgin", KSTAT_DATA_UINT64 }, 349 { "execpgout", KSTAT_DATA_UINT64 }, 350 { "execfree", KSTAT_DATA_UINT64 }, 351 { "anonpgin", KSTAT_DATA_UINT64 }, 352 { "anonpgout", KSTAT_DATA_UINT64 }, 353 { "anonfree", KSTAT_DATA_UINT64 }, 354 { "fspgin", KSTAT_DATA_UINT64 }, 355 { "fspgout", KSTAT_DATA_UINT64 }, 356 { "fsfree", KSTAT_DATA_UINT64 }, 357 }; 358 359 /* 360 * Force the specified thread to migrate to the appropriate processor. 361 * Called with thread lock held, returns with it dropped. 362 */ 363 static void 364 force_thread_migrate(kthread_id_t tp) 365 { 366 ASSERT(THREAD_LOCK_HELD(tp)); 367 if (tp == curthread) { 368 THREAD_TRANSITION(tp); 369 CL_SETRUN(tp); 370 thread_unlock_nopreempt(tp); 371 swtch(); 372 } else { 373 if (tp->t_state == TS_ONPROC) { 374 cpu_surrender(tp); 375 } else if (tp->t_state == TS_RUN) { 376 (void) dispdeq(tp); 377 setbackdq(tp); 378 } 379 thread_unlock(tp); 380 } 381 } 382 383 /* 384 * Set affinity for a specified CPU. 
 */
void
thread_affinity_set(kthread_id_t t, int cpu_id)
{
	cpu_t *cp;
	int c;

	ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));

	if ((c = cpu_id) == CPU_CURRENT) {
		mutex_enter(&cpu_lock);
		cpu_id = CPU->cpu_id;
	}
	/*
	 * We should be asserting that cpu_lock is held here, but
	 * the NCA code doesn't acquire it.  The following assert
	 * should be uncommented when the NCA code is fixed.
	 *
	 * ASSERT(MUTEX_HELD(&cpu_lock));
	 */
	ASSERT((cpu_id >= 0) && (cpu_id < NCPU));
	cp = cpu[cpu_id];
	ASSERT(cp != NULL);		/* user must provide a good cpu_id */
	/*
	 * If there is already a hard affinity requested, and this affinity
	 * conflicts with that, panic.
	 */
	thread_lock(t);
	if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
		panic("affinity_set: setting %p but already bound to %p",
		    (void *)cp, (void *)t->t_bound_cpu);
	}
	t->t_affinitycnt++;
	t->t_bound_cpu = cp;

	/*
	 * Make sure we're running on the right CPU.
	 */
	if (cp != t->t_cpu || t != curthread) {
		force_thread_migrate(t);	/* drops thread lock */
	} else {
		thread_unlock(t);
	}

	if (c == CPU_CURRENT)
		mutex_exit(&cpu_lock);
}

/*
 * Wrapper for backward compatibility.
 */
void
affinity_set(int cpu_id)
{
	thread_affinity_set(curthread, cpu_id);
}

/*
 * Decrement the affinity reservation count and if it becomes zero,
 * clear the CPU affinity for the current thread, or set it to the user's
 * software binding request.
 */
void
thread_affinity_clear(kthread_id_t t)
{
	register processorid_t binding;

	thread_lock(t);
	if (--t->t_affinitycnt == 0) {
		if ((binding = t->t_bind_cpu) == PBIND_NONE) {
			/*
			 * Adjust disp_max_unbound_pri if necessary.
			 */
			disp_adjust_unbound_pri(t);
			t->t_bound_cpu = NULL;
			if (t->t_cpu->cpu_part != t->t_cpupart) {
				force_thread_migrate(t);
				return;
			}
		} else {
			t->t_bound_cpu = cpu[binding];
			/*
			 * Make sure the thread is running on the bound CPU.
			 */
			if (t->t_cpu != t->t_bound_cpu) {
				force_thread_migrate(t);
				return;		/* already dropped lock */
			}
		}
	}
	thread_unlock(t);
}

/*
 * Wrapper for backward compatibility.
 */
void
affinity_clear(void)
{
	thread_affinity_clear(curthread);
}

/*
 * Weak cpu affinity.  Bind to the "current" cpu for short periods
 * of time during which the thread must not block (but may be preempted).
 * Use this instead of kpreempt_disable() when it is only "no migration"
 * rather than "no preemption" semantics that are required - disabling
 * preemption holds higher priority threads off of cpu and if the
 * operation that is protected is more than momentary this is not good
 * for realtime etc.
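 *
 * A minimal sketch of the intended pairing (illustrative only):
 *
 *	thread_nomigrate();
 *	... short, non-blocking work that must stay on this cpu ...
 *	thread_allowmigrate();
 *
 * Calls may be nested, so each thread_nomigrate() must be balanced by a
 * matching thread_allowmigrate().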
 *
 * Weakly bound threads will not prevent a cpu from being offlined -
 * we'll only run them on the cpu to which they are weakly bound but
 * (because they do not block) we'll always be able to move them on to
 * another cpu at offline time if we give them just a short moment to
 * run during which they will unbind.  To give a cpu a chance of offlining,
 * however, we require a barrier to weak bindings that may be raised for a
 * given cpu (offline/move code may set this and then wait a short time for
 * existing weak bindings to drop); the cpu_inmotion pointer is that barrier.
 *
 * There are few restrictions on the calling context of thread_nomigrate.
 * The caller must not hold the thread lock.  Calls may be nested.
 *
 * After weakbinding a thread must not perform actions that may block.
 * In particular it must not call thread_affinity_set; calling that when
 * already weakbound is nonsensical anyway.
 *
 * If curthread is prevented from migrating for other reasons
 * (kernel preemption disabled; high pil; strongly bound; interrupt thread)
 * then the weak binding will succeed even if this cpu is the target of an
 * offline/move request.
 */
void
thread_nomigrate(void)
{
	cpu_t *cp;
	kthread_id_t t = curthread;

again:
	kpreempt_disable();
	cp = CPU;

	/*
	 * A highlevel interrupt must not modify t_nomigrate or
	 * t_weakbound_cpu of the thread it has interrupted.  A lowlevel
	 * interrupt thread cannot migrate and we can avoid the
	 * thread_lock call below by short-circuiting here.  In either
	 * case we can just return since no migration is possible and
	 * the condition will persist (i.e., when we test for these again
	 * in thread_allowmigrate they can't have changed).  Migration
	 * is also impossible if we're at or above DISP_LEVEL pil.
	 */
	if (CPU_ON_INTR(cp) || t->t_flag & T_INTR_THREAD ||
	    getpil() >= DISP_LEVEL) {
		kpreempt_enable();
		return;
	}

	/*
	 * We must be consistent with existing weak bindings.  Since we
	 * may be interrupted between the increment of t_nomigrate and
	 * the store to t_weakbound_cpu below we cannot assume that
	 * t_weakbound_cpu will be set if t_nomigrate is.  Note that we
	 * cannot assert t_weakbound_cpu == t_bind_cpu since that is not
	 * always the case.
	 */
	if (t->t_nomigrate && t->t_weakbound_cpu && t->t_weakbound_cpu != cp) {
		if (!panicstr)
			panic("thread_nomigrate: binding to %p but already "
			    "bound to %p", (void *)cp,
			    (void *)t->t_weakbound_cpu);
	}

	/*
	 * At this point we have preemption disabled and we don't yet hold
	 * the thread lock.  So it's possible that somebody else could
	 * set t_bind_cpu here and not be able to force us across to the
	 * new cpu (since we have preemption disabled).
	 */
	thread_lock(curthread);

	/*
	 * If further weak bindings are being (temporarily) suppressed then
	 * we'll settle for disabling kernel preemption (which assures
	 * no migration provided the thread does not block which it is
	 * not allowed to if using thread_nomigrate).  We must remember
	 * this disposition so we can take appropriate action in
	 * thread_allowmigrate.  If this is a nested call and the
	 * thread is already weakbound then fall through as normal.
	 * We remember the decision to settle for kpreempt_disable through
	 * negative nesting counting in t_nomigrate.
	 * Once a thread has had one weakbinding request satisfied in this
	 * way any further (nested) requests will continue to be satisfied
	 * in the same way, even if weak bindings have recommenced.
	 */
	if (t->t_nomigrate < 0 || weakbindingbarrier && t->t_nomigrate == 0) {
		--t->t_nomigrate;
		thread_unlock(curthread);
		return;		/* with kpreempt_disable still active */
	}

	/*
	 * We hold thread_lock so t_bind_cpu cannot change.  We could,
	 * however, be running on a cpu other than our t_bound_cpu
	 * (as explained above).  If we grant the weak binding request
	 * in that case then the dispatcher must favour our weak binding
	 * over our strong (in which case, just as when preemption is
	 * disabled, we can continue to run on a cpu other than the one to
	 * which we are strongbound; the difference in this case is that
	 * this thread can be preempted and so can appear on the dispatch
	 * queues of a cpu other than the one it is strongbound to).
	 *
	 * If the cpu we are running on does not appear to be a current
	 * offline target (we check cpu_inmotion to determine this - since
	 * we don't hold cpu_lock we may not see a recent store to that,
	 * so it's possible that we at times can grant a weak binding to a
	 * cpu that is an offline target, but that one request will not
	 * prevent the offline from succeeding) then we will always grant
	 * the weak binding request.  This includes the case above where
	 * we grant a weakbinding not commensurate with our strong binding.
	 *
	 * If our cpu does appear to be an offline target then we're inclined
	 * not to grant the weakbinding request just yet - we'd prefer to
	 * migrate to another cpu and grant the request there.  The
	 * exceptions are those cases where going through preemption code
	 * will not result in us changing cpu:
	 *
	 * . interrupts have already bypassed this case (see above)
	 * . we are already weakbound to this cpu (dispatcher code will
	 *   always return us to the weakbound cpu)
	 * . preemption was disabled even before we disabled it above
	 * . we are strongbound to this cpu (if we're strongbound to
	 *   another and not yet running there the trip through the
	 *   dispatcher will move us to the strongbound cpu and we
	 *   will grant the weak binding there)
	 */
	if (cp != cpu_inmotion || t->t_nomigrate > 0 || t->t_preempt > 1 ||
	    t->t_bound_cpu == cp) {
		/*
		 * Don't be tempted to store to t_weakbound_cpu only on
		 * the first nested bind request - if we're interrupted
		 * after the increment of t_nomigrate and before the
		 * store to t_weakbound_cpu and the interrupt calls
		 * thread_nomigrate then the assertion in thread_allowmigrate
		 * would fail.
		 */
		t->t_nomigrate++;
		t->t_weakbound_cpu = cp;
		membar_producer();
		thread_unlock(curthread);
		/*
		 * Now that we have dropped the thread_lock another thread
		 * can set our t_weakbound_cpu, and will try to migrate us
		 * to the strongbound cpu (which will not be prevented by
		 * preemption being disabled since we're about to enable
		 * preemption).  We have granted the weakbinding to the
		 * current cpu, so again we are in the position that it is
		 * possible that our weak and strong bindings differ.  Again
		 * this is catered for by dispatcher code which will favour
		 * our weak binding.
		 */
		kpreempt_enable();
	} else {
		/*
		 * Move to another cpu before granting the request by
		 * forcing this thread through preemption code.  When we
		 * get to set{front,back}dq called from CL_PREEMPT()
		 * cpu_choose() will be used to select a cpu to queue
		 * us on - that will see cpu_inmotion and take
		 * steps to avoid returning us to this cpu.
		 */
		cp->cpu_kprunrun = 1;
		thread_unlock(curthread);
		kpreempt_enable();	/* will call preempt() */
		goto again;
	}
}

void
thread_allowmigrate(void)
{
	kthread_id_t t = curthread;

	ASSERT(t->t_weakbound_cpu == CPU ||
	    (t->t_nomigrate < 0 && t->t_preempt > 0) ||
	    CPU_ON_INTR(CPU) || t->t_flag & T_INTR_THREAD ||
	    getpil() >= DISP_LEVEL);

	if (CPU_ON_INTR(CPU) || (t->t_flag & T_INTR_THREAD) ||
	    getpil() >= DISP_LEVEL)
		return;

	if (t->t_nomigrate < 0) {
		/*
		 * This thread was granted "weak binding" in the
		 * stronger form of kernel preemption disabling.
		 * Undo a level of nesting for both t_nomigrate
		 * and t_preempt.
		 */
		++t->t_nomigrate;
		kpreempt_enable();
	} else if (--t->t_nomigrate == 0) {
		/*
		 * Time to drop the weak binding.  We need to cater
		 * for the case where we're weakbound to a different
		 * cpu than that to which we're strongbound (a very
		 * temporary arrangement that must only persist until
		 * weak binding drops).  We don't acquire thread_lock
		 * here so even as this code executes t_bound_cpu
		 * may be changing.  So we disable preemption and
		 * a) in the case that t_bound_cpu changes while we
		 * have preemption disabled kprunrun will be set
		 * asynchronously, and b) if before disabling
		 * preemption we were already on a different cpu to
		 * our t_bound_cpu then we set kprunrun ourselves
		 * to force a trip through the dispatcher when
		 * preemption is enabled.
		 */
		kpreempt_disable();
		if (t->t_bound_cpu &&
		    t->t_weakbound_cpu != t->t_bound_cpu)
			CPU->cpu_kprunrun = 1;
		t->t_weakbound_cpu = NULL;
		membar_producer();
		kpreempt_enable();
	}
}

/*
 * weakbinding_stop can be used to temporarily cause weakbindings made
 * with thread_nomigrate to be satisfied through the stronger action of
 * kpreempt_disable.  weakbinding_start recommences normal weakbinding.
 */

void
weakbinding_stop(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	weakbindingbarrier = 1;
	membar_producer();	/* make visible before subsequent thread_lock */
}

void
weakbinding_start(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	weakbindingbarrier = 0;
}

void
null_xcall(void)
{
}

/*
 * This routine is called to place the CPUs in a safe place so that
 * one of them can be taken off line or placed on line.  What we are
 * trying to do here is prevent a thread from traversing the list
 * of active CPUs while we are changing it or from getting placed on
 * the run queue of a CPU that has just gone off line.  We do this by
 * creating a thread with the highest possible prio for each CPU and
 * having it call this routine.  The advantage of this method is that
 * we can eliminate all checks for CPU_ACTIVE in the disp routines.
 * This makes disp faster at the expense of making p_online() slower
 * which is a good trade off.
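 *
 * In outline, cpu_pause() and pause_cpus() hand-shake as follows (the
 * details are in the code below): each pause thread marks its slot in
 * safe_list[] PAUSE_READY and posts cp_sem; pause_cpus() counts the
 * posts, then sets cp_go; each pause thread then raises spl and parks
 * in mach_cpu_pause() until start_cpus() resets safe_list[], the exact
 * wait being machine-dependent.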
 */
static void
cpu_pause(int index)
{
	int s;
	struct _cpu_pause_info *cpi = &cpu_pause_info;
	volatile char *safe = &safe_list[index];
	long lindex = index;

	ASSERT((curthread->t_bound_cpu != NULL) || (*safe == PAUSE_DIE));

	while (*safe != PAUSE_DIE) {
		*safe = PAUSE_READY;
		membar_enter();		/* make sure stores are flushed */
		sema_v(&cpi->cp_sem);	/* signal requesting thread */

		/*
		 * Wait here until all pause threads are running.  That
		 * indicates that it's safe to do the spl.  Until
		 * cpu_pause_info.cp_go is set, we don't want to spl
		 * because that might block clock interrupts needed
		 * to preempt threads on other CPUs.
		 */
		while (cpi->cp_go == 0)
			;
		/*
		 * Even though we are at the highest disp prio, we need
		 * to block out all interrupts below LOCK_LEVEL so that
		 * an intr doesn't come in, wake up a thread, and call
		 * setbackdq/setfrontdq.
		 */
		s = splhigh();
		/*
		 * If cpu_pause_func() has been set then call it using
		 * index as the argument; currently it is only used by
		 * cpr_suspend_cpus().  This function is used as the
		 * code to execute on the "paused" CPUs when a machine
		 * comes out of a sleep state and CPUs were powered off.
		 * (It could also be used for hotplugging CPUs.)
		 */
		if (cpu_pause_func != NULL)
			(*cpu_pause_func)((void *)lindex);

		mach_cpu_pause(safe);

		splx(s);
		/*
		 * Waiting is at an end.  Switch out of cpu_pause
		 * loop and resume useful work.
		 */
		swtch();
	}

	mutex_enter(&pause_free_mutex);
	*safe = PAUSE_DEAD;
	cv_broadcast(&pause_free_cv);
	mutex_exit(&pause_free_mutex);
}

/*
 * Allow the cpus to start running again.
 */
void
start_cpus()
{
	int i;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_pause_info.cp_paused);
	cpu_pause_info.cp_paused = NULL;
	for (i = 0; i < NCPU; i++)
		safe_list[i] = PAUSE_IDLE;
	membar_enter();			/* make sure stores are flushed */
	affinity_clear();
	splx(cpu_pause_info.cp_spl);
	kpreempt_enable();
}

/*
 * Allocate a pause thread for a CPU.
 */
static void
cpu_pause_alloc(cpu_t *cp)
{
	kthread_id_t	t;
	long		cpun = cp->cpu_id;

	/*
	 * Note, v.v_nglobpris will not change value as long as I hold
	 * cpu_lock.
	 */
	t = thread_create(NULL, 0, cpu_pause, (void *)cpun,
	    0, &p0, TS_STOPPED, v.v_nglobpris - 1);
	thread_lock(t);
	t->t_bound_cpu = cp;
	t->t_disp_queue = cp->cpu_disp;
	t->t_affinitycnt = 1;
	t->t_preempt = 1;
	thread_unlock(t);
	cp->cpu_pause_thread = t;
	/*
	 * Registering a thread in the callback table is usually done
	 * in the initialization code of the thread.  In this
	 * case, we do it right after thread creation because the
	 * thread itself may never run, and we need to register the
	 * fact that it is safe for cpr suspend.
	 */
	CALLB_CPR_INIT_SAFE(t, "cpu_pause");
}

/*
 * Free a pause thread for a CPU.
 */
static void
cpu_pause_free(cpu_t *cp)
{
	kthread_id_t	t;
	int		cpun = cp->cpu_id;

	ASSERT(MUTEX_HELD(&cpu_lock));
	/*
	 * We have to get the thread and tell it to die.
	 */
	if ((t = cp->cpu_pause_thread) == NULL) {
		ASSERT(safe_list[cpun] == PAUSE_IDLE);
		return;
	}
	thread_lock(t);
	t->t_cpu = CPU;		/* disp gets upset if last cpu is quiesced. */
	t->t_bound_cpu = NULL;	/* Must un-bind; cpu may not be running. */
	t->t_pri = v.v_nglobpris - 1;
	ASSERT(safe_list[cpun] == PAUSE_IDLE);
	safe_list[cpun] = PAUSE_DIE;
	THREAD_TRANSITION(t);
	setbackdq(t);
	thread_unlock_nopreempt(t);

	/*
	 * If we don't wait for the thread to actually die, it may try to
	 * run on the wrong cpu as part of an actual call to pause_cpus().
	 */
	mutex_enter(&pause_free_mutex);
	while (safe_list[cpun] != PAUSE_DEAD) {
		cv_wait(&pause_free_cv, &pause_free_mutex);
	}
	mutex_exit(&pause_free_mutex);
	safe_list[cpun] = PAUSE_IDLE;

	cp->cpu_pause_thread = NULL;
}

/*
 * Initialize basic structures for pausing CPUs.
 */
void
cpu_pause_init()
{
	sema_init(&cpu_pause_info.cp_sem, 0, NULL, SEMA_DEFAULT, NULL);
	/*
	 * Create initial CPU pause thread.
	 */
	cpu_pause_alloc(CPU);
}

/*
 * Start the threads used to pause another CPU.
 */
static int
cpu_pause_start(processorid_t cpu_id)
{
	int	i;
	int	cpu_count = 0;

	for (i = 0; i < NCPU; i++) {
		cpu_t		*cp;
		kthread_id_t	t;

		cp = cpu[i];
		if (!CPU_IN_SET(cpu_available, i) || (i == cpu_id)) {
			safe_list[i] = PAUSE_WAIT;
			continue;
		}

		/*
		 * Skip CPU if it is quiesced or not yet started.
		 */
		if ((cp->cpu_flags & (CPU_QUIESCED | CPU_READY)) != CPU_READY) {
			safe_list[i] = PAUSE_WAIT;
			continue;
		}

		/*
		 * Start this CPU's pause thread.
		 */
		t = cp->cpu_pause_thread;
		thread_lock(t);
		/*
		 * Reset the priority, since nglobpris may have
		 * changed since the thread was created, if someone
		 * has loaded the RT (or some other) scheduling
		 * class.
		 */
		t->t_pri = v.v_nglobpris - 1;
		THREAD_TRANSITION(t);
		setbackdq(t);
		thread_unlock_nopreempt(t);
		++cpu_count;
	}
	return (cpu_count);
}


/*
 * Pause all of the CPUs except the one we are on by creating a high
 * priority thread bound to those CPUs.
 *
 * Note that one must be extremely careful regarding code
 * executed while CPUs are paused.  Since a CPU may be paused
 * while a thread scheduling on that CPU is holding an adaptive
 * lock, code executed with CPUs paused must not acquire adaptive
 * (or low-level spin) locks.  Also, such code must not block,
 * since the thread that is supposed to initiate the wakeup may
 * never run.
 *
 * With a few exceptions, the restrictions on code executed with CPUs
 * paused match those for code executed at high-level interrupt
 * context.
 */
void
pause_cpus(cpu_t *off_cp)
{
	processorid_t	cpu_id;
	int		i;
	struct _cpu_pause_info	*cpi = &cpu_pause_info;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpi->cp_paused == NULL);
	cpi->cp_count = 0;
	cpi->cp_go = 0;
	for (i = 0; i < NCPU; i++)
		safe_list[i] = PAUSE_IDLE;
	kpreempt_disable();

	/*
	 * If running on the cpu that is going offline, get off it.
	 * This is so that it won't be necessary to rechoose a CPU
	 * when done.
	 */
	if (CPU == off_cp)
		cpu_id = off_cp->cpu_next_part->cpu_id;
	else
		cpu_id = CPU->cpu_id;
	affinity_set(cpu_id);

	/*
	 * Start the pause threads and record how many were started
	 */
	cpi->cp_count = cpu_pause_start(cpu_id);

	/*
	 * Now wait for all CPUs to be running the pause thread.
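	 * Each pause thread posts cp_sem once it is running, so the
	 * sema_tryp() loop below simply counts those posts until all
	 * cp_count participating CPUs have reached cpu_pause().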
	 */
	while (cpi->cp_count > 0) {
		/*
		 * Spin reading the count without grabbing the disp
		 * lock to make sure we don't prevent the pause
		 * threads from getting the lock.
		 */
		while (sema_held(&cpi->cp_sem))
			;
		if (sema_tryp(&cpi->cp_sem))
			--cpi->cp_count;
	}
	cpi->cp_go = 1;			/* all have reached cpu_pause */

	/*
	 * Now wait for all CPUs to spl.  (Transition from PAUSE_READY
	 * to PAUSE_WAIT.)
	 */
	for (i = 0; i < NCPU; i++) {
		while (safe_list[i] != PAUSE_WAIT)
			;
	}
	cpi->cp_spl = splhigh();	/* block dispatcher on this CPU */
	cpi->cp_paused = curthread;
}

/*
 * Check whether the current thread has CPUs paused
 */
int
cpus_paused(void)
{
	if (cpu_pause_info.cp_paused != NULL) {
		ASSERT(cpu_pause_info.cp_paused == curthread);
		return (1);
	}
	return (0);
}

static cpu_t *
cpu_get_all(processorid_t cpun)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cpun >= NCPU || cpun < 0 || !CPU_IN_SET(cpu_available, cpun))
		return (NULL);
	return (cpu[cpun]);
}

/*
 * Check whether cpun is a valid processor id and whether it should be
 * visible from the current zone.  If it is, return a pointer to the
 * associated CPU structure.
 */
cpu_t *
cpu_get(processorid_t cpun)
{
	cpu_t *c;

	ASSERT(MUTEX_HELD(&cpu_lock));
	c = cpu_get_all(cpun);
	if (c != NULL && !INGLOBALZONE(curproc) && pool_pset_enabled() &&
	    zone_pset_get(curproc->p_zone) != cpupart_query_cpu(c))
		return (NULL);
	return (c);
}

/*
 * The following functions should be used to check CPU states in the kernel.
 * They should be invoked with cpu_lock held.  Kernel subsystems interested
 * in CPU states should *not* use cpu_get_state() and various P_ONLINE/etc
 * states.  Those are for user-land (and system call) use only.
 */

/*
 * Determine whether the CPU is online and handling interrupts.
 */
int
cpu_is_online(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_online(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is offline (this includes spare and faulted).
 */
int
cpu_is_offline(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_offline(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is powered off.
 */
int
cpu_is_poweredoff(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_poweredoff(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is online but not handling interrupts.
 */
int
cpu_is_nointr(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_nointr(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is active (scheduling threads).
 */
int
cpu_is_active(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_active(cpu->cpu_flags));
}

/*
 * Same as above, but these require cpu_flags instead of cpu_t pointers.
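 *
 * For example (derived from the tests below): a cpu_flags value with
 * CPU_READY and CPU_ENABLE set and CPU_OFFLINE, CPU_POWEROFF, CPU_FAULTED
 * and CPU_SPARE all clear is reported online; clear CPU_READY or set
 * CPU_OFFLINE (with CPU_POWEROFF still clear) and it is reported offline.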
 */
int
cpu_flagged_online(cpu_flag_t cpu_flags)
{
	return (cpu_flagged_active(cpu_flags) &&
	    (cpu_flags & CPU_ENABLE));
}

int
cpu_flagged_offline(cpu_flag_t cpu_flags)
{
	return (((cpu_flags & CPU_POWEROFF) == 0) &&
	    ((cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY));
}

int
cpu_flagged_poweredoff(cpu_flag_t cpu_flags)
{
	return ((cpu_flags & CPU_POWEROFF) == CPU_POWEROFF);
}

int
cpu_flagged_nointr(cpu_flag_t cpu_flags)
{
	return (cpu_flagged_active(cpu_flags) &&
	    (cpu_flags & CPU_ENABLE) == 0);
}

int
cpu_flagged_active(cpu_flag_t cpu_flags)
{
	return (((cpu_flags & (CPU_POWEROFF | CPU_FAULTED | CPU_SPARE)) == 0) &&
	    ((cpu_flags & (CPU_READY | CPU_OFFLINE)) == CPU_READY));
}

/*
 * Bring the indicated CPU online.
 */
int
cpu_online(cpu_t *cp)
{
	int	error = 0;

	/*
	 * Handle on-line request.
	 *	This code must put the new CPU on the active list before
	 *	starting it because it will not be paused, and will start
	 *	using the active list immediately.  The real start occurs
	 *	when the CPU_QUIESCED flag is turned off.
	 */

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Put all the cpus into a known safe place.
	 * No mutexes can be entered while CPUs are paused.
	 */
	error = mp_cpu_start(cp);	/* arch-dep hook */
	if (error == 0) {
		pg_cpupart_in(cp, cp->cpu_part);
		pause_cpus(NULL);
		cpu_add_active_internal(cp);
		if (cp->cpu_flags & CPU_FAULTED) {
			cp->cpu_flags &= ~CPU_FAULTED;
			mp_cpu_faulted_exit(cp);
		}
		cp->cpu_flags &= ~(CPU_QUIESCED | CPU_OFFLINE | CPU_FROZEN |
		    CPU_SPARE);
		CPU_NEW_GENERATION(cp);
		start_cpus();
		cpu_stats_kstat_create(cp);
		cpu_create_intrstat(cp);
		lgrp_kstat_create(cp);
		cpu_state_change_notify(cp->cpu_id, CPU_ON);
		cpu_intr_enable(cp);	/* arch-dep hook */
		cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
		cpu_set_state(cp);
		cyclic_online(cp);
		/*
		 * This has to be called only after cyclic_online().  This
		 * function uses cyclics.
		 */
		callout_cpu_online(cp);
		poke_cpu(cp->cpu_id);
	}

	return (error);
}

/*
 * Take the indicated CPU offline.
 */
int
cpu_offline(cpu_t *cp, int flags)
{
	cpupart_t *pp;
	int	error = 0;
	cpu_t	*ncp;
	int	intr_enable;
	int	cyclic_off = 0;
	int	callout_off = 0;
	int	loop_count;
	int	no_quiesce = 0;
	int	(*bound_func)(struct cpu *, int);
	kthread_t *t;
	lpl_t	*cpu_lpl;
	proc_t	*p;
	int	lgrp_diff_lpl;
	boolean_t unbind_all_threads = (flags & CPU_FORCED) != 0;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * If we're going from faulted or spare to offline, just
	 * clear these flags and update CPU state.
	 */
	if (cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) {
		if (cp->cpu_flags & CPU_FAULTED) {
			cp->cpu_flags &= ~CPU_FAULTED;
			mp_cpu_faulted_exit(cp);
		}
		cp->cpu_flags &= ~CPU_SPARE;
		cpu_set_state(cp);
		return (0);
	}

	/*
	 * Handle off-line request.
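	 * In rough order (see below): unbind threads, notify interested
	 * parties, withdraw the CPU from its processor group partition and
	 * from interrupt participation, wait for bound threads to drain,
	 * move callouts and cyclics away, and finally pause all CPUs while
	 * this one is pulled off the active and partition lists.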
	 */
	pp = cp->cpu_part;
	/*
	 * Don't offline last online CPU in partition
	 */
	if (ncpus_online <= 1 || pp->cp_ncpus <= 1 || cpu_intr_count(cp) < 2)
		return (EBUSY);
	/*
	 * Unbind all soft-bound threads bound to our CPU, and hard-bound
	 * threads too if we were asked to.
	 */
	error = cpu_unbind(cp->cpu_id, unbind_all_threads);
	if (error != 0)
		return (error);
	/*
	 * We shouldn't be bound to this CPU ourselves.
	 */
	if (curthread->t_bound_cpu == cp)
		return (EBUSY);

	/*
	 * Tell interested parties that this CPU is going offline.
	 */
	CPU_NEW_GENERATION(cp);
	cpu_state_change_notify(cp->cpu_id, CPU_OFF);

	/*
	 * Tell the PG subsystem that the CPU is leaving the partition
	 */
	pg_cpupart_out(cp, pp);

	/*
	 * Take the CPU out of interrupt participation so we won't find
	 * bound kernel threads.  If the architecture cannot completely
	 * shut off interrupts on the CPU, don't quiesce it, but don't
	 * run anything but interrupt thread... this is indicated by
	 * the CPU_OFFLINE flag being on but the CPU_QUIESCE flag being
	 * off.
	 */
	intr_enable = cp->cpu_flags & CPU_ENABLE;
	if (intr_enable)
		no_quiesce = cpu_intr_disable(cp);

	/*
	 * Record that we are aiming to offline this cpu.  This acts as
	 * a barrier to further weak binding requests in thread_nomigrate
	 * and also causes cpu_choose, disp_lowpri_cpu and setfrontdq to
	 * lean away from this cpu.  Further strong bindings are already
	 * avoided since we hold cpu_lock.  Since threads that are set
	 * runnable around now and others coming off the target cpu are
	 * directed away from the target, existing strong and weak bindings
	 * (especially the latter) to the target cpu stand maximum chance of
	 * being able to unbind during the short delay loop below (if other
	 * unbound threads compete they may not see cpu in time to unbind
	 * even if they would do so immediately).
	 */
	cpu_inmotion = cp;
	membar_enter();

	/*
	 * Check for kernel threads (strong or weak) bound to that CPU.
	 * Strongly bound threads may not unbind, and we'll have to return
	 * EBUSY.  Weakly bound threads should always disappear - we've
	 * stopped more weak binding with cpu_inmotion and existing
	 * bindings will drain imminently (they may not block).  Nonetheless
	 * we will wait for a fixed period for all bound threads to disappear.
	 * Inactive interrupt threads are OK (they'll be in TS_FREE
	 * state).  If test finds some bound threads, wait a few ticks
	 * to give short-lived threads (such as interrupts) chance to
	 * complete.  Note that if no_quiesce is set, i.e. this cpu
	 * is required to service interrupts, then we take the route
	 * that permits interrupt threads to be active (or bypassed).
	 */
	bound_func = no_quiesce ? disp_bound_threads : disp_bound_anythreads;

again:	for (loop_count = 0; (*bound_func)(cp, 0); loop_count++) {
		if (loop_count >= 5) {
			error = EBUSY;	/* some threads still bound */
			break;
		}

		/*
		 * If some threads were assigned, give them
		 * a chance to complete or move.
		 *
		 * This assumes that the clock_thread is not bound
		 * to any CPU, because the clock_thread is needed to
		 * do the delay(hz/100).
		 *
		 * Note: we still hold the cpu_lock while waiting for
		 * the next clock tick.
		 * This is OK since it isn't needed for anything else
		 * except processor_bind(2), and system initialization.
		 * If we drop the lock, we would risk another p_online
		 * disabling the last processor.
		 */
		delay(hz/100);
	}

	if (error == 0 && callout_off == 0) {
		callout_cpu_offline(cp);
		callout_off = 1;
	}

	if (error == 0 && cyclic_off == 0) {
		if (!cyclic_offline(cp)) {
			/*
			 * We must have bound cyclics...
			 */
			error = EBUSY;
			goto out;
		}
		cyclic_off = 1;
	}

	/*
	 * Call mp_cpu_stop() to perform any special operations
	 * needed for this machine architecture to offline a CPU.
	 */
	if (error == 0)
		error = mp_cpu_stop(cp);	/* arch-dep hook */

	/*
	 * If that all worked, take the CPU offline and decrement
	 * ncpus_online.
	 */
	if (error == 0) {
		/*
		 * Put all the cpus into a known safe place.
		 * No mutexes can be entered while CPUs are paused.
		 */
		pause_cpus(cp);
		/*
		 * Repeat the operation, if necessary, to make sure that
		 * all outstanding low-level interrupts run to completion
		 * before we set the CPU_QUIESCED flag.  It's also possible
		 * that a thread has weak bound to the cpu despite our raising
		 * cpu_inmotion above since it may have loaded that
		 * value before the barrier became visible (this would have
		 * to be the thread that was on the target cpu at the time
		 * we raised the barrier).
		 */
		if ((!no_quiesce && cp->cpu_intr_actv != 0) ||
		    (*bound_func)(cp, 1)) {
			start_cpus();
			(void) mp_cpu_start(cp);
			goto again;
		}
		ncp = cp->cpu_next_part;
		cpu_lpl = cp->cpu_lpl;
		ASSERT(cpu_lpl != NULL);

		/*
		 * Remove the CPU from the list of active CPUs.
		 */
		cpu_remove_active(cp);

		/*
		 * Walk the active process list and look for threads
		 * whose home lgroup needs to be updated, or
		 * the last CPU they run on is the one being offlined now.
		 */

		ASSERT(curthread->t_cpu != cp);
		for (p = practive; p != NULL; p = p->p_next) {

			t = p->p_tlist;

			if (t == NULL)
				continue;

			lgrp_diff_lpl = 0;

			do {
				ASSERT(t->t_lpl != NULL);
				/*
				 * Taking last CPU in lpl offline
				 * Rehome thread if it is in this lpl
				 * Otherwise, update the count of how many
				 * threads are in this CPU's lgroup but have
				 * a different lpl.
				 */

				if (cpu_lpl->lpl_ncpu == 0) {
					if (t->t_lpl == cpu_lpl)
						lgrp_move_thread(t,
						    lgrp_choose(t,
						    t->t_cpupart), 0);
					else if (t->t_lpl->lpl_lgrpid ==
					    cpu_lpl->lpl_lgrpid)
						lgrp_diff_lpl++;
				}
				ASSERT(t->t_lpl->lpl_ncpu > 0);

				/*
				 * Update CPU last ran on if it was this CPU
				 */
				if (t->t_cpu == cp && t->t_bound_cpu != cp)
					t->t_cpu = disp_lowpri_cpu(ncp,
					    t->t_lpl, t->t_pri, NULL);
				ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
				    t->t_weakbound_cpu == cp);

				t = t->t_forw;
			} while (t != p->p_tlist);

			/*
			 * Didn't find any threads in the same lgroup as this
			 * CPU with a different lpl, so remove the lgroup from
			 * the process lgroup bitmask.
			 */

			if (lgrp_diff_lpl == 0)
				klgrpset_del(p->p_lgrpset, cpu_lpl->lpl_lgrpid);
		}

		/*
		 * Walk thread list looking for threads that need to be
		 * rehomed, since there are some threads that are not in
		 * their process's p_tlist.
		 */

		t = curthread;
		do {
			ASSERT(t != NULL && t->t_lpl != NULL);

			/*
			 * Rehome threads with same lpl as this CPU when this
			 * is the last CPU in the lpl.
			 */

			if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl))
				lgrp_move_thread(t,
				    lgrp_choose(t, t->t_cpupart), 1);

			ASSERT(t->t_lpl->lpl_ncpu > 0);

			/*
			 * Update CPU last ran on if it was this CPU
			 */

			if (t->t_cpu == cp && t->t_bound_cpu != cp) {
				t->t_cpu = disp_lowpri_cpu(ncp,
				    t->t_lpl, t->t_pri, NULL);
			}
			ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
			    t->t_weakbound_cpu == cp);
			t = t->t_next;

		} while (t != curthread);
		ASSERT((cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) == 0);
		cp->cpu_flags |= CPU_OFFLINE;
		disp_cpu_inactive(cp);
		if (!no_quiesce)
			cp->cpu_flags |= CPU_QUIESCED;
		ncpus_online--;
		cpu_set_state(cp);
		cpu_inmotion = NULL;
		start_cpus();
		cpu_stats_kstat_destroy(cp);
		cpu_delete_intrstat(cp);
		lgrp_kstat_destroy(cp);
	}

out:
	cpu_inmotion = NULL;

	/*
	 * If we failed, re-enable interrupts.
	 * Do this even if cpu_intr_disable returned an error, because
	 * it may have partially disabled interrupts.
	 */
	if (error && intr_enable)
		cpu_intr_enable(cp);

	/*
	 * If we failed, but managed to offline the cyclic subsystem on this
	 * CPU, bring it back online.
	 */
	if (error && cyclic_off)
		cyclic_online(cp);

	/*
	 * If we failed, but managed to offline callouts on this CPU,
	 * bring it back online.
	 */
	if (error && callout_off)
		callout_cpu_online(cp);

	/*
	 * If we failed, tell the PG subsystem that the CPU is back
	 */
	pg_cpupart_in(cp, pp);

	/*
	 * If we failed, we need to notify everyone that this CPU is back on.
	 */
	if (error != 0) {
		CPU_NEW_GENERATION(cp);
		cpu_state_change_notify(cp->cpu_id, CPU_ON);
		cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
	}

	return (error);
}

/*
 * Mark the indicated CPU as faulted, taking it offline.
 */
int
cpu_faulted(cpu_t *cp, int flags)
{
	int	error = 0;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(!cpu_is_poweredoff(cp));

	if (cpu_is_offline(cp)) {
		cp->cpu_flags &= ~CPU_SPARE;
		cp->cpu_flags |= CPU_FAULTED;
		mp_cpu_faulted_enter(cp);
		cpu_set_state(cp);
		return (0);
	}

	if ((error = cpu_offline(cp, flags)) == 0) {
		cp->cpu_flags |= CPU_FAULTED;
		mp_cpu_faulted_enter(cp);
		cpu_set_state(cp);
	}

	return (error);
}

/*
 * Mark the indicated CPU as a spare, taking it offline.
 */
int
cpu_spare(cpu_t *cp, int flags)
{
	int	error = 0;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(!cpu_is_poweredoff(cp));

	if (cpu_is_offline(cp)) {
		if (cp->cpu_flags & CPU_FAULTED) {
			cp->cpu_flags &= ~CPU_FAULTED;
			mp_cpu_faulted_exit(cp);
		}
		cp->cpu_flags |= CPU_SPARE;
		cpu_set_state(cp);
		return (0);
	}

	if ((error = cpu_offline(cp, flags)) == 0) {
		cp->cpu_flags |= CPU_SPARE;
		cpu_set_state(cp);
	}

	return (error);
}

/*
 * Take the indicated CPU from poweroff to offline.
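 * The CPU is left offline; a subsequent cpu_online() is what actually
 * lets it run threads again.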
 */
int
cpu_poweron(cpu_t *cp)
{
	int	error = ENOTSUP;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_is_poweredoff(cp));

	error = mp_cpu_poweron(cp);	/* arch-dep hook */
	if (error == 0)
		cpu_set_state(cp);

	return (error);
}

/*
 * Take the indicated CPU from any inactive state to powered off.
 */
int
cpu_poweroff(cpu_t *cp)
{
	int	error = ENOTSUP;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_is_offline(cp));

	if (!(cp->cpu_flags & CPU_QUIESCED))
		return (EBUSY);		/* not completely idle */

	error = mp_cpu_poweroff(cp);	/* arch-dep hook */
	if (error == 0)
		cpu_set_state(cp);

	return (error);
}

/*
 * Initialize the Sequential CPU id lookup table
 */
void
cpu_seq_tbl_init()
{
	cpu_t	**tbl;

	tbl = kmem_zalloc(sizeof (struct cpu *) * max_ncpus, KM_SLEEP);
	tbl[0] = CPU;

	cpu_seq = tbl;
}

/*
 * Initialize the CPU lists for the first CPU.
 */
void
cpu_list_init(cpu_t *cp)
{
	cp->cpu_next = cp;
	cp->cpu_prev = cp;
	cpu_list = cp;
	clock_cpu_list = cp;

	cp->cpu_next_onln = cp;
	cp->cpu_prev_onln = cp;
	cpu_active = cp;

	cp->cpu_seqid = 0;
	CPUSET_ADD(cpu_seqid_inuse, 0);

	/*
	 * Bootstrap cpu_seq using cpu_list
	 * The cpu_seq[] table will be dynamically allocated
	 * when kmem later becomes available (but before going MP)
	 */
	cpu_seq = &cpu_list;

	cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
	cp_default.cp_cpulist = cp;
	cp_default.cp_ncpus = 1;
	cp->cpu_next_part = cp;
	cp->cpu_prev_part = cp;
	cp->cpu_part = &cp_default;

	CPUSET_ADD(cpu_available, cp->cpu_id);
}

/*
 * Insert a CPU into the list of available CPUs.
 */
void
cpu_add_unit(cpu_t *cp)
{
	int seqid;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_list != NULL);	/* list started in cpu_list_init */

	lgrp_config(LGRP_CONFIG_CPU_ADD, (uintptr_t)cp, 0);

	/*
	 * Note: most users of the cpu_list will grab the
	 * cpu_lock to ensure that it isn't modified.  However,
	 * certain users can't or won't do that.  To allow this
	 * we pause the other cpus.  Users who walk the list
	 * without cpu_lock must disable kernel preemption
	 * to ensure that the list isn't modified underneath
	 * them.  Also, any cached pointers to cpu structures
	 * must be revalidated by checking to see if the
	 * cpu_next pointer points to itself.  This check must
	 * be done with the cpu_lock held or kernel preemption
	 * disabled.  This check relies upon the fact that
	 * old cpu structures are not free'ed or cleared after
	 * they are removed from the cpu_list.
	 *
	 * Note that the clock code walks the cpu list dereferencing
	 * the cpu_part pointer, so we need to initialize it before
	 * adding the cpu to the list.
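	 *
	 * A sketch of the lock-free walk this implies (illustrative only;
	 * the walker must not block while on the list):
	 *
	 *	kpreempt_disable();
	 *	cp = cpu_list;
	 *	do {
	 *		... examine *cp, without blocking ...
	 *		cp = cp->cpu_next;
	 *	} while (cp != cpu_list);
	 *	kpreempt_enable();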
	 */
	cp->cpu_part = &cp_default;
	(void) pause_cpus(NULL);
	cp->cpu_next = cpu_list;
	cp->cpu_prev = cpu_list->cpu_prev;
	cpu_list->cpu_prev->cpu_next = cp;
	cpu_list->cpu_prev = cp;
	start_cpus();

	for (seqid = 0; CPU_IN_SET(cpu_seqid_inuse, seqid); seqid++)
		continue;
	CPUSET_ADD(cpu_seqid_inuse, seqid);
	cp->cpu_seqid = seqid;

	if (seqid > max_cpu_seqid_ever)
		max_cpu_seqid_ever = seqid;

	ASSERT(ncpus < max_ncpus);
	ncpus++;
	cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
	cpu[cp->cpu_id] = cp;
	CPUSET_ADD(cpu_available, cp->cpu_id);
	cpu_seq[cp->cpu_seqid] = cp;

	/*
	 * allocate a pause thread for this CPU.
	 */
	cpu_pause_alloc(cp);

	/*
	 * So that new CPUs won't have NULL prev_onln and next_onln pointers,
	 * link them into a list of just that CPU.
	 * This is so that disp_lowpri_cpu will work for thread_create in
	 * pause_cpus() when called from the startup thread in a new CPU.
	 */
	cp->cpu_next_onln = cp;
	cp->cpu_prev_onln = cp;
	cpu_info_kstat_create(cp);
	cp->cpu_next_part = cp;
	cp->cpu_prev_part = cp;

	init_cpu_mstate(cp, CMS_SYSTEM);

	pool_pset_mod = gethrtime();
}

/*
 * Do the opposite of cpu_add_unit().
 */
void
cpu_del_unit(int cpuid)
{
	struct cpu	*cp, *cpnext;

	ASSERT(MUTEX_HELD(&cpu_lock));
	cp = cpu[cpuid];
	ASSERT(cp != NULL);

	ASSERT(cp->cpu_next_onln == cp);
	ASSERT(cp->cpu_prev_onln == cp);
	ASSERT(cp->cpu_next_part == cp);
	ASSERT(cp->cpu_prev_part == cp);

	/*
	 * Tear down the CPU's physical ID cache, and update any
	 * processor groups
	 */
	pg_cpu_fini(cp, NULL);
	pghw_physid_destroy(cp);

	/*
	 * Destroy kstat stuff.
	 */
	cpu_info_kstat_destroy(cp);
	term_cpu_mstate(cp);
	/*
	 * Free up pause thread.
	 */
	cpu_pause_free(cp);
	CPUSET_DEL(cpu_available, cp->cpu_id);
	cpu[cp->cpu_id] = NULL;
	cpu_seq[cp->cpu_seqid] = NULL;

	/*
	 * The clock thread and mutex_vector_enter cannot hold the
	 * cpu_lock while traversing the cpu list, therefore we pause
	 * all other threads by pausing the other cpus.  These, and any
	 * other routines holding cpu pointers while possibly sleeping
	 * must be sure to call kpreempt_disable before processing the
	 * list and be sure to check that the cpu has not been deleted
	 * after any sleeps (check cp->cpu_next != NULL).  We guarantee
	 * to keep the deleted cpu structure around.
	 *
	 * Note that this MUST be done AFTER cpu_available
	 * has been updated so that we don't waste time
	 * trying to pause the cpu we're trying to delete.
	 */
	(void) pause_cpus(NULL);

	cpnext = cp->cpu_next;
	cp->cpu_prev->cpu_next = cp->cpu_next;
	cp->cpu_next->cpu_prev = cp->cpu_prev;
	if (cp == cpu_list)
		cpu_list = cpnext;

	/*
	 * Signals that the cpu has been deleted (see above).
	 */
	cp->cpu_next = NULL;
	cp->cpu_prev = NULL;

	start_cpus();

	CPUSET_DEL(cpu_seqid_inuse, cp->cpu_seqid);
	ncpus--;
	lgrp_config(LGRP_CONFIG_CPU_DEL, (uintptr_t)cp, 0);

	pool_pset_mod = gethrtime();
}

/*
 * Add a CPU to the list of active CPUs.
 *	This routine must not get any locks, because other CPUs are paused.
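 *	(Its callers, cpu_online() and cpu_add_active(), invoke it between
 *	pause_cpus() and start_cpus().)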
1877 */ 1878 static void 1879 cpu_add_active_internal(cpu_t *cp) 1880 { 1881 cpupart_t *pp = cp->cpu_part; 1882 1883 ASSERT(MUTEX_HELD(&cpu_lock)); 1884 ASSERT(cpu_list != NULL); /* list started in cpu_list_init */ 1885 1886 ncpus_online++; 1887 cpu_set_state(cp); 1888 cp->cpu_next_onln = cpu_active; 1889 cp->cpu_prev_onln = cpu_active->cpu_prev_onln; 1890 cpu_active->cpu_prev_onln->cpu_next_onln = cp; 1891 cpu_active->cpu_prev_onln = cp; 1892 1893 if (pp->cp_cpulist) { 1894 cp->cpu_next_part = pp->cp_cpulist; 1895 cp->cpu_prev_part = pp->cp_cpulist->cpu_prev_part; 1896 pp->cp_cpulist->cpu_prev_part->cpu_next_part = cp; 1897 pp->cp_cpulist->cpu_prev_part = cp; 1898 } else { 1899 ASSERT(pp->cp_ncpus == 0); 1900 pp->cp_cpulist = cp->cpu_next_part = cp->cpu_prev_part = cp; 1901 } 1902 pp->cp_ncpus++; 1903 if (pp->cp_ncpus == 1) { 1904 cp_numparts_nonempty++; 1905 ASSERT(cp_numparts_nonempty != 0); 1906 } 1907 1908 pg_cpu_active(cp); 1909 lgrp_config(LGRP_CONFIG_CPU_ONLINE, (uintptr_t)cp, 0); 1910 1911 bzero(&cp->cpu_loadavg, sizeof (cp->cpu_loadavg)); 1912 } 1913 1914 /* 1915 * Add a CPU to the list of active CPUs. 1916 * This is called from machine-dependent layers when a new CPU is started. 1917 */ 1918 void 1919 cpu_add_active(cpu_t *cp) 1920 { 1921 pg_cpupart_in(cp, cp->cpu_part); 1922 1923 pause_cpus(NULL); 1924 cpu_add_active_internal(cp); 1925 start_cpus(); 1926 1927 cpu_stats_kstat_create(cp); 1928 cpu_create_intrstat(cp); 1929 lgrp_kstat_create(cp); 1930 cpu_state_change_notify(cp->cpu_id, CPU_INIT); 1931 } 1932 1933 1934 /* 1935 * Remove a CPU from the list of active CPUs. 1936 * This routine must not get any locks, because other CPUs are paused. 1937 */ 1938 /* ARGSUSED */ 1939 static void 1940 cpu_remove_active(cpu_t *cp) 1941 { 1942 cpupart_t *pp = cp->cpu_part; 1943 1944 ASSERT(MUTEX_HELD(&cpu_lock)); 1945 ASSERT(cp->cpu_next_onln != cp); /* not the last one */ 1946 ASSERT(cp->cpu_prev_onln != cp); /* not the last one */ 1947 1948 pg_cpu_inactive(cp); 1949 1950 lgrp_config(LGRP_CONFIG_CPU_OFFLINE, (uintptr_t)cp, 0); 1951 1952 if (cp == clock_cpu_list) 1953 clock_cpu_list = cp->cpu_next_onln; 1954 1955 cp->cpu_prev_onln->cpu_next_onln = cp->cpu_next_onln; 1956 cp->cpu_next_onln->cpu_prev_onln = cp->cpu_prev_onln; 1957 if (cpu_active == cp) { 1958 cpu_active = cp->cpu_next_onln; 1959 } 1960 cp->cpu_next_onln = cp; 1961 cp->cpu_prev_onln = cp; 1962 1963 cp->cpu_prev_part->cpu_next_part = cp->cpu_next_part; 1964 cp->cpu_next_part->cpu_prev_part = cp->cpu_prev_part; 1965 if (pp->cp_cpulist == cp) { 1966 pp->cp_cpulist = cp->cpu_next_part; 1967 ASSERT(pp->cp_cpulist != cp); 1968 } 1969 cp->cpu_next_part = cp; 1970 cp->cpu_prev_part = cp; 1971 pp->cp_ncpus--; 1972 if (pp->cp_ncpus == 0) { 1973 cp_numparts_nonempty--; 1974 ASSERT(cp_numparts_nonempty != 0); 1975 } 1976 } 1977 1978 /* 1979 * Routine used to setup a newly inserted CPU in preparation for starting 1980 * it running code. 1981 */ 1982 int 1983 cpu_configure(int cpuid) 1984 { 1985 int retval = 0; 1986 1987 ASSERT(MUTEX_HELD(&cpu_lock)); 1988 1989 /* 1990 * Some structures are statically allocated based upon 1991 * the maximum number of cpus the system supports. Do not 1992 * try to add anything beyond this limit. 
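 * (The cpu[] array, indexed by processor id, is one such statically
 * sized structure, hence the range check of cpuid against NCPU below.)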
1993 */ 1994 if (cpuid < 0 || cpuid >= NCPU) { 1995 return (EINVAL); 1996 } 1997 1998 if ((cpu[cpuid] != NULL) && (cpu[cpuid]->cpu_flags != 0)) { 1999 return (EALREADY); 2000 } 2001 2002 if ((retval = mp_cpu_configure(cpuid)) != 0) { 2003 return (retval); 2004 } 2005 2006 cpu[cpuid]->cpu_flags = CPU_QUIESCED | CPU_OFFLINE | CPU_POWEROFF; 2007 cpu_set_state(cpu[cpuid]); 2008 retval = cpu_state_change_hooks(cpuid, CPU_CONFIG, CPU_UNCONFIG); 2009 if (retval != 0) 2010 (void) mp_cpu_unconfigure(cpuid); 2011 2012 return (retval); 2013 } 2014 2015 /* 2016 * Routine used to cleanup a CPU that has been powered off. This will 2017 * destroy all per-cpu information related to this cpu. 2018 */ 2019 int 2020 cpu_unconfigure(int cpuid) 2021 { 2022 int error; 2023 2024 ASSERT(MUTEX_HELD(&cpu_lock)); 2025 2026 if (cpu[cpuid] == NULL) { 2027 return (ENODEV); 2028 } 2029 2030 if (cpu[cpuid]->cpu_flags == 0) { 2031 return (EALREADY); 2032 } 2033 2034 if ((cpu[cpuid]->cpu_flags & CPU_POWEROFF) == 0) { 2035 return (EBUSY); 2036 } 2037 2038 if (cpu[cpuid]->cpu_props != NULL) { 2039 (void) nvlist_free(cpu[cpuid]->cpu_props); 2040 cpu[cpuid]->cpu_props = NULL; 2041 } 2042 2043 error = cpu_state_change_hooks(cpuid, CPU_UNCONFIG, CPU_CONFIG); 2044 2045 if (error != 0) 2046 return (error); 2047 2048 return (mp_cpu_unconfigure(cpuid)); 2049 } 2050 2051 /* 2052 * Routines for registering and de-registering cpu_setup callback functions. 2053 * 2054 * Caller's context 2055 * These routines must not be called from a driver's attach(9E) or 2056 * detach(9E) entry point. 2057 * 2058 * NOTE: CPU callbacks should not block. They are called with cpu_lock held. 2059 */ 2060 2061 /* 2062 * Ideally, these would be dynamically allocated and put into a linked 2063 * list; however that is not feasible because the registration routine 2064 * has to be available before the kmem allocator is working (in fact, 2065 * it is called by the kmem allocator init code). In any case, there 2066 * are quite a few extra entries for future users. 2067 */ 2068 #define NCPU_SETUPS 20 2069 2070 struct cpu_setup { 2071 cpu_setup_func_t *func; 2072 void *arg; 2073 } cpu_setups[NCPU_SETUPS]; 2074 2075 void 2076 register_cpu_setup_func(cpu_setup_func_t *func, void *arg) 2077 { 2078 int i; 2079 2080 ASSERT(MUTEX_HELD(&cpu_lock)); 2081 2082 for (i = 0; i < NCPU_SETUPS; i++) 2083 if (cpu_setups[i].func == NULL) 2084 break; 2085 if (i >= NCPU_SETUPS) 2086 cmn_err(CE_PANIC, "Ran out of cpu_setup callback entries"); 2087 2088 cpu_setups[i].func = func; 2089 cpu_setups[i].arg = arg; 2090 } 2091 2092 void 2093 unregister_cpu_setup_func(cpu_setup_func_t *func, void *arg) 2094 { 2095 int i; 2096 2097 ASSERT(MUTEX_HELD(&cpu_lock)); 2098 2099 for (i = 0; i < NCPU_SETUPS; i++) 2100 if ((cpu_setups[i].func == func) && 2101 (cpu_setups[i].arg == arg)) 2102 break; 2103 if (i >= NCPU_SETUPS) 2104 cmn_err(CE_PANIC, "Could not find cpu_setup callback to " 2105 "deregister"); 2106 2107 cpu_setups[i].func = NULL; 2108 cpu_setups[i].arg = 0; 2109 } 2110 2111 /* 2112 * Call any state change hooks for this CPU, ignore any errors. 2113 */ 2114 void 2115 cpu_state_change_notify(int id, cpu_setup_t what) 2116 { 2117 int i; 2118 2119 ASSERT(MUTEX_HELD(&cpu_lock)); 2120 2121 for (i = 0; i < NCPU_SETUPS; i++) { 2122 if (cpu_setups[i].func != NULL) { 2123 cpu_setups[i].func(what, id, cpu_setups[i].arg); 2124 } 2125 } 2126 } 2127 2128 /* 2129 * Call any state change hooks for this CPU, undo it if error found. 
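 *
 * For reference, a subsystem hooks into these notifications roughly
 * as sketched below (my_cpu_setup is a made-up name used only for
 * illustration); the callback runs with cpu_lock held and must not
 * block:
 *
 *	static int
 *	my_cpu_setup(cpu_setup_t what, int id, void *arg)
 *	{
 *		switch (what) {
 *		case CPU_CONFIG:
 *			(set up per-CPU state for cpu id)
 *			break;
 *		case CPU_UNCONFIG:
 *			(tear it down again)
 *			break;
 *		default:
 *			break;
 *		}
 *		return (0);	(non-zero makes the caller run the
 *				undo pass implemented below)
 *	}
 *
 *	mutex_enter(&cpu_lock);
 *	register_cpu_setup_func(my_cpu_setup, NULL);
 *	mutex_exit(&cpu_lock);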
2130 */ 2131 static int 2132 cpu_state_change_hooks(int id, cpu_setup_t what, cpu_setup_t undo) 2133 { 2134 int i; 2135 int retval = 0; 2136 2137 ASSERT(MUTEX_HELD(&cpu_lock)); 2138 2139 for (i = 0; i < NCPU_SETUPS; i++) { 2140 if (cpu_setups[i].func != NULL) { 2141 retval = cpu_setups[i].func(what, id, 2142 cpu_setups[i].arg); 2143 if (retval) { 2144 for (i--; i >= 0; i--) { 2145 if (cpu_setups[i].func != NULL) 2146 cpu_setups[i].func(undo, 2147 id, cpu_setups[i].arg); 2148 } 2149 break; 2150 } 2151 } 2152 } 2153 return (retval); 2154 } 2155 2156 /* 2157 * Export information about this CPU via the kstat mechanism. 2158 */ 2159 static struct { 2160 kstat_named_t ci_state; 2161 kstat_named_t ci_state_begin; 2162 kstat_named_t ci_cpu_type; 2163 kstat_named_t ci_fpu_type; 2164 kstat_named_t ci_clock_MHz; 2165 kstat_named_t ci_chip_id; 2166 kstat_named_t ci_implementation; 2167 kstat_named_t ci_brandstr; 2168 kstat_named_t ci_core_id; 2169 kstat_named_t ci_curr_clock_Hz; 2170 kstat_named_t ci_supp_freq_Hz; 2171 kstat_named_t ci_pg_id; 2172 #if defined(__sparcv9) 2173 kstat_named_t ci_device_ID; 2174 kstat_named_t ci_cpu_fru; 2175 #endif 2176 #if defined(__x86) 2177 kstat_named_t ci_vendorstr; 2178 kstat_named_t ci_family; 2179 kstat_named_t ci_model; 2180 kstat_named_t ci_step; 2181 kstat_named_t ci_clogid; 2182 kstat_named_t ci_pkg_core_id; 2183 kstat_named_t ci_ncpuperchip; 2184 kstat_named_t ci_ncoreperchip; 2185 kstat_named_t ci_max_cstates; 2186 kstat_named_t ci_curr_cstate; 2187 kstat_named_t ci_cacheid; 2188 kstat_named_t ci_sktstr; 2189 #endif 2190 } cpu_info_template = { 2191 { "state", KSTAT_DATA_CHAR }, 2192 { "state_begin", KSTAT_DATA_LONG }, 2193 { "cpu_type", KSTAT_DATA_CHAR }, 2194 { "fpu_type", KSTAT_DATA_CHAR }, 2195 { "clock_MHz", KSTAT_DATA_LONG }, 2196 { "chip_id", KSTAT_DATA_LONG }, 2197 { "implementation", KSTAT_DATA_STRING }, 2198 { "brand", KSTAT_DATA_STRING }, 2199 { "core_id", KSTAT_DATA_LONG }, 2200 { "current_clock_Hz", KSTAT_DATA_UINT64 }, 2201 { "supported_frequencies_Hz", KSTAT_DATA_STRING }, 2202 { "pg_id", KSTAT_DATA_LONG }, 2203 #if defined(__sparcv9) 2204 { "device_ID", KSTAT_DATA_UINT64 }, 2205 { "cpu_fru", KSTAT_DATA_STRING }, 2206 #endif 2207 #if defined(__x86) 2208 { "vendor_id", KSTAT_DATA_STRING }, 2209 { "family", KSTAT_DATA_INT32 }, 2210 { "model", KSTAT_DATA_INT32 }, 2211 { "stepping", KSTAT_DATA_INT32 }, 2212 { "clog_id", KSTAT_DATA_INT32 }, 2213 { "pkg_core_id", KSTAT_DATA_LONG }, 2214 { "ncpu_per_chip", KSTAT_DATA_INT32 }, 2215 { "ncore_per_chip", KSTAT_DATA_INT32 }, 2216 { "supported_max_cstates", KSTAT_DATA_INT32 }, 2217 { "current_cstate", KSTAT_DATA_INT32 }, 2218 { "cache_id", KSTAT_DATA_INT32 }, 2219 { "socket_type", KSTAT_DATA_STRING }, 2220 #endif 2221 }; 2222 2223 static kmutex_t cpu_info_template_lock; 2224 2225 static int 2226 cpu_info_kstat_update(kstat_t *ksp, int rw) 2227 { 2228 cpu_t *cp = ksp->ks_private; 2229 const char *pi_state; 2230 2231 if (rw == KSTAT_WRITE) 2232 return (EACCES); 2233 2234 #if defined(__x86) 2235 /* Is the cpu still initialising itself? 
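The cpuid identification data used below is not valid until at least pass 1 of cpuid initialisation has completed, hence the check.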
*/ 2236 if (cpuid_checkpass(cp, 1) == 0) 2237 return (ENXIO); 2238 #endif 2239 switch (cp->cpu_type_info.pi_state) { 2240 case P_ONLINE: 2241 pi_state = PS_ONLINE; 2242 break; 2243 case P_POWEROFF: 2244 pi_state = PS_POWEROFF; 2245 break; 2246 case P_NOINTR: 2247 pi_state = PS_NOINTR; 2248 break; 2249 case P_FAULTED: 2250 pi_state = PS_FAULTED; 2251 break; 2252 case P_SPARE: 2253 pi_state = PS_SPARE; 2254 break; 2255 case P_OFFLINE: 2256 pi_state = PS_OFFLINE; 2257 break; 2258 default: 2259 pi_state = "unknown"; 2260 } 2261 (void) strcpy(cpu_info_template.ci_state.value.c, pi_state); 2262 cpu_info_template.ci_state_begin.value.l = cp->cpu_state_begin; 2263 (void) strncpy(cpu_info_template.ci_cpu_type.value.c, 2264 cp->cpu_type_info.pi_processor_type, 15); 2265 (void) strncpy(cpu_info_template.ci_fpu_type.value.c, 2266 cp->cpu_type_info.pi_fputypes, 15); 2267 cpu_info_template.ci_clock_MHz.value.l = cp->cpu_type_info.pi_clock; 2268 cpu_info_template.ci_chip_id.value.l = 2269 pg_plat_hw_instance_id(cp, PGHW_CHIP); 2270 kstat_named_setstr(&cpu_info_template.ci_implementation, 2271 cp->cpu_idstr); 2272 kstat_named_setstr(&cpu_info_template.ci_brandstr, cp->cpu_brandstr); 2273 cpu_info_template.ci_core_id.value.l = pg_plat_get_core_id(cp); 2274 cpu_info_template.ci_curr_clock_Hz.value.ui64 = 2275 cp->cpu_curr_clock; 2276 cpu_info_template.ci_pg_id.value.l = 2277 cp->cpu_pg && cp->cpu_pg->cmt_lineage ? 2278 cp->cpu_pg->cmt_lineage->pg_id : -1; 2279 kstat_named_setstr(&cpu_info_template.ci_supp_freq_Hz, 2280 cp->cpu_supp_freqs); 2281 #if defined(__sparcv9) 2282 cpu_info_template.ci_device_ID.value.ui64 = 2283 cpunodes[cp->cpu_id].device_id; 2284 kstat_named_setstr(&cpu_info_template.ci_cpu_fru, cpu_fru_fmri(cp)); 2285 #endif 2286 #if defined(__x86) 2287 kstat_named_setstr(&cpu_info_template.ci_vendorstr, 2288 cpuid_getvendorstr(cp)); 2289 cpu_info_template.ci_family.value.l = cpuid_getfamily(cp); 2290 cpu_info_template.ci_model.value.l = cpuid_getmodel(cp); 2291 cpu_info_template.ci_step.value.l = cpuid_getstep(cp); 2292 cpu_info_template.ci_clogid.value.l = cpuid_get_clogid(cp); 2293 cpu_info_template.ci_ncpuperchip.value.l = cpuid_get_ncpu_per_chip(cp); 2294 cpu_info_template.ci_ncoreperchip.value.l = 2295 cpuid_get_ncore_per_chip(cp); 2296 cpu_info_template.ci_pkg_core_id.value.l = cpuid_get_pkgcoreid(cp); 2297 cpu_info_template.ci_max_cstates.value.l = cp->cpu_m.max_cstates; 2298 cpu_info_template.ci_curr_cstate.value.l = cpu_idle_get_cpu_state(cp); 2299 cpu_info_template.ci_cacheid.value.i32 = cpuid_get_cacheid(cp); 2300 kstat_named_setstr(&cpu_info_template.ci_sktstr, 2301 cpuid_getsocketstr(cp)); 2302 #endif 2303 2304 return (0); 2305 } 2306 2307 static void 2308 cpu_info_kstat_create(cpu_t *cp) 2309 { 2310 zoneid_t zoneid; 2311 2312 ASSERT(MUTEX_HELD(&cpu_lock)); 2313 2314 if (pool_pset_enabled()) 2315 zoneid = GLOBAL_ZONEID; 2316 else 2317 zoneid = ALL_ZONES; 2318 if ((cp->cpu_info_kstat = kstat_create_zone("cpu_info", cp->cpu_id, 2319 NULL, "misc", KSTAT_TYPE_NAMED, 2320 sizeof (cpu_info_template) / sizeof (kstat_named_t), 2321 KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_VAR_SIZE, zoneid)) != NULL) { 2322 cp->cpu_info_kstat->ks_data_size += 2 * CPU_IDSTRLEN; 2323 #if defined(__sparcv9) 2324 cp->cpu_info_kstat->ks_data_size += 2325 strlen(cpu_fru_fmri(cp)) + 1; 2326 #endif 2327 #if defined(__x86) 2328 cp->cpu_info_kstat->ks_data_size += X86_VENDOR_STRLEN; 2329 #endif 2330 if (cp->cpu_supp_freqs != NULL) 2331 cp->cpu_info_kstat->ks_data_size += 2332 strlen(cp->cpu_supp_freqs) + 1; 2333 
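		/*
		 * This kstat is virtual and uses KSTAT_DATA_STRING members,
		 * so the string bytes live outside the kstat_named_t array;
		 * ks_data_size is grown above so that kstat snapshots
		 * allocate enough room for those strings as well.
		 */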
cp->cpu_info_kstat->ks_lock = &cpu_info_template_lock; 2334 cp->cpu_info_kstat->ks_data = &cpu_info_template; 2335 cp->cpu_info_kstat->ks_private = cp; 2336 cp->cpu_info_kstat->ks_update = cpu_info_kstat_update; 2337 kstat_install(cp->cpu_info_kstat); 2338 } 2339 } 2340 2341 static void 2342 cpu_info_kstat_destroy(cpu_t *cp) 2343 { 2344 ASSERT(MUTEX_HELD(&cpu_lock)); 2345 2346 kstat_delete(cp->cpu_info_kstat); 2347 cp->cpu_info_kstat = NULL; 2348 } 2349 2350 /* 2351 * Create and install kstats for the boot CPU. 2352 */ 2353 void 2354 cpu_kstat_init(cpu_t *cp) 2355 { 2356 mutex_enter(&cpu_lock); 2357 cpu_info_kstat_create(cp); 2358 cpu_stats_kstat_create(cp); 2359 cpu_create_intrstat(cp); 2360 cpu_set_state(cp); 2361 mutex_exit(&cpu_lock); 2362 } 2363 2364 /* 2365 * Make visible to the zone that subset of the cpu information that would be 2366 * initialized when a cpu is configured (but still offline). 2367 */ 2368 void 2369 cpu_visibility_configure(cpu_t *cp, zone_t *zone) 2370 { 2371 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES; 2372 2373 ASSERT(MUTEX_HELD(&cpu_lock)); 2374 ASSERT(pool_pset_enabled()); 2375 ASSERT(cp != NULL); 2376 2377 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2378 zone->zone_ncpus++; 2379 ASSERT(zone->zone_ncpus <= ncpus); 2380 } 2381 if (cp->cpu_info_kstat != NULL) 2382 kstat_zone_add(cp->cpu_info_kstat, zoneid); 2383 } 2384 2385 /* 2386 * Make visible to the zone that subset of the cpu information that would be 2387 * initialized when a previously configured cpu is onlined. 2388 */ 2389 void 2390 cpu_visibility_online(cpu_t *cp, zone_t *zone) 2391 { 2392 kstat_t *ksp; 2393 char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */ 2394 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES; 2395 processorid_t cpun; 2396 2397 ASSERT(MUTEX_HELD(&cpu_lock)); 2398 ASSERT(pool_pset_enabled()); 2399 ASSERT(cp != NULL); 2400 ASSERT(cpu_is_active(cp)); 2401 2402 cpun = cp->cpu_id; 2403 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2404 zone->zone_ncpus_online++; 2405 ASSERT(zone->zone_ncpus_online <= ncpus_online); 2406 } 2407 (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun); 2408 if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES)) 2409 != NULL) { 2410 kstat_zone_add(ksp, zoneid); 2411 kstat_rele(ksp); 2412 } 2413 if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) { 2414 kstat_zone_add(ksp, zoneid); 2415 kstat_rele(ksp); 2416 } 2417 if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) { 2418 kstat_zone_add(ksp, zoneid); 2419 kstat_rele(ksp); 2420 } 2421 if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) != 2422 NULL) { 2423 kstat_zone_add(ksp, zoneid); 2424 kstat_rele(ksp); 2425 } 2426 } 2427 2428 /* 2429 * Update relevant kstats such that cpu is now visible to processes 2430 * executing in specified zone. 2431 */ 2432 void 2433 cpu_visibility_add(cpu_t *cp, zone_t *zone) 2434 { 2435 cpu_visibility_configure(cp, zone); 2436 if (cpu_is_active(cp)) 2437 cpu_visibility_online(cp, zone); 2438 } 2439 2440 /* 2441 * Make invisible to the zone that subset of the cpu information that would be 2442 * torn down when a previously offlined cpu is unconfigured. 2443 */ 2444 void 2445 cpu_visibility_unconfigure(cpu_t *cp, zone_t *zone) 2446 { 2447 zoneid_t zoneid = zone ? 
zone->zone_id : ALL_ZONES; 2448 2449 ASSERT(MUTEX_HELD(&cpu_lock)); 2450 ASSERT(pool_pset_enabled()); 2451 ASSERT(cp != NULL); 2452 2453 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2454 ASSERT(zone->zone_ncpus != 0); 2455 zone->zone_ncpus--; 2456 } 2457 if (cp->cpu_info_kstat) 2458 kstat_zone_remove(cp->cpu_info_kstat, zoneid); 2459 } 2460 2461 /* 2462 * Make invisible to the zone that subset of the cpu information that would be 2463 * torn down when a cpu is offlined (but still configured). 2464 */ 2465 void 2466 cpu_visibility_offline(cpu_t *cp, zone_t *zone) 2467 { 2468 kstat_t *ksp; 2469 char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */ 2470 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES; 2471 processorid_t cpun; 2472 2473 ASSERT(MUTEX_HELD(&cpu_lock)); 2474 ASSERT(pool_pset_enabled()); 2475 ASSERT(cp != NULL); 2476 ASSERT(cpu_is_active(cp)); 2477 2478 cpun = cp->cpu_id; 2479 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2480 ASSERT(zone->zone_ncpus_online != 0); 2481 zone->zone_ncpus_online--; 2482 } 2483 2484 if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) != 2485 NULL) { 2486 kstat_zone_remove(ksp, zoneid); 2487 kstat_rele(ksp); 2488 } 2489 if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) { 2490 kstat_zone_remove(ksp, zoneid); 2491 kstat_rele(ksp); 2492 } 2493 if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) { 2494 kstat_zone_remove(ksp, zoneid); 2495 kstat_rele(ksp); 2496 } 2497 (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun); 2498 if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES)) 2499 != NULL) { 2500 kstat_zone_remove(ksp, zoneid); 2501 kstat_rele(ksp); 2502 } 2503 } 2504 2505 /* 2506 * Update relevant kstats such that cpu is no longer visible to processes 2507 * executing in specified zone. 2508 */ 2509 void 2510 cpu_visibility_remove(cpu_t *cp, zone_t *zone) 2511 { 2512 if (cpu_is_active(cp)) 2513 cpu_visibility_offline(cp, zone); 2514 cpu_visibility_unconfigure(cp, zone); 2515 } 2516 2517 /* 2518 * Bind a thread to a CPU as requested. 2519 */ 2520 int 2521 cpu_bind_thread(kthread_id_t tp, processorid_t bind, processorid_t *obind, 2522 int *error) 2523 { 2524 processorid_t binding; 2525 cpu_t *cp = NULL; 2526 2527 ASSERT(MUTEX_HELD(&cpu_lock)); 2528 ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock)); 2529 2530 thread_lock(tp); 2531 2532 /* 2533 * Record old binding, but change the obind, which was initialized 2534 * to PBIND_NONE, only if this thread has a binding. This avoids 2535 * reporting PBIND_NONE for a process when some LWPs are bound. 2536 */ 2537 binding = tp->t_bind_cpu; 2538 if (binding != PBIND_NONE) 2539 *obind = binding; /* record old binding */ 2540 2541 switch (bind) { 2542 case PBIND_QUERY: 2543 /* Just return the old binding */ 2544 thread_unlock(tp); 2545 return (0); 2546 2547 case PBIND_QUERY_TYPE: 2548 /* Return the binding type */ 2549 *obind = TB_CPU_IS_SOFT(tp) ? 
PBIND_SOFT : PBIND_HARD; 2550 thread_unlock(tp); 2551 return (0); 2552 2553 case PBIND_SOFT: 2554 /* 2555 * Set soft binding for this thread and return the actual 2556 * binding 2557 */ 2558 TB_CPU_SOFT_SET(tp); 2559 thread_unlock(tp); 2560 return (0); 2561 2562 case PBIND_HARD: 2563 /* 2564 * Set hard binding for this thread and return the actual 2565 * binding 2566 */ 2567 TB_CPU_HARD_SET(tp); 2568 thread_unlock(tp); 2569 return (0); 2570 2571 default: 2572 break; 2573 } 2574 2575 /* 2576 * If this thread/LWP cannot be bound because of permission 2577 * problems, just note that and return success so that the 2578 * other threads/LWPs will be bound. This is the way 2579 * processor_bind() is defined to work. 2580 * 2581 * Binding will get EPERM if the thread is of system class 2582 * or hasprocperm() fails. 2583 */ 2584 if (tp->t_cid == 0 || !hasprocperm(tp->t_cred, CRED())) { 2585 *error = EPERM; 2586 thread_unlock(tp); 2587 return (0); 2588 } 2589 2590 binding = bind; 2591 if (binding != PBIND_NONE) { 2592 cp = cpu_get((processorid_t)binding); 2593 /* 2594 * Make sure binding is valid and is in right partition. 2595 */ 2596 if (cp == NULL || tp->t_cpupart != cp->cpu_part) { 2597 *error = EINVAL; 2598 thread_unlock(tp); 2599 return (0); 2600 } 2601 } 2602 tp->t_bind_cpu = binding; /* set new binding */ 2603 2604 /* 2605 * If there is no system-set reason for affinity, set 2606 * the t_bound_cpu field to reflect the binding. 2607 */ 2608 if (tp->t_affinitycnt == 0) { 2609 if (binding == PBIND_NONE) { 2610 /* 2611 * We may need to adjust disp_max_unbound_pri 2612 * since we're becoming unbound. 2613 */ 2614 disp_adjust_unbound_pri(tp); 2615 2616 tp->t_bound_cpu = NULL; /* set new binding */ 2617 2618 /* 2619 * Move thread to lgroup with strongest affinity 2620 * after unbinding 2621 */ 2622 if (tp->t_lgrp_affinity) 2623 lgrp_move_thread(tp, 2624 lgrp_choose(tp, tp->t_cpupart), 1); 2625 2626 if (tp->t_state == TS_ONPROC && 2627 tp->t_cpu->cpu_part != tp->t_cpupart) 2628 cpu_surrender(tp); 2629 } else { 2630 lpl_t *lpl; 2631 2632 tp->t_bound_cpu = cp; 2633 ASSERT(cp->cpu_lpl != NULL); 2634 2635 /* 2636 * Set home to lgroup with most affinity containing CPU 2637 * that thread is being bound or minimum bounding 2638 * lgroup if no affinities set 2639 */ 2640 if (tp->t_lgrp_affinity) 2641 lpl = lgrp_affinity_best(tp, tp->t_cpupart, 2642 LGRP_NONE, B_FALSE); 2643 else 2644 lpl = cp->cpu_lpl; 2645 2646 if (tp->t_lpl != lpl) { 2647 /* can't grab cpu_lock */ 2648 lgrp_move_thread(tp, lpl, 1); 2649 } 2650 2651 /* 2652 * Make the thread switch to the bound CPU. 2653 * If the thread is runnable, we need to 2654 * requeue it even if t_cpu is already set 2655 * to the right CPU, since it may be on a 2656 * kpreempt queue and need to move to a local 2657 * queue. We could check t_disp_queue to 2658 * avoid unnecessary overhead if it's already 2659 * on the right queue, but since this isn't 2660 * a performance-critical operation it doesn't 2661 * seem worth the extra code and complexity. 2662 * 2663 * If the thread is weakbound to the cpu then it will 2664 * resist the new binding request until the weak 2665 * binding drops. The cpu_surrender or requeueing 2666 * below could be skipped in such cases (since it 2667 * will have no effect), but that would require 2668 * thread_allowmigrate to acquire thread_lock so 2669 * we'll take the very occasional hit here instead. 
2670 */ 2671 if (tp->t_state == TS_ONPROC) { 2672 cpu_surrender(tp); 2673 } else if (tp->t_state == TS_RUN) { 2674 cpu_t *ocp = tp->t_cpu; 2675 2676 (void) dispdeq(tp); 2677 setbackdq(tp); 2678 /* 2679 * Either on the bound CPU's disp queue now, 2680 * or swapped out or on the swap queue. 2681 */ 2682 ASSERT(tp->t_disp_queue == cp->cpu_disp || 2683 tp->t_weakbound_cpu == ocp || 2684 (tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) 2685 != TS_LOAD); 2686 } 2687 } 2688 } 2689 2690 /* 2691 * Our binding has changed; set TP_CHANGEBIND. 2692 */ 2693 tp->t_proc_flag |= TP_CHANGEBIND; 2694 aston(tp); 2695 2696 thread_unlock(tp); 2697 2698 return (0); 2699 } 2700 2701 #if CPUSET_WORDS > 1 2702 2703 /* 2704 * Functions for implementing cpuset operations when a cpuset is more 2705 * than one word. On platforms where a cpuset is a single word these 2706 * are implemented as macros in cpuvar.h. 2707 */ 2708 2709 void 2710 cpuset_all(cpuset_t *s) 2711 { 2712 int i; 2713 2714 for (i = 0; i < CPUSET_WORDS; i++) 2715 s->cpub[i] = ~0UL; 2716 } 2717 2718 void 2719 cpuset_all_but(cpuset_t *s, uint_t cpu) 2720 { 2721 cpuset_all(s); 2722 CPUSET_DEL(*s, cpu); 2723 } 2724 2725 void 2726 cpuset_only(cpuset_t *s, uint_t cpu) 2727 { 2728 CPUSET_ZERO(*s); 2729 CPUSET_ADD(*s, cpu); 2730 } 2731 2732 int 2733 cpuset_isnull(cpuset_t *s) 2734 { 2735 int i; 2736 2737 for (i = 0; i < CPUSET_WORDS; i++) 2738 if (s->cpub[i] != 0) 2739 return (0); 2740 return (1); 2741 } 2742 2743 int 2744 cpuset_cmp(cpuset_t *s1, cpuset_t *s2) 2745 { 2746 int i; 2747 2748 for (i = 0; i < CPUSET_WORDS; i++) 2749 if (s1->cpub[i] != s2->cpub[i]) 2750 return (0); 2751 return (1); 2752 } 2753 2754 uint_t 2755 cpuset_find(cpuset_t *s) 2756 { 2757 2758 uint_t i; 2759 uint_t cpu = (uint_t)-1; 2760 2761 /* 2762 * Find a cpu in the cpuset 2763 */ 2764 for (i = 0; i < CPUSET_WORDS; i++) { 2765 cpu = (uint_t)(lowbit(s->cpub[i]) - 1); 2766 if (cpu != (uint_t)-1) { 2767 cpu += i * BT_NBIPUL; 2768 break; 2769 } 2770 } 2771 return (cpu); 2772 } 2773 2774 void 2775 cpuset_bounds(cpuset_t *s, uint_t *smallestid, uint_t *largestid) 2776 { 2777 int i, j; 2778 uint_t bit; 2779 2780 /* 2781 * First, find the smallest cpu id in the set. 2782 */ 2783 for (i = 0; i < CPUSET_WORDS; i++) { 2784 if (s->cpub[i] != 0) { 2785 bit = (uint_t)(lowbit(s->cpub[i]) - 1); 2786 ASSERT(bit != (uint_t)-1); 2787 *smallestid = bit + (i * BT_NBIPUL); 2788 2789 /* 2790 * Now find the largest cpu id in 2791 * the set and return immediately. 2792 * Done in an inner loop to avoid 2793 * having to break out of the first 2794 * loop. 2795 */ 2796 for (j = CPUSET_WORDS - 1; j >= i; j--) { 2797 if (s->cpub[j] != 0) { 2798 bit = (uint_t)(highbit(s->cpub[j]) - 1); 2799 ASSERT(bit != (uint_t)-1); 2800 *largestid = bit + (j * BT_NBIPUL); 2801 ASSERT(*largestid >= *smallestid); 2802 return; 2803 } 2804 } 2805 2806 /* 2807 * If this code is reached, a 2808 * smallestid was found, but not a 2809 * largestid. The cpuset must have 2810 * been changed during the course 2811 * of this function call. 2812 */ 2813 ASSERT(0); 2814 } 2815 } 2816 *smallestid = *largestid = CPUSET_NOTINSET; 2817 } 2818 2819 #endif /* CPUSET_WORDS */ 2820 2821 /* 2822 * Unbind threads bound to specified CPU. 2823 * 2824 * If `unbind_all_threads' is true, unbind all user threads bound to a given 2825 * CPU. Otherwise unbind all soft-bound user threads. 
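 * Hard bindings (TB_CPU_IS_HARD()) are left in place in that case; the
 * loop below simply skips them. Either way, a binding is dropped by
 * calling cpu_bind_thread() with PBIND_NONE.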
2826 */ 2827 int 2828 cpu_unbind(processorid_t cpu, boolean_t unbind_all_threads) 2829 { 2830 processorid_t obind; 2831 kthread_t *tp; 2832 int ret = 0; 2833 proc_t *pp; 2834 int err, berr = 0; 2835 2836 ASSERT(MUTEX_HELD(&cpu_lock)); 2837 2838 mutex_enter(&pidlock); 2839 for (pp = practive; pp != NULL; pp = pp->p_next) { 2840 mutex_enter(&pp->p_lock); 2841 tp = pp->p_tlist; 2842 /* 2843 * Skip zombies, kernel processes, and processes in 2844 * other zones, if called from a non-global zone. 2845 */ 2846 if (tp == NULL || (pp->p_flag & SSYS) || 2847 !HASZONEACCESS(curproc, pp->p_zone->zone_id)) { 2848 mutex_exit(&pp->p_lock); 2849 continue; 2850 } 2851 do { 2852 if (tp->t_bind_cpu != cpu) 2853 continue; 2854 /* 2855 * Skip threads with hard binding when 2856 * `unbind_all_threads' is not specified. 2857 */ 2858 if (!unbind_all_threads && TB_CPU_IS_HARD(tp)) 2859 continue; 2860 err = cpu_bind_thread(tp, PBIND_NONE, &obind, &berr); 2861 if (ret == 0) 2862 ret = err; 2863 } while ((tp = tp->t_forw) != pp->p_tlist); 2864 mutex_exit(&pp->p_lock); 2865 } 2866 mutex_exit(&pidlock); 2867 if (ret == 0) 2868 ret = berr; 2869 return (ret); 2870 } 2871 2872 2873 /* 2874 * Destroy all remaining bound threads on a cpu. 2875 */ 2876 void 2877 cpu_destroy_bound_threads(cpu_t *cp) 2878 { 2879 extern id_t syscid; 2880 register kthread_id_t t, tlist, tnext; 2881 2882 /* 2883 * Destroy all remaining bound threads on the cpu. This 2884 * should include both the interrupt threads and the idle thread. 2885 * This requires some care, since we need to traverse the 2886 * thread list with the pidlock mutex locked, but thread_free 2887 * also locks the pidlock mutex. So, we collect the threads 2888 * we're going to reap in a list headed by "tlist", then we 2889 * unlock the pidlock mutex and traverse the tlist list, 2890 * doing thread_free's on the thread's. Simple, n'est pas? 2891 * Also, this depends on thread_free not mucking with the 2892 * t_next and t_prev links of the thread. 2893 */ 2894 2895 if ((t = curthread) != NULL) { 2896 2897 tlist = NULL; 2898 mutex_enter(&pidlock); 2899 do { 2900 tnext = t->t_next; 2901 if (t->t_bound_cpu == cp) { 2902 2903 /* 2904 * We've found a bound thread, carefully unlink 2905 * it out of the thread list, and add it to 2906 * our "tlist". We "know" we don't have to 2907 * worry about unlinking curthread (the thread 2908 * that is executing this code). 2909 */ 2910 t->t_next->t_prev = t->t_prev; 2911 t->t_prev->t_next = t->t_next; 2912 t->t_next = tlist; 2913 tlist = t; 2914 ASSERT(t->t_cid == syscid); 2915 /* wake up anyone blocked in thread_join */ 2916 cv_broadcast(&t->t_joincv); 2917 /* 2918 * t_lwp set by interrupt threads and not 2919 * cleared. 2920 */ 2921 t->t_lwp = NULL; 2922 /* 2923 * Pause and idle threads always have 2924 * t_state set to TS_ONPROC. 2925 */ 2926 t->t_state = TS_FREE; 2927 t->t_prev = NULL; /* Just in case */ 2928 } 2929 2930 } while ((t = tnext) != curthread); 2931 2932 mutex_exit(&pidlock); 2933 2934 mutex_sync(); 2935 for (t = tlist; t != NULL; t = tnext) { 2936 tnext = t->t_next; 2937 thread_free(t); 2938 } 2939 } 2940 } 2941 2942 /* 2943 * Update the cpu_supp_freqs of this cpu. This information is returned 2944 * as part of cpu_info kstats. If the cpu_info_kstat exists already, then 2945 * maintain the kstat data size. 
2946 */ 2947 void 2948 cpu_set_supp_freqs(cpu_t *cp, const char *freqs) 2949 { 2950 char clkstr[sizeof ("18446744073709551615") + 1]; /* ui64 MAX */ 2951 const char *lfreqs = clkstr; 2952 boolean_t kstat_exists = B_FALSE; 2953 kstat_t *ksp; 2954 size_t len; 2955 2956 /* 2957 * A NULL pointer means we only support one speed. 2958 */ 2959 if (freqs == NULL) 2960 (void) snprintf(clkstr, sizeof (clkstr), "%"PRIu64, 2961 cp->cpu_curr_clock); 2962 else 2963 lfreqs = freqs; 2964 2965 /* 2966 * Make sure the frequency doesn't change while a snapshot is 2967 * going on. Of course, we only need to worry about this if 2968 * the kstat exists. 2969 */ 2970 if ((ksp = cp->cpu_info_kstat) != NULL) { 2971 mutex_enter(ksp->ks_lock); 2972 kstat_exists = B_TRUE; 2973 } 2974 2975 /* 2976 * Free any previously allocated string and, if the kstat 2977 * already exists, update its data size. 2978 */ 2979 if (cp->cpu_supp_freqs != NULL) { 2980 len = strlen(cp->cpu_supp_freqs) + 1; 2981 kmem_free(cp->cpu_supp_freqs, len); 2982 if (kstat_exists) 2983 ksp->ks_data_size -= len; 2984 } 2985 2986 /* 2987 * Allocate the new string and set the pointer. 2988 */ 2989 len = strlen(lfreqs) + 1; 2990 cp->cpu_supp_freqs = kmem_alloc(len, KM_SLEEP); 2991 (void) strcpy(cp->cpu_supp_freqs, lfreqs); 2992 2993 /* 2994 * If the kstat already exists, update the data size and 2995 * release the lock. 2996 */ 2997 if (kstat_exists) { 2998 ksp->ks_data_size += len; 2999 mutex_exit(ksp->ks_lock); 3000 } 3001 } 3002 3003 /* 3004 * Indicate the current CPU's clock frequency (in Hz). 3005 * The calling context must be such that CPU references are safe. 3006 */ 3007 void 3008 cpu_set_curr_clock(uint64_t new_clk) 3009 { 3010 uint64_t old_clk; 3011 3012 old_clk = CPU->cpu_curr_clock; 3013 CPU->cpu_curr_clock = new_clk; 3014 3015 /* 3016 * The cpu-change-speed DTrace probe exports the frequency in Hz. 3017 */ 3018 DTRACE_PROBE3(cpu__change__speed, processorid_t, CPU->cpu_id, 3019 uint64_t, old_clk, uint64_t, new_clk); 3020 } 3021 3022 /* 3023 * processor_info(2) and p_online(2) status support functions. 3024 * The constants returned by cpu_get_state() and cpu_get_state_str() are 3025 * for use in communicating processor state information to userland. Kernel 3026 * subsystems should only use the cpu_flags value directly. Subsystems 3027 * modifying cpu_flags should record the state change via a call to 3028 * cpu_set_state(). 3029 */ 3030 3031 /* 3032 * Update the pi_state of this CPU. This function provides the CPU status for 3033 * the information returned by processor_info(2). 3034 */ 3035 void 3036 cpu_set_state(cpu_t *cpu) 3037 { 3038 ASSERT(MUTEX_HELD(&cpu_lock)); 3039 cpu->cpu_type_info.pi_state = cpu_get_state(cpu); 3040 cpu->cpu_state_begin = gethrestime_sec(); 3041 pool_cpu_mod = gethrtime(); 3042 } 3043 3044 /* 3045 * Return offline/online/other status for the indicated CPU. Use only for 3046 * communication with user applications; cpu_flags provides the in-kernel 3047 * interface.
3048 */ 3049 int 3050 cpu_get_state(cpu_t *cpu) 3051 { 3052 ASSERT(MUTEX_HELD(&cpu_lock)); 3053 if (cpu->cpu_flags & CPU_POWEROFF) 3054 return (P_POWEROFF); 3055 else if (cpu->cpu_flags & CPU_FAULTED) 3056 return (P_FAULTED); 3057 else if (cpu->cpu_flags & CPU_SPARE) 3058 return (P_SPARE); 3059 else if ((cpu->cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY) 3060 return (P_OFFLINE); 3061 else if (cpu->cpu_flags & CPU_ENABLE) 3062 return (P_ONLINE); 3063 else 3064 return (P_NOINTR); 3065 } 3066 3067 /* 3068 * Return processor_info(2) state as a string. 3069 */ 3070 const char * 3071 cpu_get_state_str(cpu_t *cpu) 3072 { 3073 const char *string; 3074 3075 switch (cpu_get_state(cpu)) { 3076 case P_ONLINE: 3077 string = PS_ONLINE; 3078 break; 3079 case P_POWEROFF: 3080 string = PS_POWEROFF; 3081 break; 3082 case P_NOINTR: 3083 string = PS_NOINTR; 3084 break; 3085 case P_SPARE: 3086 string = PS_SPARE; 3087 break; 3088 case P_FAULTED: 3089 string = PS_FAULTED; 3090 break; 3091 case P_OFFLINE: 3092 string = PS_OFFLINE; 3093 break; 3094 default: 3095 string = "unknown"; 3096 break; 3097 } 3098 return (string); 3099 } 3100 3101 /* 3102 * Export this CPU's statistics (cpu_stat_t and cpu_stats_t) as raw and named 3103 * kstats, respectively. This is done when a CPU is initialized or placed 3104 * online via p_online(2). 3105 */ 3106 static void 3107 cpu_stats_kstat_create(cpu_t *cp) 3108 { 3109 int instance = cp->cpu_id; 3110 char *module = "cpu"; 3111 char *class = "misc"; 3112 kstat_t *ksp; 3113 zoneid_t zoneid; 3114 3115 ASSERT(MUTEX_HELD(&cpu_lock)); 3116 3117 if (pool_pset_enabled()) 3118 zoneid = GLOBAL_ZONEID; 3119 else 3120 zoneid = ALL_ZONES; 3121 /* 3122 * Create named kstats 3123 */ 3124 #define CPU_STATS_KS_CREATE(name, tsize, update_func) \ 3125 ksp = kstat_create_zone(module, instance, (name), class, \ 3126 KSTAT_TYPE_NAMED, (tsize) / sizeof (kstat_named_t), 0, \ 3127 zoneid); \ 3128 if (ksp != NULL) { \ 3129 ksp->ks_private = cp; \ 3130 ksp->ks_update = (update_func); \ 3131 kstat_install(ksp); \ 3132 } else \ 3133 cmn_err(CE_WARN, "cpu: unable to create %s:%d:%s kstat", \ 3134 module, instance, (name)); 3135 3136 CPU_STATS_KS_CREATE("sys", sizeof (cpu_sys_stats_ks_data_template), 3137 cpu_sys_stats_ks_update); 3138 CPU_STATS_KS_CREATE("vm", sizeof (cpu_vm_stats_ks_data_template), 3139 cpu_vm_stats_ks_update); 3140 3141 /* 3142 * Export the familiar cpu_stat_t KSTAT_TYPE_RAW kstat. 3143 */ 3144 ksp = kstat_create_zone("cpu_stat", cp->cpu_id, NULL, 3145 "misc", KSTAT_TYPE_RAW, sizeof (cpu_stat_t), 0, zoneid); 3146 if (ksp != NULL) { 3147 ksp->ks_update = cpu_stat_ks_update; 3148 ksp->ks_private = cp; 3149 kstat_install(ksp); 3150 } 3151 } 3152 3153 static void 3154 cpu_stats_kstat_destroy(cpu_t *cp) 3155 { 3156 char ks_name[KSTAT_STRLEN]; 3157 3158 (void) sprintf(ks_name, "cpu_stat%d", cp->cpu_id); 3159 kstat_delete_byname("cpu_stat", cp->cpu_id, ks_name); 3160 3161 kstat_delete_byname("cpu", cp->cpu_id, "sys"); 3162 kstat_delete_byname("cpu", cp->cpu_id, "vm"); 3163 } 3164 3165 static int 3166 cpu_sys_stats_ks_update(kstat_t *ksp, int rw) 3167 { 3168 cpu_t *cp = (cpu_t *)ksp->ks_private; 3169 struct cpu_sys_stats_ks_data *csskd; 3170 cpu_sys_stats_t *css; 3171 hrtime_t msnsecs[NCMSTATES]; 3172 int i; 3173 3174 if (rw == KSTAT_WRITE) 3175 return (EACCES); 3176 3177 csskd = ksp->ks_data; 3178 css = &cp->cpu_stats.sys; 3179 3180 /* 3181 * Read CPU mstate, but compare with the last values we 3182 * received to make sure that the returned kstats never 3183 * decrease. 
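 * (kstat consumers typically compute deltas between successive
 * snapshots, so a counter that stepped backwards would show up as a
 * bogus, very large unsigned difference.)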
3184 */ 3185 3186 get_cpu_mstate(cp, msnsecs); 3187 if (csskd->cpu_nsec_idle.value.ui64 > msnsecs[CMS_IDLE]) 3188 msnsecs[CMS_IDLE] = csskd->cpu_nsec_idle.value.ui64; 3189 if (csskd->cpu_nsec_user.value.ui64 > msnsecs[CMS_USER]) 3190 msnsecs[CMS_USER] = csskd->cpu_nsec_user.value.ui64; 3191 if (csskd->cpu_nsec_kernel.value.ui64 > msnsecs[CMS_SYSTEM]) 3192 msnsecs[CMS_SYSTEM] = csskd->cpu_nsec_kernel.value.ui64; 3193 3194 bcopy(&cpu_sys_stats_ks_data_template, ksp->ks_data, 3195 sizeof (cpu_sys_stats_ks_data_template)); 3196 3197 csskd->cpu_ticks_wait.value.ui64 = 0; 3198 csskd->wait_ticks_io.value.ui64 = 0; 3199 3200 csskd->cpu_nsec_idle.value.ui64 = msnsecs[CMS_IDLE]; 3201 csskd->cpu_nsec_user.value.ui64 = msnsecs[CMS_USER]; 3202 csskd->cpu_nsec_kernel.value.ui64 = msnsecs[CMS_SYSTEM]; 3203 csskd->cpu_ticks_idle.value.ui64 = 3204 NSEC_TO_TICK(csskd->cpu_nsec_idle.value.ui64); 3205 csskd->cpu_ticks_user.value.ui64 = 3206 NSEC_TO_TICK(csskd->cpu_nsec_user.value.ui64); 3207 csskd->cpu_ticks_kernel.value.ui64 = 3208 NSEC_TO_TICK(csskd->cpu_nsec_kernel.value.ui64); 3209 csskd->cpu_nsec_intr.value.ui64 = cp->cpu_intrlast; 3210 csskd->cpu_load_intr.value.ui64 = cp->cpu_intrload; 3211 csskd->bread.value.ui64 = css->bread; 3212 csskd->bwrite.value.ui64 = css->bwrite; 3213 csskd->lread.value.ui64 = css->lread; 3214 csskd->lwrite.value.ui64 = css->lwrite; 3215 csskd->phread.value.ui64 = css->phread; 3216 csskd->phwrite.value.ui64 = css->phwrite; 3217 csskd->pswitch.value.ui64 = css->pswitch; 3218 csskd->trap.value.ui64 = css->trap; 3219 csskd->intr.value.ui64 = 0; 3220 for (i = 0; i < PIL_MAX; i++) 3221 csskd->intr.value.ui64 += css->intr[i]; 3222 csskd->syscall.value.ui64 = css->syscall; 3223 csskd->sysread.value.ui64 = css->sysread; 3224 csskd->syswrite.value.ui64 = css->syswrite; 3225 csskd->sysfork.value.ui64 = css->sysfork; 3226 csskd->sysvfork.value.ui64 = css->sysvfork; 3227 csskd->sysexec.value.ui64 = css->sysexec; 3228 csskd->readch.value.ui64 = css->readch; 3229 csskd->writech.value.ui64 = css->writech; 3230 csskd->rcvint.value.ui64 = css->rcvint; 3231 csskd->xmtint.value.ui64 = css->xmtint; 3232 csskd->mdmint.value.ui64 = css->mdmint; 3233 csskd->rawch.value.ui64 = css->rawch; 3234 csskd->canch.value.ui64 = css->canch; 3235 csskd->outch.value.ui64 = css->outch; 3236 csskd->msg.value.ui64 = css->msg; 3237 csskd->sema.value.ui64 = css->sema; 3238 csskd->namei.value.ui64 = css->namei; 3239 csskd->ufsiget.value.ui64 = css->ufsiget; 3240 csskd->ufsdirblk.value.ui64 = css->ufsdirblk; 3241 csskd->ufsipage.value.ui64 = css->ufsipage; 3242 csskd->ufsinopage.value.ui64 = css->ufsinopage; 3243 csskd->procovf.value.ui64 = css->procovf; 3244 csskd->intrthread.value.ui64 = 0; 3245 for (i = 0; i < LOCK_LEVEL - 1; i++) 3246 csskd->intrthread.value.ui64 += css->intr[i]; 3247 csskd->intrblk.value.ui64 = css->intrblk; 3248 csskd->intrunpin.value.ui64 = css->intrunpin; 3249 csskd->idlethread.value.ui64 = css->idlethread; 3250 csskd->inv_swtch.value.ui64 = css->inv_swtch; 3251 csskd->nthreads.value.ui64 = css->nthreads; 3252 csskd->cpumigrate.value.ui64 = css->cpumigrate; 3253 csskd->xcalls.value.ui64 = css->xcalls; 3254 csskd->mutex_adenters.value.ui64 = css->mutex_adenters; 3255 csskd->rw_rdfails.value.ui64 = css->rw_rdfails; 3256 csskd->rw_wrfails.value.ui64 = css->rw_wrfails; 3257 csskd->modload.value.ui64 = css->modload; 3258 csskd->modunload.value.ui64 = css->modunload; 3259 csskd->bawrite.value.ui64 = css->bawrite; 3260 csskd->iowait.value.ui64 = css->iowait; 3261 3262 return (0); 3263 } 3264 3265 
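/*
 * The "cpu:<id>:sys" and "cpu:<id>:vm" named kstats exported above are
 * read from userland with libkstat(3LIB).  As an illustrative sketch
 * only (this is user-level code, not part of this file), fetching the
 * "syscall" counter of CPU 0 looks roughly like:
 *
 *	#include <kstat.h>
 *	#include <stdio.h>
 *
 *	kstat_ctl_t *kc;
 *	kstat_t *ksp;
 *	kstat_named_t *kn;
 *
 *	if ((kc = kstat_open()) == NULL)
 *		return;
 *	if ((ksp = kstat_lookup(kc, "cpu", 0, "sys")) != NULL &&
 *	    kstat_read(kc, ksp, NULL) != -1 &&
 *	    (kn = kstat_data_lookup(ksp, "syscall")) != NULL)
 *		(void) printf("%llu\n", (u_longlong_t)kn->value.ui64);
 *	(void) kstat_close(kc);
 *
 * Error handling is abbreviated for brevity.
 */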
static int 3266 cpu_vm_stats_ks_update(kstat_t *ksp, int rw) 3267 { 3268 cpu_t *cp = (cpu_t *)ksp->ks_private; 3269 struct cpu_vm_stats_ks_data *cvskd; 3270 cpu_vm_stats_t *cvs; 3271 3272 if (rw == KSTAT_WRITE) 3273 return (EACCES); 3274 3275 cvs = &cp->cpu_stats.vm; 3276 cvskd = ksp->ks_data; 3277 3278 bcopy(&cpu_vm_stats_ks_data_template, ksp->ks_data, 3279 sizeof (cpu_vm_stats_ks_data_template)); 3280 cvskd->pgrec.value.ui64 = cvs->pgrec; 3281 cvskd->pgfrec.value.ui64 = cvs->pgfrec; 3282 cvskd->pgin.value.ui64 = cvs->pgin; 3283 cvskd->pgpgin.value.ui64 = cvs->pgpgin; 3284 cvskd->pgout.value.ui64 = cvs->pgout; 3285 cvskd->pgpgout.value.ui64 = cvs->pgpgout; 3286 cvskd->swapin.value.ui64 = cvs->swapin; 3287 cvskd->pgswapin.value.ui64 = cvs->pgswapin; 3288 cvskd->swapout.value.ui64 = cvs->swapout; 3289 cvskd->pgswapout.value.ui64 = cvs->pgswapout; 3290 cvskd->zfod.value.ui64 = cvs->zfod; 3291 cvskd->dfree.value.ui64 = cvs->dfree; 3292 cvskd->scan.value.ui64 = cvs->scan; 3293 cvskd->rev.value.ui64 = cvs->rev; 3294 cvskd->hat_fault.value.ui64 = cvs->hat_fault; 3295 cvskd->as_fault.value.ui64 = cvs->as_fault; 3296 cvskd->maj_fault.value.ui64 = cvs->maj_fault; 3297 cvskd->cow_fault.value.ui64 = cvs->cow_fault; 3298 cvskd->prot_fault.value.ui64 = cvs->prot_fault; 3299 cvskd->softlock.value.ui64 = cvs->softlock; 3300 cvskd->kernel_asflt.value.ui64 = cvs->kernel_asflt; 3301 cvskd->pgrrun.value.ui64 = cvs->pgrrun; 3302 cvskd->execpgin.value.ui64 = cvs->execpgin; 3303 cvskd->execpgout.value.ui64 = cvs->execpgout; 3304 cvskd->execfree.value.ui64 = cvs->execfree; 3305 cvskd->anonpgin.value.ui64 = cvs->anonpgin; 3306 cvskd->anonpgout.value.ui64 = cvs->anonpgout; 3307 cvskd->anonfree.value.ui64 = cvs->anonfree; 3308 cvskd->fspgin.value.ui64 = cvs->fspgin; 3309 cvskd->fspgout.value.ui64 = cvs->fspgout; 3310 cvskd->fsfree.value.ui64 = cvs->fsfree; 3311 3312 return (0); 3313 } 3314 3315 static int 3316 cpu_stat_ks_update(kstat_t *ksp, int rw) 3317 { 3318 cpu_stat_t *cso; 3319 cpu_t *cp; 3320 int i; 3321 hrtime_t msnsecs[NCMSTATES]; 3322 3323 cso = (cpu_stat_t *)ksp->ks_data; 3324 cp = (cpu_t *)ksp->ks_private; 3325 3326 if (rw == KSTAT_WRITE) 3327 return (EACCES); 3328 3329 /* 3330 * Read CPU mstate, but compare with the last values we 3331 * received to make sure that the returned kstats never 3332 * decrease. 
3333 */ 3334 3335 get_cpu_mstate(cp, msnsecs); 3336 msnsecs[CMS_IDLE] = NSEC_TO_TICK(msnsecs[CMS_IDLE]); 3337 msnsecs[CMS_USER] = NSEC_TO_TICK(msnsecs[CMS_USER]); 3338 msnsecs[CMS_SYSTEM] = NSEC_TO_TICK(msnsecs[CMS_SYSTEM]); 3339 if (cso->cpu_sysinfo.cpu[CPU_IDLE] < msnsecs[CMS_IDLE]) 3340 cso->cpu_sysinfo.cpu[CPU_IDLE] = msnsecs[CMS_IDLE]; 3341 if (cso->cpu_sysinfo.cpu[CPU_USER] < msnsecs[CMS_USER]) 3342 cso->cpu_sysinfo.cpu[CPU_USER] = msnsecs[CMS_USER]; 3343 if (cso->cpu_sysinfo.cpu[CPU_KERNEL] < msnsecs[CMS_SYSTEM]) 3344 cso->cpu_sysinfo.cpu[CPU_KERNEL] = msnsecs[CMS_SYSTEM]; 3345 cso->cpu_sysinfo.cpu[CPU_WAIT] = 0; 3346 cso->cpu_sysinfo.wait[W_IO] = 0; 3347 cso->cpu_sysinfo.wait[W_SWAP] = 0; 3348 cso->cpu_sysinfo.wait[W_PIO] = 0; 3349 cso->cpu_sysinfo.bread = CPU_STATS(cp, sys.bread); 3350 cso->cpu_sysinfo.bwrite = CPU_STATS(cp, sys.bwrite); 3351 cso->cpu_sysinfo.lread = CPU_STATS(cp, sys.lread); 3352 cso->cpu_sysinfo.lwrite = CPU_STATS(cp, sys.lwrite); 3353 cso->cpu_sysinfo.phread = CPU_STATS(cp, sys.phread); 3354 cso->cpu_sysinfo.phwrite = CPU_STATS(cp, sys.phwrite); 3355 cso->cpu_sysinfo.pswitch = CPU_STATS(cp, sys.pswitch); 3356 cso->cpu_sysinfo.trap = CPU_STATS(cp, sys.trap); 3357 cso->cpu_sysinfo.intr = 0; 3358 for (i = 0; i < PIL_MAX; i++) 3359 cso->cpu_sysinfo.intr += CPU_STATS(cp, sys.intr[i]); 3360 cso->cpu_sysinfo.syscall = CPU_STATS(cp, sys.syscall); 3361 cso->cpu_sysinfo.sysread = CPU_STATS(cp, sys.sysread); 3362 cso->cpu_sysinfo.syswrite = CPU_STATS(cp, sys.syswrite); 3363 cso->cpu_sysinfo.sysfork = CPU_STATS(cp, sys.sysfork); 3364 cso->cpu_sysinfo.sysvfork = CPU_STATS(cp, sys.sysvfork); 3365 cso->cpu_sysinfo.sysexec = CPU_STATS(cp, sys.sysexec); 3366 cso->cpu_sysinfo.readch = CPU_STATS(cp, sys.readch); 3367 cso->cpu_sysinfo.writech = CPU_STATS(cp, sys.writech); 3368 cso->cpu_sysinfo.rcvint = CPU_STATS(cp, sys.rcvint); 3369 cso->cpu_sysinfo.xmtint = CPU_STATS(cp, sys.xmtint); 3370 cso->cpu_sysinfo.mdmint = CPU_STATS(cp, sys.mdmint); 3371 cso->cpu_sysinfo.rawch = CPU_STATS(cp, sys.rawch); 3372 cso->cpu_sysinfo.canch = CPU_STATS(cp, sys.canch); 3373 cso->cpu_sysinfo.outch = CPU_STATS(cp, sys.outch); 3374 cso->cpu_sysinfo.msg = CPU_STATS(cp, sys.msg); 3375 cso->cpu_sysinfo.sema = CPU_STATS(cp, sys.sema); 3376 cso->cpu_sysinfo.namei = CPU_STATS(cp, sys.namei); 3377 cso->cpu_sysinfo.ufsiget = CPU_STATS(cp, sys.ufsiget); 3378 cso->cpu_sysinfo.ufsdirblk = CPU_STATS(cp, sys.ufsdirblk); 3379 cso->cpu_sysinfo.ufsipage = CPU_STATS(cp, sys.ufsipage); 3380 cso->cpu_sysinfo.ufsinopage = CPU_STATS(cp, sys.ufsinopage); 3381 cso->cpu_sysinfo.inodeovf = 0; 3382 cso->cpu_sysinfo.fileovf = 0; 3383 cso->cpu_sysinfo.procovf = CPU_STATS(cp, sys.procovf); 3384 cso->cpu_sysinfo.intrthread = 0; 3385 for (i = 0; i < LOCK_LEVEL - 1; i++) 3386 cso->cpu_sysinfo.intrthread += CPU_STATS(cp, sys.intr[i]); 3387 cso->cpu_sysinfo.intrblk = CPU_STATS(cp, sys.intrblk); 3388 cso->cpu_sysinfo.idlethread = CPU_STATS(cp, sys.idlethread); 3389 cso->cpu_sysinfo.inv_swtch = CPU_STATS(cp, sys.inv_swtch); 3390 cso->cpu_sysinfo.nthreads = CPU_STATS(cp, sys.nthreads); 3391 cso->cpu_sysinfo.cpumigrate = CPU_STATS(cp, sys.cpumigrate); 3392 cso->cpu_sysinfo.xcalls = CPU_STATS(cp, sys.xcalls); 3393 cso->cpu_sysinfo.mutex_adenters = CPU_STATS(cp, sys.mutex_adenters); 3394 cso->cpu_sysinfo.rw_rdfails = CPU_STATS(cp, sys.rw_rdfails); 3395 cso->cpu_sysinfo.rw_wrfails = CPU_STATS(cp, sys.rw_wrfails); 3396 cso->cpu_sysinfo.modload = CPU_STATS(cp, sys.modload); 3397 cso->cpu_sysinfo.modunload = CPU_STATS(cp, sys.modunload); 3398 
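	/*
	 * The rw_enters and win_*_cnt fields below are not filled in from
	 * cpu_stats; they are simply reported as zero.
	 */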
cso->cpu_sysinfo.bawrite = CPU_STATS(cp, sys.bawrite); 3399 cso->cpu_sysinfo.rw_enters = 0; 3400 cso->cpu_sysinfo.win_uo_cnt = 0; 3401 cso->cpu_sysinfo.win_uu_cnt = 0; 3402 cso->cpu_sysinfo.win_so_cnt = 0; 3403 cso->cpu_sysinfo.win_su_cnt = 0; 3404 cso->cpu_sysinfo.win_suo_cnt = 0; 3405 3406 cso->cpu_syswait.iowait = CPU_STATS(cp, sys.iowait); 3407 cso->cpu_syswait.swap = 0; 3408 cso->cpu_syswait.physio = 0; 3409 3410 cso->cpu_vminfo.pgrec = CPU_STATS(cp, vm.pgrec); 3411 cso->cpu_vminfo.pgfrec = CPU_STATS(cp, vm.pgfrec); 3412 cso->cpu_vminfo.pgin = CPU_STATS(cp, vm.pgin); 3413 cso->cpu_vminfo.pgpgin = CPU_STATS(cp, vm.pgpgin); 3414 cso->cpu_vminfo.pgout = CPU_STATS(cp, vm.pgout); 3415 cso->cpu_vminfo.pgpgout = CPU_STATS(cp, vm.pgpgout); 3416 cso->cpu_vminfo.swapin = CPU_STATS(cp, vm.swapin); 3417 cso->cpu_vminfo.pgswapin = CPU_STATS(cp, vm.pgswapin); 3418 cso->cpu_vminfo.swapout = CPU_STATS(cp, vm.swapout); 3419 cso->cpu_vminfo.pgswapout = CPU_STATS(cp, vm.pgswapout); 3420 cso->cpu_vminfo.zfod = CPU_STATS(cp, vm.zfod); 3421 cso->cpu_vminfo.dfree = CPU_STATS(cp, vm.dfree); 3422 cso->cpu_vminfo.scan = CPU_STATS(cp, vm.scan); 3423 cso->cpu_vminfo.rev = CPU_STATS(cp, vm.rev); 3424 cso->cpu_vminfo.hat_fault = CPU_STATS(cp, vm.hat_fault); 3425 cso->cpu_vminfo.as_fault = CPU_STATS(cp, vm.as_fault); 3426 cso->cpu_vminfo.maj_fault = CPU_STATS(cp, vm.maj_fault); 3427 cso->cpu_vminfo.cow_fault = CPU_STATS(cp, vm.cow_fault); 3428 cso->cpu_vminfo.prot_fault = CPU_STATS(cp, vm.prot_fault); 3429 cso->cpu_vminfo.softlock = CPU_STATS(cp, vm.softlock); 3430 cso->cpu_vminfo.kernel_asflt = CPU_STATS(cp, vm.kernel_asflt); 3431 cso->cpu_vminfo.pgrrun = CPU_STATS(cp, vm.pgrrun); 3432 cso->cpu_vminfo.execpgin = CPU_STATS(cp, vm.execpgin); 3433 cso->cpu_vminfo.execpgout = CPU_STATS(cp, vm.execpgout); 3434 cso->cpu_vminfo.execfree = CPU_STATS(cp, vm.execfree); 3435 cso->cpu_vminfo.anonpgin = CPU_STATS(cp, vm.anonpgin); 3436 cso->cpu_vminfo.anonpgout = CPU_STATS(cp, vm.anonpgout); 3437 cso->cpu_vminfo.anonfree = CPU_STATS(cp, vm.anonfree); 3438 cso->cpu_vminfo.fspgin = CPU_STATS(cp, vm.fspgin); 3439 cso->cpu_vminfo.fspgout = CPU_STATS(cp, vm.fspgout); 3440 cso->cpu_vminfo.fsfree = CPU_STATS(cp, vm.fsfree); 3441 3442 return (0); 3443 } 3444