/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/systm.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/cpupart.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/kstat.h>
#include <sys/processor.h>
#include <sys/disp.h>
#include <sys/group.h>
#include <sys/pg.h>

/*
 * Processor groups
 *
 * With the introduction of Chip Multi-Threaded (CMT) processor architectures,
 * it is no longer necessarily true that a given physical processor module
 * will present itself as a single schedulable entity (cpu_t). Rather, each
 * chip and/or processor core may present itself as one or more "logical"
 * CPUs.
 *
 * The logical CPUs presented may share physical components such as caches,
 * data pipes, execution pipelines, FPUs, etc. It is advantageous to have the
 * kernel be aware of the relationships existing between logical CPUs so that
 * the appropriate optimizations may be employed.
 *
 * The processor group abstraction represents a set of logical CPUs that
 * generally share some sort of physical or characteristic relationship.
 *
 * In the case of a physical sharing relationship, the CPUs in the group may
 * share a pipeline, cache or floating point unit. In the case of a logical
 * relationship, a PG may represent the set of CPUs in a processor set, or
 * the set of CPUs running at a particular clock speed.
 *
 * The generic processor group structure, pg_t, contains the elements generic
 * to a group of CPUs. Depending on the nature of the CPU relationship
 * (LOGICAL or PHYSICAL), a pointer to a pg may be recast to a "view" of that
 * PG where more specific data is represented.
 *
 * As an example, a PG representing a PHYSICAL relationship may be recast to
 * a pghw_t, where data further describing the hardware sharing relationship
 * is maintained. See pghw.c and pghw.h for details on physical PGs.
 *
 * At this time a more specialized casting of a PG representing a LOGICAL
 * relationship has not been implemented, but the architecture allows for
 * this in the future.
 *
 * Processor Group Classes
 *
 * Processor group consumers may wish to maintain and associate specific
 * data with the PGs they create. For this reason, a mechanism for creating
 * class specific PGs exists. Classes may overload the default functions for
 * creating, destroying, and associating CPUs with PGs, and may also register
 * class specific callbacks to be invoked when the CPU related system
 * configuration changes. Class specific data is stored/associated with
 * PGs by incorporating the pg_t (or pghw_t, as appropriate) as the first
 * element of a class specific PG object. In memory, such a structure may
 * look like:
 *
 *	----------------------- - - -
 *	| common              | | | | <--(pg_t *)
 *	----------------------- | | -
 *	| HW specific         | | | <-----(pghw_t *)
 *	----------------------- | -
 *	| class specific      | | <-------(pg_cmt_t *)
 *	----------------------- -
 *
 * Access to the PG class specific data can be had by casting a pointer to
 * its class specific view.
 */
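/*
 * As a purely illustrative sketch of the layout above, a class specific PG
 * object embeds the more generic views as its leading element, and each
 * view is recovered by a cast. The type and field names here are
 * hypothetical; see pg_cmt_t for the real CMT class example:
 *
 *	typedef struct pg_example {
 *		pghw_t	pgx_hw;		(generic pg_t + HW data; must be first)
 *		int	pgx_data;	(class specific data)
 *	} pg_example_t;
 *
 *	pg_example_t	*expg = (pg_example_t *)pg;	(class view)
 *	pghw_t		*hw = (pghw_t *)pg;		(hardware view)
 *	pg_t		*base = (pg_t *)pg;		(generic view)
 */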
static pg_t		*pg_alloc_default(pg_class_t);
static void		pg_free_default(pg_t *);
static void		pg_null_op();

/*
 * Bootstrap CPU specific PG data
 * See pg_cpu_bootstrap()
 */
static cpu_pg_t		bootstrap_pg_data;

/*
 * Bitset of allocated PG ids (they are sequential)
 * and the next free id in the set.
 */
static bitset_t		pg_id_set;
static pgid_t		pg_id_next = 0;

/*
 * Default and externed PG ops vectors
 */
static struct pg_ops pg_ops_default = {
	pg_alloc_default,	/* alloc */
	pg_free_default,	/* free */
	NULL,			/* cpu_init */
	NULL,			/* cpu_fini */
	NULL,			/* cpu_active */
	NULL,			/* cpu_inactive */
	NULL,			/* cpupart_in */
	NULL,			/* cpupart_out */
	NULL,			/* cpupart_move */
	NULL,			/* cpu_belongs */
	NULL,			/* policy_name */
};

static struct pg_cb_ops pg_cb_ops_default = {
	pg_null_op,		/* thread_swtch */
	pg_null_op,		/* thread_remain */
};

/*
 * Class specific PG allocation callbacks
 */
#define	PG_ALLOC(class) \
	(pg_classes[class].pgc_ops->alloc ? \
	    pg_classes[class].pgc_ops->alloc() : \
	    pg_classes[pg_default_cid].pgc_ops->alloc())

#define	PG_FREE(pg) \
	((pg)->pg_class->pgc_ops->free ? \
	    (pg)->pg_class->pgc_ops->free(pg) : \
	    pg_classes[pg_default_cid].pgc_ops->free(pg))

/*
 * Class specific PG policy name
 */
#define	PG_POLICY_NAME(pg) \
	((pg)->pg_class->pgc_ops->policy_name ? \
	    (pg)->pg_class->pgc_ops->policy_name(pg) : NULL)

/*
 * Class specific membership test callback
 */
#define	PG_CPU_BELONGS(pg, cp) \
	((pg)->pg_class->pgc_ops->cpu_belongs ? \
	    (pg)->pg_class->pgc_ops->cpu_belongs(pg, cp) : 0)
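/*
 * A class that only needs a subset of the hooks can leave the rest NULL:
 * alloc/free fall back to the default class via PG_ALLOC()/PG_FREE(), and
 * the configuration macros below simply skip NULL entries. The vector here
 * is a hypothetical sketch, not an actual registered class:
 *
 *	static struct pg_ops pg_ops_example = {
 *		NULL,			(alloc: fall back to default class)
 *		NULL,			(free: fall back to default class)
 *		pg_example_cpu_init,	(cpu_init)
 *		pg_example_cpu_fini,	(cpu_fini)
 *		NULL,			(cpu_active)
 *		NULL,			(cpu_inactive)
 *		NULL,			(cpupart_in)
 *		NULL,			(cpupart_out)
 *		NULL,			(cpupart_move)
 *		pg_example_cpu_belongs,	(cpu_belongs)
 *		NULL,			(policy_name)
 *	};
 */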
/*
 * CPU configuration callbacks
 */
#define	PG_CPU_INIT(class, cp, cpu_pg) \
{ \
	if (pg_classes[class].pgc_ops->cpu_init) \
		pg_classes[class].pgc_ops->cpu_init(cp, cpu_pg); \
}

#define	PG_CPU_FINI(class, cp, cpu_pg) \
{ \
	if (pg_classes[class].pgc_ops->cpu_fini) \
		pg_classes[class].pgc_ops->cpu_fini(cp, cpu_pg); \
}

#define	PG_CPU_ACTIVE(class, cp) \
{ \
	if (pg_classes[class].pgc_ops->cpu_active) \
		pg_classes[class].pgc_ops->cpu_active(cp); \
}

#define	PG_CPU_INACTIVE(class, cp) \
{ \
	if (pg_classes[class].pgc_ops->cpu_inactive) \
		pg_classes[class].pgc_ops->cpu_inactive(cp); \
}

/*
 * CPU / cpupart configuration callbacks
 */
#define	PG_CPUPART_IN(class, cp, pp) \
{ \
	if (pg_classes[class].pgc_ops->cpupart_in) \
		pg_classes[class].pgc_ops->cpupart_in(cp, pp); \
}

#define	PG_CPUPART_OUT(class, cp, pp) \
{ \
	if (pg_classes[class].pgc_ops->cpupart_out) \
		pg_classes[class].pgc_ops->cpupart_out(cp, pp); \
}

#define	PG_CPUPART_MOVE(class, cp, old, new) \
{ \
	if (pg_classes[class].pgc_ops->cpupart_move) \
		pg_classes[class].pgc_ops->cpupart_move(cp, old, new); \
}


static pg_class_t	*pg_classes;
static int		pg_nclasses;

static pg_cid_t		pg_default_cid;

/*
 * Initialize common PG subsystem.
 */
void
pg_init(void)
{
	extern void pg_cmt_class_init();
	extern void pg_cmt_cpu_startup();

	pg_default_cid =
	    pg_class_register("default", &pg_ops_default, PGR_LOGICAL);

	/*
	 * Initialize classes to allow them to register with the framework
	 */
	pg_cmt_class_init();

	pg_cpu0_init();
	pg_cmt_cpu_startup(CPU);
}

/*
 * Perform CPU 0 initialization
 */
void
pg_cpu0_init(void)
{
	extern void pghw_physid_create();

	/*
	 * Create the physical ID cache for the boot CPU
	 */
	pghw_physid_create(CPU);

	/*
	 * pg_cpu_* require that cpu_lock be held
	 */
	mutex_enter(&cpu_lock);

	pg_cpu_init(CPU);
	pg_cpupart_in(CPU, &cp_default);
	pg_cpu_active(CPU);

	mutex_exit(&cpu_lock);
}
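/*
 * A new PG class hooks into the framework by registering itself from an
 * init routine called out of pg_init() above, in the same way
 * pg_cmt_class_init() registers the CMT class. A hypothetical sketch
 * (names are illustrative only):
 *
 *	void
 *	pg_example_class_init()
 *	{
 *		pg_example_cid = pg_class_register("example",
 *		    &pg_ops_example, PGR_PHYSICAL);
 *	}
 */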
/*
 * Invoked when topology for CPU0 changes
 * post pg_cpu0_init().
 *
 * Currently happens as a result of null_proc_lpa
 * on Starcat.
 */
void
pg_cpu0_reinit(void)
{
	mutex_enter(&cpu_lock);
	pg_cpu_inactive(CPU);
	pg_cpupart_out(CPU, &cp_default);
	pg_cpu_fini(CPU);

	pg_cpu_init(CPU);
	pg_cpupart_in(CPU, &cp_default);
	pg_cpu_active(CPU);
	mutex_exit(&cpu_lock);
}

/*
 * Register a new PG class
 */
pg_cid_t
pg_class_register(char *name, struct pg_ops *ops, pg_relation_t relation)
{
	pg_class_t	*newclass;
	pg_class_t	*classes_old;
	id_t		cid;

	mutex_enter(&cpu_lock);

	/*
	 * Allocate a new pg_class_t in the pg_classes array
	 */
	if (pg_nclasses == 0) {
		pg_classes = kmem_zalloc(sizeof (pg_class_t), KM_SLEEP);
	} else {
		classes_old = pg_classes;
		pg_classes =
		    kmem_zalloc(sizeof (pg_class_t) * (pg_nclasses + 1),
		    KM_SLEEP);
		(void) kcopy(classes_old, pg_classes,
		    sizeof (pg_class_t) * pg_nclasses);
		kmem_free(classes_old, sizeof (pg_class_t) * pg_nclasses);
	}

	cid = pg_nclasses++;
	newclass = &pg_classes[cid];

	(void) strncpy(newclass->pgc_name, name, PG_CLASS_NAME_MAX);
	newclass->pgc_id = cid;
	newclass->pgc_ops = ops;
	newclass->pgc_relation = relation;

	mutex_exit(&cpu_lock);

	return (cid);
}

/*
 * Try to find an existing pg in set in which to place cp.
 * Returns the pg if found, and NULL otherwise.
 * In the event that the CPU could belong to multiple
 * PGs in the set, the first matching PG will be returned.
 */
pg_t *
pg_cpu_find_pg(cpu_t *cp, group_t *set)
{
	pg_t		*pg;
	group_iter_t	i;

	group_iter_init(&i);
	while ((pg = group_iterate(set, &i)) != NULL) {
		/*
		 * Ask the class if the CPU belongs here
		 */
		if (PG_CPU_BELONGS(pg, cp))
			return (pg);
	}
	return (NULL);
}

/*
 * Iterate over the CPUs in a PG after initializing
 * the iterator with PG_CPU_ITR_INIT()
 */
cpu_t *
pg_cpu_next(pg_cpu_itr_t *itr)
{
	cpu_t	*cpu;
	pg_t	*pg = itr->pg;

	cpu = group_iterate(&pg->pg_cpus, &itr->position);
	return (cpu);
}

/*
 * Test if a given PG contains a given CPU
 */
boolean_t
pg_cpu_find(pg_t *pg, cpu_t *cp)
{
	if (group_find(&pg->pg_cpus, cp) == (uint_t)-1)
		return (B_FALSE);

	return (B_TRUE);
}

/*
 * Set the PG's callbacks to the default
 */
void
pg_callback_set_defaults(pg_t *pg)
{
	bcopy(&pg_cb_ops_default, &pg->pg_cb, sizeof (struct pg_cb_ops));
}

/*
 * Create a PG of a given class.
 * This routine may block.
 */
pg_t *
pg_create(pg_cid_t cid)
{
	pg_t	*pg;
	pgid_t	id;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Call the class specific PG allocation routine
	 */
	pg = PG_ALLOC(cid);
	pg->pg_class = &pg_classes[cid];
	pg->pg_relation = pg->pg_class->pgc_relation;

	/*
	 * Find the next free sequential pg id
	 */
	do {
		if (pg_id_next >= bitset_capacity(&pg_id_set))
			bitset_resize(&pg_id_set, pg_id_next + 1);
		id = pg_id_next++;
	} while (bitset_in_set(&pg_id_set, id));

	pg->pg_id = id;
	bitset_add(&pg_id_set, pg->pg_id);

	/*
	 * Create the PG's CPU group
	 */
	group_create(&pg->pg_cpus);

	/*
	 * Initialize the events ops vector
	 */
	pg_callback_set_defaults(pg);

	return (pg);
}
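/*
 * Example (sketch) of walking the CPUs in a PG with the iterator above,
 * assuming the caller holds cpu_lock so that PG membership is stable:
 *
 *	pg_cpu_itr_t	itr;
 *	cpu_t		*cp;
 *
 *	PG_CPU_ITR_INIT(pg, itr);
 *	while ((cp = pg_cpu_next(&itr)) != NULL)
 *		(visit cp)
 */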
/*
 * Destroy a PG.
 * This routine may block.
 */
void
pg_destroy(pg_t *pg)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	group_destroy(&pg->pg_cpus);

	/*
	 * Unassign the pg_id
	 */
	if (pg_id_next > pg->pg_id)
		pg_id_next = pg->pg_id;
	bitset_del(&pg_id_set, pg->pg_id);

	/*
	 * Invoke the class specific de-allocation routine
	 */
	PG_FREE(pg);
}

/*
 * Add the CPU "cp" to processor group "pg"
 * This routine may block.
 */
void
pg_cpu_add(pg_t *pg, cpu_t *cp, cpu_pg_t *cpu_pg)
{
	int	err;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/* This adds the CPU to the PG's CPU group */
	err = group_add(&pg->pg_cpus, cp, GRP_RESIZE);
	ASSERT(err == 0);

	/*
	 * The CPU should still be referencing the bootstrap PG data
	 * at this point, since this routine may block, causing us to
	 * enter the dispatcher.
	 */
	ASSERT(pg_cpu_is_bootstrapped(cp));

	/* This adds the PG to the CPU's PG group */
	err = group_add(&cpu_pg->pgs, pg, GRP_RESIZE);
	ASSERT(err == 0);
}

/*
 * Remove "cp" from "pg".
 * This routine may block.
 */
void
pg_cpu_delete(pg_t *pg, cpu_t *cp, cpu_pg_t *cpu_pg)
{
	int	err;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/* Remove the CPU from the PG */
	err = group_remove(&pg->pg_cpus, cp, GRP_RESIZE);
	ASSERT(err == 0);

	/*
	 * The CPU should still be referencing the bootstrap PG data
	 * at this point, since this routine may block, causing us to
	 * enter the dispatcher.
	 */
	ASSERT(pg_cpu_is_bootstrapped(cp));

	/* Remove the PG from the CPU's PG group */
	err = group_remove(&cpu_pg->pgs, pg, GRP_RESIZE);
	ASSERT(err == 0);
}

/*
 * Allocate a CPU's PG data. This hangs off struct cpu at cpu_pg
 */
static cpu_pg_t *
pg_cpu_data_alloc(void)
{
	cpu_pg_t	*pgd;

	pgd = kmem_zalloc(sizeof (cpu_pg_t), KM_SLEEP);
	group_create(&pgd->pgs);
	group_create(&pgd->cmt_pgs);

	return (pgd);
}

/*
 * Free the CPU's PG data.
 */
static void
pg_cpu_data_free(cpu_pg_t *pgd)
{
	group_destroy(&pgd->pgs);
	group_destroy(&pgd->cmt_pgs);
	kmem_free(pgd, sizeof (cpu_pg_t));
}

/*
 * A new CPU is coming into the system, either via booting or DR.
 * Allocate its PG data, and notify all registered classes about
 * the new CPU.
 *
 * This routine may block.
 */
void
pg_cpu_init(cpu_t *cp)
{
	pg_cid_t	i;
	cpu_pg_t	*cpu_pg;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Allocate and size the per CPU pg data
	 *
	 * The CPU's PG data will be populated by the various
	 * PG classes during the invocation of the PG_CPU_INIT()
	 * callback below.
	 *
	 * Since we could block and enter the dispatcher during
	 * this process, the CPU will continue to reference the bootstrap
	 * PG data until all the initialization completes.
	 */
	ASSERT(pg_cpu_is_bootstrapped(cp));

	cpu_pg = pg_cpu_data_alloc();

	/*
	 * Notify all registered classes about the new CPU
	 */
	for (i = 0; i < pg_nclasses; i++)
		PG_CPU_INIT(i, cp, cpu_pg);

	/*
	 * The CPU's PG data is now ready to use.
	 */
	cp->cpu_pg = cpu_pg;
}
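/*
 * For reference, the full configuration sequence for a CPU arriving via
 * boot or DR mirrors what pg_cpu0_reinit() does (sketch; caller holds
 * cpu_lock):
 *
 *	pg_cpu_init(cp);		(allocate and populate PG data)
 *	pg_cpupart_in(cp, pp);		(enter its CPU partition)
 *	pg_cpu_active(cp);		(come online)
 *
 * and the teardown path below runs the same steps in reverse.
 */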
/*
 * This CPU is being deleted from the system. Notify the classes
 * and free up the CPU's PG data.
 */
void
pg_cpu_fini(cpu_t *cp)
{
	pg_cid_t	i;
	cpu_pg_t	*cpu_pg;

	ASSERT(MUTEX_HELD(&cpu_lock));

	cpu_pg = cp->cpu_pg;

	/*
	 * This can happen if the CPU coming into the system
	 * failed to power on.
	 */
	if (cpu_pg == NULL || pg_cpu_is_bootstrapped(cp))
		return;

	/*
	 * Have the CPU reference the bootstrap PG data to survive
	 * the dispatcher should it block from here on out.
	 */
	pg_cpu_bootstrap(cp);

	for (i = 0; i < pg_nclasses; i++)
		PG_CPU_FINI(i, cp, cpu_pg);

	pg_cpu_data_free(cpu_pg);
}

/*
 * This CPU is becoming active (online).
 * This routine may not block, as it is called from paused
 * CPUs context.
 */
void
pg_cpu_active(cpu_t *cp)
{
	pg_cid_t	i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Notify all registered classes that the CPU is going active
	 */
	for (i = 0; i < pg_nclasses; i++)
		PG_CPU_ACTIVE(i, cp);
}

/*
 * This CPU is going inactive (offline).
 * This routine may not block, as it is called from paused
 * CPUs context.
 */
void
pg_cpu_inactive(cpu_t *cp)
{
	pg_cid_t	i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Notify all registered classes that the CPU is going inactive
	 */
	for (i = 0; i < pg_nclasses; i++)
		PG_CPU_INACTIVE(i, cp);
}

/*
 * Invoked when the CPU is about to move into the partition.
 * This routine may block.
 */
void
pg_cpupart_in(cpu_t *cp, cpupart_t *pp)
{
	int	i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Notify all registered classes that the
	 * CPU is about to enter the CPU partition
	 */
	for (i = 0; i < pg_nclasses; i++)
		PG_CPUPART_IN(i, cp, pp);
}

/*
 * Invoked when the CPU is about to move out of the partition.
 * This routine may block.
 */
/*ARGSUSED*/
void
pg_cpupart_out(cpu_t *cp, cpupart_t *pp)
{
	int	i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Notify all registered classes that the
	 * CPU is about to leave the CPU partition
	 */
	for (i = 0; i < pg_nclasses; i++)
		PG_CPUPART_OUT(i, cp, pp);
}

/*
 * Invoked when the CPU is *moving* between partitions.
 *
 * This routine may not block, as it is called from paused CPUs
 * context.
 */
void
pg_cpupart_move(cpu_t *cp, cpupart_t *oldpp, cpupart_t *newpp)
{
	int	i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Notify all registered classes that the
	 * CPU is moving between CPU partitions
	 */
	for (i = 0; i < pg_nclasses; i++)
		PG_CPUPART_MOVE(i, cp, oldpp, newpp);
}

/*
 * Return a class specific string describing a policy implemented
 * across this PG
 */
char *
pg_policy_name(pg_t *pg)
{
	char *str;

	if ((str = PG_POLICY_NAME(pg)) != NULL)
		return (str);

	return ("N/A");
}

/*
 * Provide the specified CPU a bootstrap pg.
 * This is needed to allow sane behaviour if any PG consuming
 * code needs to deal with a partially initialized CPU.
 */
void
pg_cpu_bootstrap(cpu_t *cp)
{
	cp->cpu_pg = &bootstrap_pg_data;
}
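/*
 * Because bootstrap_pg_data is a static, zeroed cpu_pg_t, PG consumers
 * that race with CPU configuration see empty PG groups rather than a
 * NULL cpu_pg pointer. A defensive caller can also test for this state
 * explicitly (sketch):
 *
 *	if (pg_cpu_is_bootstrapped(cp))
 *		return;		(PG data not yet constructed)
 */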
/*
 * Return non-zero if the specified CPU is bootstrapped,
 * which means its CPU specific PG data has not yet been
 * fully constructed.
 */
int
pg_cpu_is_bootstrapped(cpu_t *cp)
{
	return (cp->cpu_pg == &bootstrap_pg_data);
}

/*ARGSUSED*/
static pg_t *
pg_alloc_default(pg_class_t class)
{
	return (kmem_zalloc(sizeof (pg_t), KM_SLEEP));
}

/*ARGSUSED*/
static void
pg_free_default(struct pg *pg)
{
	kmem_free(pg, sizeof (pg_t));
}

static void
pg_null_op()
{
}

/*
 * Invoke the "thread switch" callback for each of the CPU's PGs.
 * This is invoked from the dispatcher swtch() routine, which is called
 * when a thread running on a CPU should switch to another thread.
 * "cp" is the CPU on which the thread switch is happening
 * "now" is an unscaled hrtime_t timestamp taken in swtch()
 * "old" and "new" are the outgoing and incoming threads, respectively.
 */
void
pg_ev_thread_swtch(struct cpu *cp, hrtime_t now, kthread_t *old, kthread_t *new)
{
	int	i, sz;
	group_t	*grp;
	pg_t	*pg;

	grp = &cp->cpu_pg->pgs;
	sz = GROUP_SIZE(grp);
	for (i = 0; i < sz; i++) {
		pg = GROUP_ACCESS(grp, i);
		pg->pg_cb.thread_swtch(pg, cp, now, old, new);
	}
}

/*
 * Invoke the "thread remain" callback for each of the CPU's PGs.
 * This is called from the dispatcher's swtch() routine when a thread
 * running on the CPU "cp" is switching to itself, which can happen as an
 * artifact of the thread's timeslice expiring.
 */
void
pg_ev_thread_remain(struct cpu *cp, kthread_t *t)
{
	int	i, sz;
	group_t	*grp;
	pg_t	*pg;

	grp = &cp->cpu_pg->pgs;
	sz = GROUP_SIZE(grp);
	for (i = 0; i < sz; i++) {
		pg = GROUP_ACCESS(grp, i);
		pg->pg_cb.thread_remain(pg, cp, t);
	}
}
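/*
 * Classes interested in these dispatcher events override the default
 * no-op callbacks installed by pg_callback_set_defaults(). A hypothetical
 * class might do so at PG creation time (sketch; the CMT class does the
 * equivalent for its PGs):
 *
 *	pg->pg_cb.thread_swtch = pg_example_ev_thread_swtch;
 *	pg->pg_cb.thread_remain = pg_example_ev_thread_remain;
 *
 * Since these callbacks run in the dispatcher path, implementations
 * must not block.
 */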