1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * vm_usage 29 * 30 * This file implements the getvmusage() private system call. 31 * getvmusage() counts the amount of resident memory pages and swap 32 * reserved by the specified process collective. A "process collective" is 33 * the set of processes owned by a particular zone, project, task, or user. 34 * 35 * rss and swap are counted so that for a given process collective, a page is 36 * only counted once. For example, this means that if multiple processes in 37 * the same project map the same page, then the project will only be charged 38 * once for that page. On the other hand, if two processes in different 39 * projects map the same page, then both projects will be charged 40 * for the page. 41 * 42 * The vm_getusage() calculation is implemented so that the first thread 43 * performs the rss/swap counting. Other callers will wait for that thread to 44 * finish, copying the results. This enables multiple rcapds and prstats to 45 * consume data from the same calculation. The results are also cached so that 46 * a caller interested in recent results can just copy them instead of starting 47 * a new calculation. The caller passes the maximum age (in seconds) of the 48 * data. If the cached data is young enough, the cache is copied; otherwise, 49 * a new calculation is executed and the cache is replaced with the new 50 * data. 51 * 52 * The rss calculation for each process collective is as follows: 53 * 54 * - Inspect flags, determine if counting rss for zones, projects, tasks, 55 * and/or users. 56 * - For each proc: 57 * - Figure out proc's collectives (zone, project, task, and/or user). 58 * - For each seg in proc's address space: 59 * - If seg is private: 60 * - Lookup anons in the amp. 61 * - For incore pages not previously visited for each of the 62 * proc's collectives, add incore pagesize to each 63 * collective. 64 * Anons with a refcnt of 1 can be assumed to be not 65 * previously visited. 66 * - For address ranges without anons in the amp: 67 * - Lookup pages in underlying vnode. 68 * - For incore pages not previously visited for 69 * each of the proc's collectives, add incore 70 * pagesize to each collective. 71 * - If seg is shared: 72 * - Lookup pages in the shared amp or vnode. 73 * - For incore pages not previously visited for each of 74 * the proc's collectives, add incore pagesize to each 75 * collective. 76 * 77 * Swap is reserved by private segments, and shared anonymous segments.
78 * The only shared anon segments which do not reserve swap are ISM segments 79 * and schedctl segments, both of which can be identified by having 80 * amp->swresv == 0. 81 * 82 * The swap calculation for each collective is as follows: 83 * 84 * - Inspect flags, determine if counting rss for zones, projects, tasks, 85 * and/or users. 86 * - For each proc: 87 * - Figure out proc's collectives (zone, project, task, and/or user). 88 * - For each seg in proc's address space: 89 * - If seg is private: 90 * - Add svd->swresv pages to swap count for each of the 91 * proc's collectives. 92 * - If seg is anon, shared, and amp->swresv != 0 93 * - For address ranges in amp not previously visited for 94 * each of the proc's collectives, add size of address 95 * range to the swap count for each collective. 96 * 97 * These two calculations are done simultaneously, with most of the work 98 * being done in vmu_calculate_seg(). The results of the calculation are 99 * copied into "vmu_data.vmu_cache_results". 100 * 101 * To perform the calculation, various things are tracked and cached: 102 * 103 * - incore/not-incore page ranges for all vnodes. 104 * (vmu_data.vmu_all_vnodes_hash) 105 * This eliminates looking up the same page more than once. 106 * 107 * - incore/not-incore page ranges for all shared amps. 108 * (vmu_data.vmu_all_amps_hash) 109 * This eliminates looking up the same page more than once. 110 * 111 * - visited page ranges for each collective. 112 * - per vnode (entity->vme_vnode_hash) 113 * - per shared amp (entity->vme_amp_hash) 114 * For accurate counting of map-shared and COW-shared pages. 115 * 116 * - visited private anons (refcnt > 1) for each collective. 117 * (entity->vme_anon_hash) 118 * For accurate counting of COW-shared pages. 119 * 120 * The common accounting structure is the vmu_entity_t, which represents 121 * collectives: 122 * 123 * - A zone. 124 * - A project, task, or user within a zone. 125 * - The entire system (vmu_data.vmu_system). 126 * - Each collapsed (col) project and user. This means a given projid or 127 * uid, regardless of which zone the process is in. For instance, 128 * project 0 in the global zone and project 0 in a non global zone are 129 * the same collapsed project. 130 * 131 * Each entity structure tracks which pages have been already visited for 132 * that entity (via previously inspected processes) so that these pages are 133 * not double counted. 134 */ 135 136 #include <sys/errno.h> 137 #include <sys/types.h> 138 #include <sys/zone.h> 139 #include <sys/proc.h> 140 #include <sys/project.h> 141 #include <sys/task.h> 142 #include <sys/thread.h> 143 #include <sys/time.h> 144 #include <sys/mman.h> 145 #include <sys/modhash.h> 146 #include <sys/modhash_impl.h> 147 #include <sys/shm.h> 148 #include <sys/swap.h> 149 #include <sys/synch.h> 150 #include <sys/systm.h> 151 #include <sys/var.h> 152 #include <sys/vm_usage.h> 153 #include <sys/zone.h> 154 #include <sys/sunddi.h> 155 #include <sys/avl.h> 156 #include <vm/anon.h> 157 #include <vm/as.h> 158 #include <vm/seg_vn.h> 159 #include <vm/seg_spt.h> 160 161 #define VMUSAGE_HASH_SIZE 512 162 163 #define VMUSAGE_TYPE_VNODE 1 164 #define VMUSAGE_TYPE_AMP 2 165 #define VMUSAGE_TYPE_ANON 3 166 167 #define VMUSAGE_BOUND_UNKNOWN 0 168 #define VMUSAGE_BOUND_INCORE 1 169 #define VMUSAGE_BOUND_NOT_INCORE 2 170 171 #define ISWITHIN(node, addr) ((node)->vmb_start <= addr && \ 172 (node)->vmb_end >= addr ? 
1 : 0) 173 174 /* 175 * bounds for vnodes and shared amps 176 * Each bound is either entirely incore, entirely not in core, or 177 * entirely unknown. bounds are stored in an avl tree sorted by start member 178 * when in use, otherwise (free or temporary lists) they're strung 179 * together off of vmb_next. 180 */ 181 typedef struct vmu_bound { 182 avl_node_t vmb_node; 183 struct vmu_bound *vmb_next; /* NULL in tree else on free or temp list */ 184 pgcnt_t vmb_start; /* page offset in vnode/amp on which bound starts */ 185 pgcnt_t vmb_end; /* page offset in vnode/amp on which bound ends */ 186 char vmb_type; /* One of VMUSAGE_BOUND_* */ 187 } vmu_bound_t; 188 189 /* 190 * hash of visited objects (vnodes or shared amps) 191 * key is address of vnode or amp. Bounds lists known incore/non-incore 192 * bounds for vnode/amp. 193 */ 194 typedef struct vmu_object { 195 struct vmu_object *vmo_next; /* free list */ 196 caddr_t vmo_key; 197 short vmo_type; 198 avl_tree_t vmo_bounds; 199 } vmu_object_t; 200 201 /* 202 * Entity by which to count results. 203 * 204 * The entity structure keeps the current rss/swap counts for each entity 205 * (zone, project, etc), and hashes of vm structures that have already 206 * been visited for the entity. 207 * 208 * vme_next: links the list of all entities currently being counted by 209 * vmu_calculate(). 210 * 211 * vme_next_calc: links the list of entities related to the current process 212 * being counted by vmu_calculate_proc(). 213 * 214 * vmu_calculate_proc() walks all processes. For each process, it makes a 215 * list of the entities related to that process using vme_next_calc. This 216 * list changes each time vmu_calculate_proc() is called. 217 * 218 */ 219 typedef struct vmu_entity { 220 struct vmu_entity *vme_next; 221 struct vmu_entity *vme_next_calc; 222 mod_hash_t *vme_vnode_hash; /* vnodes visited for entity */ 223 mod_hash_t *vme_amp_hash; /* shared amps visited for entity */ 224 mod_hash_t *vme_anon_hash; /* COW anons visited for entity */ 225 vmusage_t vme_result; /* identifies entity and results */ 226 } vmu_entity_t; 227 228 /* 229 * Hash of entities visited within a zone, and an entity for the zone 230 * itself. 231 */ 232 typedef struct vmu_zone { 233 struct vmu_zone *vmz_next; /* free list */ 234 id_t vmz_id; 235 vmu_entity_t *vmz_zone; 236 mod_hash_t *vmz_projects_hash; 237 mod_hash_t *vmz_tasks_hash; 238 mod_hash_t *vmz_rusers_hash; 239 mod_hash_t *vmz_eusers_hash; 240 } vmu_zone_t; 241 242 /* 243 * Cache of results from last calculation 244 */ 245 typedef struct vmu_cache { 246 vmusage_t *vmc_results; /* Results from last call to */ 247 /* vm_getusage(). 
*/ 248 uint64_t vmc_nresults; /* Count of cached results */ 249 uint64_t vmc_refcnt; /* refcnt for free */ 250 uint_t vmc_flags; /* Flags for vm_getusage() */ 251 hrtime_t vmc_timestamp; /* when cache was created */ 252 } vmu_cache_t; 253 254 /* 255 * top level rss info for the system 256 */ 257 typedef struct vmu_data { 258 kmutex_t vmu_lock; /* Protects vmu_data */ 259 kcondvar_t vmu_cv; /* Used to signal threads */ 260 /* Waiting for */ 261 /* Rss_calc_thread to finish */ 262 vmu_entity_t *vmu_system; /* Entity for tracking */ 263 /* rss/swap for all processes */ 264 /* in all zones */ 265 mod_hash_t *vmu_zones_hash; /* Zones visited */ 266 mod_hash_t *vmu_projects_col_hash; /* These *_col_hash hashes */ 267 mod_hash_t *vmu_rusers_col_hash; /* keep track of entities, */ 268 mod_hash_t *vmu_eusers_col_hash; /* ignoring zoneid, in order */ 269 /* to implement VMUSAGE_COL_* */ 270 /* flags, which aggregate by */ 271 /* project or user regardless */ 272 /* of zoneid. */ 273 mod_hash_t *vmu_all_vnodes_hash; /* System wide visited vnodes */ 274 /* to track incore/not-incore */ 275 mod_hash_t *vmu_all_amps_hash; /* System wide visited shared */ 276 /* amps to track incore/not- */ 277 /* incore */ 278 vmu_entity_t *vmu_entities; /* Linked list of entities */ 279 size_t vmu_nentities; /* Count of entities in list */ 280 vmu_cache_t *vmu_cache; /* Cached results */ 281 kthread_t *vmu_calc_thread; /* NULL, or thread running */ 282 /* vmu_calculate() */ 283 uint_t vmu_calc_flags; /* Flags being using by */ 284 /* currently running calc */ 285 /* thread */ 286 uint_t vmu_pending_flags; /* Flags of vm_getusage() */ 287 /* threads waiting for */ 288 /* calc thread to finish */ 289 uint_t vmu_pending_waiters; /* Number of threads waiting */ 290 /* for calc thread */ 291 vmu_bound_t *vmu_free_bounds; 292 vmu_object_t *vmu_free_objects; 293 vmu_entity_t *vmu_free_entities; 294 vmu_zone_t *vmu_free_zones; 295 } vmu_data_t; 296 297 extern struct as kas; 298 extern proc_t *practive; 299 extern zone_t *global_zone; 300 extern struct seg_ops segvn_ops; 301 extern struct seg_ops segspt_shmops; 302 303 static vmu_data_t vmu_data; 304 static kmem_cache_t *vmu_bound_cache; 305 static kmem_cache_t *vmu_object_cache; 306 307 /* 308 * Comparison routine for AVL tree. We base our comparison on vmb_start. 309 */ 310 static int 311 bounds_cmp(const void *bnd1, const void *bnd2) 312 { 313 const vmu_bound_t *bound1 = bnd1; 314 const vmu_bound_t *bound2 = bnd2; 315 316 if (bound1->vmb_start == bound2->vmb_start) { 317 return (0); 318 } 319 if (bound1->vmb_start < bound2->vmb_start) { 320 return (-1); 321 } 322 323 return (1); 324 } 325 326 /* 327 * Save a bound on the free list. 328 */ 329 static void 330 vmu_free_bound(vmu_bound_t *bound) 331 { 332 bound->vmb_next = vmu_data.vmu_free_bounds; 333 bound->vmb_start = 0; 334 bound->vmb_end = 0; 335 bound->vmb_type = 0; 336 vmu_data.vmu_free_bounds = bound; 337 } 338 339 /* 340 * Free an object, and all visited bound info. 341 */ 342 static void 343 vmu_free_object(mod_hash_val_t val) 344 { 345 vmu_object_t *obj = (vmu_object_t *)val; 346 avl_tree_t *tree = &(obj->vmo_bounds); 347 vmu_bound_t *bound; 348 void *cookie = NULL; 349 350 while ((bound = avl_destroy_nodes(tree, &cookie)) != NULL) 351 vmu_free_bound(bound); 352 avl_destroy(tree); 353 354 obj->vmo_type = 0; 355 obj->vmo_next = vmu_data.vmu_free_objects; 356 vmu_data.vmu_free_objects = obj; 357 } 358 359 /* 360 * Free an entity, and hashes of visited objects for that entity. 
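 *
 * Note (descriptive, derived from the code below): freed entities are strung
 * on vmu_data.vmu_free_entities for reuse by vmu_alloc_entity(); their hashes
 * are only cleared here, not destroyed, so a later calculation does not have
 * to recreate them.  They are destroyed only by vmu_free_extra().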
361 */ 362 static void 363 vmu_free_entity(mod_hash_val_t val) 364 { 365 vmu_entity_t *entity = (vmu_entity_t *)val; 366 367 if (entity->vme_vnode_hash != NULL) 368 i_mod_hash_clear_nosync(entity->vme_vnode_hash); 369 if (entity->vme_amp_hash != NULL) 370 i_mod_hash_clear_nosync(entity->vme_amp_hash); 371 if (entity->vme_anon_hash != NULL) 372 i_mod_hash_clear_nosync(entity->vme_anon_hash); 373 374 entity->vme_next = vmu_data.vmu_free_entities; 375 vmu_data.vmu_free_entities = entity; 376 } 377 378 /* 379 * Free zone entity, and all hashes of entities inside that zone, 380 * which are projects, tasks, and users. 381 */ 382 static void 383 vmu_free_zone(mod_hash_val_t val) 384 { 385 vmu_zone_t *zone = (vmu_zone_t *)val; 386 387 if (zone->vmz_zone != NULL) { 388 vmu_free_entity((mod_hash_val_t)zone->vmz_zone); 389 zone->vmz_zone = NULL; 390 } 391 if (zone->vmz_projects_hash != NULL) 392 i_mod_hash_clear_nosync(zone->vmz_projects_hash); 393 if (zone->vmz_tasks_hash != NULL) 394 i_mod_hash_clear_nosync(zone->vmz_tasks_hash); 395 if (zone->vmz_rusers_hash != NULL) 396 i_mod_hash_clear_nosync(zone->vmz_rusers_hash); 397 if (zone->vmz_eusers_hash != NULL) 398 i_mod_hash_clear_nosync(zone->vmz_eusers_hash); 399 zone->vmz_next = vmu_data.vmu_free_zones; 400 vmu_data.vmu_free_zones = zone; 401 } 402 403 /* 404 * Initialize synchronization primitives and hashes for system-wide tracking 405 * of visited vnodes and shared amps. Initialize results cache. 406 */ 407 void 408 vm_usage_init() 409 { 410 mutex_init(&vmu_data.vmu_lock, NULL, MUTEX_DEFAULT, NULL); 411 cv_init(&vmu_data.vmu_cv, NULL, CV_DEFAULT, NULL); 412 413 vmu_data.vmu_system = NULL; 414 vmu_data.vmu_zones_hash = NULL; 415 vmu_data.vmu_projects_col_hash = NULL; 416 vmu_data.vmu_rusers_col_hash = NULL; 417 vmu_data.vmu_eusers_col_hash = NULL; 418 419 vmu_data.vmu_free_bounds = NULL; 420 vmu_data.vmu_free_objects = NULL; 421 vmu_data.vmu_free_entities = NULL; 422 vmu_data.vmu_free_zones = NULL; 423 424 vmu_data.vmu_all_vnodes_hash = mod_hash_create_ptrhash( 425 "vmusage vnode hash", VMUSAGE_HASH_SIZE, vmu_free_object, 426 sizeof (vnode_t)); 427 vmu_data.vmu_all_amps_hash = mod_hash_create_ptrhash( 428 "vmusage amp hash", VMUSAGE_HASH_SIZE, vmu_free_object, 429 sizeof (struct anon_map)); 430 vmu_data.vmu_projects_col_hash = mod_hash_create_idhash( 431 "vmusage collapsed project hash", VMUSAGE_HASH_SIZE, 432 vmu_free_entity); 433 vmu_data.vmu_rusers_col_hash = mod_hash_create_idhash( 434 "vmusage collapsed ruser hash", VMUSAGE_HASH_SIZE, 435 vmu_free_entity); 436 vmu_data.vmu_eusers_col_hash = mod_hash_create_idhash( 437 "vmusage collpased euser hash", VMUSAGE_HASH_SIZE, 438 vmu_free_entity); 439 vmu_data.vmu_zones_hash = mod_hash_create_idhash( 440 "vmusage zone hash", VMUSAGE_HASH_SIZE, vmu_free_zone); 441 442 vmu_bound_cache = kmem_cache_create("vmu_bound_cache", 443 sizeof (vmu_bound_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 444 vmu_object_cache = kmem_cache_create("vmu_object_cache", 445 sizeof (vmu_object_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 446 447 vmu_data.vmu_entities = NULL; 448 vmu_data.vmu_nentities = 0; 449 450 vmu_data.vmu_cache = NULL; 451 vmu_data.vmu_calc_thread = NULL; 452 vmu_data.vmu_calc_flags = 0; 453 vmu_data.vmu_pending_flags = 0; 454 vmu_data.vmu_pending_waiters = 0; 455 } 456 457 /* 458 * Allocate hashes for tracking vm objects visited for an entity. 459 * Update list of entities. 
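 *
 * Illustrative example (hypothetical ids): counting project 10 inside zone 3
 * produces an entity whose result is identified by
 *
 *	entity->vme_result.vmu_id == 10
 *	entity->vme_result.vmu_type == VMUSAGE_PROJECTS
 *	entity->vme_result.vmu_zoneid == 3
 *
 * and whose rss/swap counters accumulate into that same vmusage_t.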
460 */ 461 static vmu_entity_t * 462 vmu_alloc_entity(id_t id, int type, id_t zoneid) 463 { 464 vmu_entity_t *entity; 465 466 if (vmu_data.vmu_free_entities != NULL) { 467 entity = vmu_data.vmu_free_entities; 468 vmu_data.vmu_free_entities = 469 vmu_data.vmu_free_entities->vme_next; 470 bzero(&entity->vme_result, sizeof (vmusage_t)); 471 } else { 472 entity = kmem_zalloc(sizeof (vmu_entity_t), KM_SLEEP); 473 } 474 entity->vme_result.vmu_id = id; 475 entity->vme_result.vmu_zoneid = zoneid; 476 entity->vme_result.vmu_type = type; 477 478 if (entity->vme_vnode_hash == NULL) 479 entity->vme_vnode_hash = mod_hash_create_ptrhash( 480 "vmusage vnode hash", VMUSAGE_HASH_SIZE, vmu_free_object, 481 sizeof (vnode_t)); 482 483 if (entity->vme_amp_hash == NULL) 484 entity->vme_amp_hash = mod_hash_create_ptrhash( 485 "vmusage amp hash", VMUSAGE_HASH_SIZE, vmu_free_object, 486 sizeof (struct anon_map)); 487 488 if (entity->vme_anon_hash == NULL) 489 entity->vme_anon_hash = mod_hash_create_ptrhash( 490 "vmusage anon hash", VMUSAGE_HASH_SIZE, 491 mod_hash_null_valdtor, sizeof (struct anon)); 492 493 entity->vme_next = vmu_data.vmu_entities; 494 vmu_data.vmu_entities = entity; 495 vmu_data.vmu_nentities++; 496 497 return (entity); 498 } 499 500 /* 501 * Allocate a zone entity, and hashes for tracking visited vm objects 502 * for projects, tasks, and users within that zone. 503 */ 504 static vmu_zone_t * 505 vmu_alloc_zone(id_t id) 506 { 507 vmu_zone_t *zone; 508 509 if (vmu_data.vmu_free_zones != NULL) { 510 zone = vmu_data.vmu_free_zones; 511 vmu_data.vmu_free_zones = 512 vmu_data.vmu_free_zones->vmz_next; 513 zone->vmz_next = NULL; 514 zone->vmz_zone = NULL; 515 } else { 516 zone = kmem_zalloc(sizeof (vmu_zone_t), KM_SLEEP); 517 } 518 519 zone->vmz_id = id; 520 521 if ((vmu_data.vmu_calc_flags & (VMUSAGE_ZONE | VMUSAGE_ALL_ZONES)) != 0) 522 zone->vmz_zone = vmu_alloc_entity(id, VMUSAGE_ZONE, id); 523 524 if ((vmu_data.vmu_calc_flags & (VMUSAGE_PROJECTS | 525 VMUSAGE_ALL_PROJECTS)) != 0 && zone->vmz_projects_hash == NULL) 526 zone->vmz_projects_hash = mod_hash_create_idhash( 527 "vmusage project hash", VMUSAGE_HASH_SIZE, vmu_free_entity); 528 529 if ((vmu_data.vmu_calc_flags & (VMUSAGE_TASKS | VMUSAGE_ALL_TASKS)) 530 != 0 && zone->vmz_tasks_hash == NULL) 531 zone->vmz_tasks_hash = mod_hash_create_idhash( 532 "vmusage task hash", VMUSAGE_HASH_SIZE, vmu_free_entity); 533 534 if ((vmu_data.vmu_calc_flags & (VMUSAGE_RUSERS | VMUSAGE_ALL_RUSERS)) 535 != 0 && zone->vmz_rusers_hash == NULL) 536 zone->vmz_rusers_hash = mod_hash_create_idhash( 537 "vmusage ruser hash", VMUSAGE_HASH_SIZE, vmu_free_entity); 538 539 if ((vmu_data.vmu_calc_flags & (VMUSAGE_EUSERS | VMUSAGE_ALL_EUSERS)) 540 != 0 && zone->vmz_eusers_hash == NULL) 541 zone->vmz_eusers_hash = mod_hash_create_idhash( 542 "vmusage euser hash", VMUSAGE_HASH_SIZE, vmu_free_entity); 543 544 return (zone); 545 } 546 547 /* 548 * Allocate a structure for tracking visited bounds for a vm object. 
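 *
 * Illustrative example (hypothetical values): once a vnode whose pages
 * [0, 9] were found incore and whose pages [10, 20] were not has been
 * visited, its object holds two bounds in vmo_bounds:
 *
 *	{ vmb_start = 0,  vmb_end = 9,  vmb_type = VMUSAGE_BOUND_INCORE }
 *	{ vmb_start = 10, vmb_end = 20, vmb_type = VMUSAGE_BOUND_NOT_INCORE }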
549 */ 550 static vmu_object_t * 551 vmu_alloc_object(caddr_t key, int type) 552 { 553 vmu_object_t *object; 554 555 if (vmu_data.vmu_free_objects != NULL) { 556 object = vmu_data.vmu_free_objects; 557 vmu_data.vmu_free_objects = 558 vmu_data.vmu_free_objects->vmo_next; 559 } else { 560 object = kmem_cache_alloc(vmu_object_cache, KM_SLEEP); 561 } 562 563 object->vmo_next = NULL; 564 object->vmo_key = key; 565 object->vmo_type = type; 566 avl_create(&(object->vmo_bounds), bounds_cmp, sizeof (vmu_bound_t), 0); 567 568 return (object); 569 } 570 571 /* 572 * Allocate and return a bound structure. 573 */ 574 static vmu_bound_t * 575 vmu_alloc_bound() 576 { 577 vmu_bound_t *bound; 578 579 if (vmu_data.vmu_free_bounds != NULL) { 580 bound = vmu_data.vmu_free_bounds; 581 vmu_data.vmu_free_bounds = 582 vmu_data.vmu_free_bounds->vmb_next; 583 } else { 584 bound = kmem_cache_alloc(vmu_bound_cache, KM_SLEEP); 585 } 586 587 bound->vmb_next = NULL; 588 bound->vmb_start = 0; 589 bound->vmb_end = 0; 590 bound->vmb_type = 0; 591 return (bound); 592 } 593 594 /* 595 * vmu_find_insert_* functions implement hash lookup or allocate and 596 * insert operations. 597 */ 598 static vmu_object_t * 599 vmu_find_insert_object(mod_hash_t *hash, caddr_t key, uint_t type) 600 { 601 int ret; 602 vmu_object_t *object; 603 604 ret = i_mod_hash_find_nosync(hash, (mod_hash_key_t)key, 605 (mod_hash_val_t *)&object); 606 if (ret != 0) { 607 object = vmu_alloc_object(key, type); 608 ret = i_mod_hash_insert_nosync(hash, (mod_hash_key_t)key, 609 (mod_hash_val_t)object, (mod_hash_hndl_t)0); 610 ASSERT(ret == 0); 611 } 612 return (object); 613 } 614 615 static int 616 vmu_find_insert_anon(mod_hash_t *hash, caddr_t key) 617 { 618 int ret; 619 caddr_t val; 620 621 ret = i_mod_hash_find_nosync(hash, (mod_hash_key_t)key, 622 (mod_hash_val_t *)&val); 623 624 if (ret == 0) 625 return (0); 626 627 ret = i_mod_hash_insert_nosync(hash, (mod_hash_key_t)key, 628 (mod_hash_val_t)key, (mod_hash_hndl_t)0); 629 630 ASSERT(ret == 0); 631 632 return (1); 633 } 634 635 static vmu_entity_t * 636 vmu_find_insert_entity(mod_hash_t *hash, id_t id, uint_t type, id_t zoneid) 637 { 638 int ret; 639 vmu_entity_t *entity; 640 641 ret = i_mod_hash_find_nosync(hash, (mod_hash_key_t)(uintptr_t)id, 642 (mod_hash_val_t *)&entity); 643 if (ret != 0) { 644 entity = vmu_alloc_entity(id, type, zoneid); 645 ret = i_mod_hash_insert_nosync(hash, 646 (mod_hash_key_t)(uintptr_t)id, (mod_hash_val_t)entity, 647 (mod_hash_hndl_t)0); 648 ASSERT(ret == 0); 649 } 650 return (entity); 651 } 652 653 654 655 656 /* 657 * Returns list of object bounds between start and end. New bounds inserted 658 * by this call are given type. 659 * 660 * Returns the number of pages covered if new bounds are created. Returns 0 661 * if region between start/end consists of all existing bounds. 662 */ 663 static pgcnt_t 664 vmu_insert_lookup_object_bounds(vmu_object_t *ro, pgcnt_t start, pgcnt_t 665 end, char type, vmu_bound_t **first, vmu_bound_t **last) 666 { 667 avl_tree_t *tree = &(ro->vmo_bounds); 668 avl_index_t where; 669 vmu_bound_t *walker, *tmp; 670 pgcnt_t ret = 0; 671 672 ASSERT(start <= end); 673 674 *first = *last = NULL; 675 676 tmp = vmu_alloc_bound(); 677 tmp->vmb_start = start; 678 tmp->vmb_type = type; 679 680 /* Hopelessly optimistic case. */ 681 if (walker = avl_find(tree, tmp, &where)) { 682 /* We got lucky. */ 683 vmu_free_bound(tmp); 684 *first = walker; 685 } 686 687 if (walker == NULL) { 688 /* Is start in the previous node? 
*/ 689 walker = avl_nearest(tree, where, AVL_BEFORE); 690 if (walker != NULL) { 691 if (ISWITHIN(walker, start)) { 692 /* We found start. */ 693 vmu_free_bound(tmp); 694 *first = walker; 695 } 696 } 697 } 698 699 /* 700 * At this point, if *first is still NULL, then we 701 * didn't get a direct hit and start isn't covered 702 * by the previous node. We know that the next node 703 * must have a greater start value than we require 704 * because avl_find tells us where the AVL routines would 705 * insert our new node. We have some gap between the 706 * start we want and the next node. 707 */ 708 if (*first == NULL) { 709 walker = avl_nearest(tree, where, AVL_AFTER); 710 if (walker != NULL && walker->vmb_start <= end) { 711 /* Fill the gap. */ 712 tmp->vmb_end = walker->vmb_start - 1; 713 *first = tmp; 714 } else { 715 /* We have a gap over [start, end]. */ 716 tmp->vmb_end = end; 717 *first = *last = tmp; 718 } 719 ret += tmp->vmb_end - tmp->vmb_start + 1; 720 avl_insert(tree, tmp, where); 721 } 722 723 ASSERT(*first != NULL); 724 725 if (*last != NULL) { 726 /* We're done. */ 727 return (ret); 728 } 729 730 /* 731 * If we are here we still need to set *last and 732 * that may involve filling in some gaps. 733 */ 734 *last = *first; 735 for (;;) { 736 if (ISWITHIN(*last, end)) { 737 /* We're done. */ 738 break; 739 } 740 walker = AVL_NEXT(tree, *last); 741 if (walker == NULL || walker->vmb_start > end) { 742 /* Bottom or mid tree with gap. */ 743 tmp = vmu_alloc_bound(); 744 tmp->vmb_start = (*last)->vmb_end + 1; 745 tmp->vmb_end = end; 746 ret += tmp->vmb_end - tmp->vmb_start + 1; 747 avl_insert_here(tree, tmp, *last, AVL_AFTER); 748 *last = tmp; 749 break; 750 } else { 751 if ((*last)->vmb_end + 1 != walker->vmb_start) { 752 /* Non-contiguous. */ 753 tmp = vmu_alloc_bound(); 754 tmp->vmb_start = (*last)->vmb_end + 1; 755 tmp->vmb_end = walker->vmb_start - 1; 756 ret += tmp->vmb_end - tmp->vmb_start + 1; 757 avl_insert_here(tree, tmp, *last, AVL_AFTER); 758 *last = tmp; 759 } else { 760 *last = walker; 761 } 762 } 763 } 764 765 return (ret); 766 } 767 768 /* 769 * vmu_update_bounds() 770 * 771 * tree: avl_tree in which first and last hang. 772 * 773 * first, last: list of continuous bounds, of which zero or more are of 774 * type VMUSAGE_BOUND_UNKNOWN. 775 * 776 * new_tree: avl_tree in which new_first and new_last hang. 777 * 778 * new_first, new_last: list of continuous bounds, of which none are of 779 * type VMUSAGE_BOUND_UNKNOWN. These bounds are used to 780 * update the types of bounds in (first,last) with 781 * type VMUSAGE_BOUND_UNKNOWN. 782 * 783 * For the list of bounds (first,last), this function updates any bounds 784 * with type VMUSAGE_BOUND_UNKNOWN using the type of the corresponding bound in 785 * the list (new_first, new_last). 786 * 787 * If a bound of type VMUSAGE_BOUND_UNKNOWN spans multiple bounds in the list 788 * (new_first, new_last), it will be split into multiple bounds. 789 * 790 * Return value: 791 * The number of pages in the list of bounds (first,last) that were of 792 * type VMUSAGE_BOUND_UNKNOWN, which have been updated to be of type 793 * VMUSAGE_BOUND_INCORE. 794 * 795 */ 796 static pgcnt_t 797 vmu_update_bounds(avl_tree_t *tree, vmu_bound_t **first, vmu_bound_t **last, 798 avl_tree_t *new_tree, vmu_bound_t *new_first, vmu_bound_t *new_last) 799 { 800 vmu_bound_t *next, *new_next, *tmp; 801 pgcnt_t rss = 0; 802 803 next = *first; 804 new_next = new_first; 805 806 /* 807 * Verify first and last bound are covered by new bounds if they 808 * have unknown type. 
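 *
 * Illustrative walk-through of the loop below (hypothetical values): if
 * (first, last) is a single UNKNOWN bound [0, 9] and (new_first, new_last)
 * is [0, 4] INCORE followed by [5, 9] NOT_INCORE, the UNKNOWN bound is
 * split into [0, 4] INCORE and [5, 9] NOT_INCORE, and 5 pages are counted
 * as rss.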
809 */ 810 ASSERT((*first)->vmb_type != VMUSAGE_BOUND_UNKNOWN || 811 (*first)->vmb_start >= new_first->vmb_start); 812 ASSERT((*last)->vmb_type != VMUSAGE_BOUND_UNKNOWN || 813 (*last)->vmb_end <= new_last->vmb_end); 814 for (;;) { 815 /* If bound already has type, proceed to next bound. */ 816 if (next->vmb_type != VMUSAGE_BOUND_UNKNOWN) { 817 if (next == *last) 818 break; 819 next = AVL_NEXT(tree, next); 820 continue; 821 } 822 while (new_next->vmb_end < next->vmb_start) 823 new_next = AVL_NEXT(new_tree, new_next); 824 ASSERT(new_next->vmb_type != VMUSAGE_BOUND_UNKNOWN); 825 next->vmb_type = new_next->vmb_type; 826 if (new_next->vmb_end < next->vmb_end) { 827 /* need to split bound */ 828 tmp = vmu_alloc_bound(); 829 tmp->vmb_type = VMUSAGE_BOUND_UNKNOWN; 830 tmp->vmb_start = new_next->vmb_end + 1; 831 tmp->vmb_end = next->vmb_end; 832 avl_insert_here(tree, tmp, next, AVL_AFTER); 833 next->vmb_end = new_next->vmb_end; 834 if (*last == next) 835 *last = tmp; 836 if (next->vmb_type == VMUSAGE_BOUND_INCORE) 837 rss += next->vmb_end - next->vmb_start + 1; 838 next = tmp; 839 } else { 840 if (next->vmb_type == VMUSAGE_BOUND_INCORE) 841 rss += next->vmb_end - next->vmb_start + 1; 842 if (next == *last) 843 break; 844 next = AVL_NEXT(tree, next); 845 } 846 } 847 return (rss); 848 } 849 850 /* 851 * Merges adjacent bounds with the same type between first and last bound. 852 * After merge, last pointer is no longer valid, as last bound may be 853 * merged away. 854 */ 855 static void 856 vmu_merge_bounds(avl_tree_t *tree, vmu_bound_t **first, vmu_bound_t **last) 857 { 858 vmu_bound_t *current; 859 vmu_bound_t *next; 860 861 ASSERT(tree != NULL); 862 ASSERT(*first != NULL); 863 ASSERT(*last != NULL); 864 865 current = *first; 866 while (current != *last) { 867 next = AVL_NEXT(tree, current); 868 if ((current->vmb_end + 1) == next->vmb_start && 869 current->vmb_type == next->vmb_type) { 870 current->vmb_end = next->vmb_end; 871 avl_remove(tree, next); 872 vmu_free_bound(next); 873 if (next == *last) { 874 break; 875 } 876 } 877 current = AVL_NEXT(tree, current); 878 } 879 } 880 881 /* 882 * Given an amp and a list of bounds, updates each bound's type with 883 * VMUSAGE_BOUND_INCORE or VMUSAGE_BOUND_NOT_INCORE. 884 * 885 * If a bound is partially incore, it will be split into two bounds. 886 * first and last may be modified, as bounds may be split into multiple 887 * bounds if they are partially incore/not-incore. 888 * 889 * Set incore to non-zero if bounds are already known to be incore. 890 * 891 */ 892 static void 893 vmu_amp_update_incore_bounds(avl_tree_t *tree, struct anon_map *amp, 894 vmu_bound_t **first, vmu_bound_t **last, boolean_t incore) 895 { 896 vmu_bound_t *next; 897 vmu_bound_t *tmp; 898 pgcnt_t index; 899 short bound_type; 900 short page_type; 901 vnode_t *vn; 902 anoff_t off; 903 struct anon *ap; 904 905 next = *first; 906 /* Shared anon slots don't change once set. */ 907 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 908 for (;;) { 909 if (incore == B_TRUE) 910 next->vmb_type = VMUSAGE_BOUND_INCORE; 911 912 if (next->vmb_type != VMUSAGE_BOUND_UNKNOWN) { 913 if (next == *last) 914 break; 915 next = AVL_NEXT(tree, next); 916 continue; 917 } 918 bound_type = next->vmb_type; 919 index = next->vmb_start; 920 while (index <= next->vmb_end) { 921 922 /* 923 * These are used to determine how much to increment 924 * index when a large page is found.
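 *
 * For example (illustrative, assuming a large page spanning
 * 512 base pages, so pgmsk == 511): an index of 700 advances
 * to (700 & ~511) + 512 == 1024, the first page of the next
 * large page.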
925 */ 926 page_t *page; 927 pgcnt_t pgcnt = 1; 928 uint_t pgshft; 929 pgcnt_t pgmsk; 930 931 ap = anon_get_ptr(amp->ahp, index); 932 if (ap != NULL) 933 swap_xlate(ap, &vn, &off); 934 935 if (ap != NULL && vn != NULL && vn->v_pages != NULL && 936 (page = page_exists(vn, off)) != NULL) { 937 page_type = VMUSAGE_BOUND_INCORE; 938 if (page->p_szc > 0) { 939 pgcnt = page_get_pagecnt(page->p_szc); 940 pgshft = page_get_shift(page->p_szc); 941 pgmsk = (0x1 << (pgshft - PAGESHIFT)) 942 - 1; 943 } 944 } else { 945 page_type = VMUSAGE_BOUND_NOT_INCORE; 946 } 947 if (bound_type == VMUSAGE_BOUND_UNKNOWN) { 948 next->vmb_type = page_type; 949 } else if (next->vmb_type != page_type) { 950 /* 951 * If current bound type does not match page 952 * type, need to split off new bound. 953 */ 954 tmp = vmu_alloc_bound(); 955 tmp->vmb_type = page_type; 956 tmp->vmb_start = index; 957 tmp->vmb_end = next->vmb_end; 958 avl_insert_here(tree, tmp, next, AVL_AFTER); 959 next->vmb_end = index - 1; 960 if (*last == next) 961 *last = tmp; 962 next = tmp; 963 } 964 if (pgcnt > 1) { 965 /* 966 * If inside large page, jump to next large 967 * page 968 */ 969 index = (index & ~pgmsk) + pgcnt; 970 } else { 971 index++; 972 } 973 } 974 if (next == *last) { 975 ASSERT(next->vmb_type != VMUSAGE_BOUND_UNKNOWN); 976 break; 977 } else 978 next = AVL_NEXT(tree, next); 979 } 980 ANON_LOCK_EXIT(&amp->a_rwlock); 981 } 982 983 /* 984 * Same as vmu_amp_update_incore_bounds(), except for tracking 985 * incore/not-incore for vnodes. 986 */ 987 static void 988 vmu_vnode_update_incore_bounds(avl_tree_t *tree, vnode_t *vnode, 989 vmu_bound_t **first, vmu_bound_t **last) 990 { 991 vmu_bound_t *next; 992 vmu_bound_t *tmp; 993 pgcnt_t index; 994 short bound_type; 995 short page_type; 996 997 next = *first; 998 for (;;) { 999 if (vnode->v_pages == NULL) 1000 next->vmb_type = VMUSAGE_BOUND_NOT_INCORE; 1001 1002 if (next->vmb_type != VMUSAGE_BOUND_UNKNOWN) { 1003 if (next == *last) 1004 break; 1005 next = AVL_NEXT(tree, next); 1006 continue; 1007 } 1008 1009 bound_type = next->vmb_type; 1010 index = next->vmb_start; 1011 while (index <= next->vmb_end) { 1012 1013 /* 1014 * These are used to determine how much to increment 1015 * index when a large page is found. 1016 */ 1017 page_t *page; 1018 pgcnt_t pgcnt = 1; 1019 uint_t pgshft; 1020 pgcnt_t pgmsk; 1021 1022 if (vnode->v_pages != NULL && 1023 (page = page_exists(vnode, ptob(index))) != NULL) { 1024 page_type = VMUSAGE_BOUND_INCORE; 1025 if (page->p_szc > 0) { 1026 pgcnt = page_get_pagecnt(page->p_szc); 1027 pgshft = page_get_shift(page->p_szc); 1028 pgmsk = (0x1 << (pgshft - PAGESHIFT)) 1029 - 1; 1030 } 1031 } else { 1032 page_type = VMUSAGE_BOUND_NOT_INCORE; 1033 } 1034 if (bound_type == VMUSAGE_BOUND_UNKNOWN) { 1035 next->vmb_type = page_type; 1036 } else if (next->vmb_type != page_type) { 1037 /* 1038 * If current bound type does not match page 1039 * type, need to split off new bound.
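 *
 * Illustrative example (hypothetical values): a bound
 * [0, 9] whose pages 0-3 are found incore but whose page
 * 4 is not becomes [0, 3] INCORE plus a new bound [4, 9]
 * marked NOT_INCORE; scanning continues within the new
 * bound and splits it again if a later page is incore.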
1040 */ 1041 tmp = vmu_alloc_bound(); 1042 tmp->vmb_type = page_type; 1043 tmp->vmb_start = index; 1044 tmp->vmb_end = next->vmb_end; 1045 avl_insert_here(tree, tmp, next, AVL_AFTER); 1046 next->vmb_end = index - 1; 1047 if (*last == next) 1048 *last = tmp; 1049 next = tmp; 1050 } 1051 if (pgcnt > 1) { 1052 /* 1053 * If inside large page, jump to next large 1054 * page 1055 */ 1056 index = (index & ~pgmsk) + pgcnt; 1057 } else { 1058 index++; 1059 } 1060 } 1061 if (next == *last) { 1062 ASSERT(next->vmb_type != VMUSAGE_BOUND_UNKNOWN); 1063 break; 1064 } else 1065 next = AVL_NEXT(tree, next); 1066 } 1067 } 1068 1069 /* 1070 * Calculate the rss and swap consumed by a segment. vmu_entities is the 1071 * list of entities to visit. For shared segments, the vnode or amp 1072 * is looked up in each entity to see if it has been already counted. Private 1073 * anon pages are checked per entity to ensure that COW pages are not 1074 * double counted. 1075 * 1076 * For private mapped files, first the amp is checked for private pages. 1077 * Bounds not backed by the amp are looked up in the vnode for each entity 1078 * to avoid double counting of private COW vnode pages. 1079 */ 1080 static void 1081 vmu_calculate_seg(vmu_entity_t *vmu_entities, struct seg *seg) 1082 { 1083 struct segvn_data *svd; 1084 struct shm_data *shmd; 1085 struct spt_data *sptd; 1086 vmu_object_t *shared_object = NULL; 1087 vmu_object_t *entity_object = NULL; 1088 vmu_entity_t *entity; 1089 vmusage_t *result; 1090 avl_tree_t *tree; 1091 vmu_bound_t *first = NULL; 1092 vmu_bound_t *last = NULL; 1093 vmu_bound_t *cur = NULL; 1094 vmu_bound_t *e_first = NULL; 1095 vmu_bound_t *e_last = NULL; 1096 vmu_bound_t *tmp; 1097 pgcnt_t p_index, s_index, p_start, p_end, s_start, s_end, rss, virt; 1098 struct anon_map *private_amp = NULL; 1099 boolean_t incore = B_FALSE; 1100 boolean_t shared = B_FALSE; 1101 int file = 0; 1102 pgcnt_t swresv = 0; 1103 pgcnt_t panon = 0; 1104 1105 /* Can zero-length segments exist? Not sure, so paranoia. */ 1106 if (seg->s_size <= 0) 1107 return; 1108 1109 /* 1110 * Figure out if there is a shared object (such as a named vnode or 1111 * a shared amp, then figure out if there is a private amp, which 1112 * identifies private pages. 1113 */ 1114 if (seg->s_ops == &segvn_ops) { 1115 svd = (struct segvn_data *)seg->s_data; 1116 if (svd->type == MAP_SHARED) { 1117 shared = B_TRUE; 1118 } else { 1119 swresv = svd->swresv; 1120 1121 if (SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, 1122 RW_READER) != 0) { 1123 /* 1124 * Text replication anon maps can be shared 1125 * across all zones. Space used for text 1126 * replication is typically capped as a small % 1127 * of memory. To keep it simple for now we 1128 * don't account for swap and memory space used 1129 * for text replication. 
1130 */ 1131 if (svd->tr_state == SEGVN_TR_OFF && 1132 svd->amp != NULL) { 1133 private_amp = svd->amp; 1134 p_start = svd->anon_index; 1135 p_end = svd->anon_index + 1136 btop(seg->s_size) - 1; 1137 } 1138 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 1139 } 1140 } 1141 if (svd->vp != NULL) { 1142 file = 1; 1143 shared_object = vmu_find_insert_object( 1144 vmu_data.vmu_all_vnodes_hash, (caddr_t)svd->vp, 1145 VMUSAGE_TYPE_VNODE); 1146 s_start = btop(svd->offset); 1147 s_end = btop(svd->offset + seg->s_size) - 1; 1148 } 1149 if (svd->amp != NULL && svd->type == MAP_SHARED) { 1150 ASSERT(shared_object == NULL); 1151 shared_object = vmu_find_insert_object( 1152 vmu_data.vmu_all_amps_hash, (caddr_t)svd->amp, 1153 VMUSAGE_TYPE_AMP); 1154 s_start = svd->anon_index; 1155 s_end = svd->anon_index + btop(seg->s_size) - 1; 1156 /* schedctl mappings are always in core */ 1157 if (svd->amp->swresv == 0) 1158 incore = B_TRUE; 1159 } 1160 } else if (seg->s_ops == &segspt_shmops) { 1161 shared = B_TRUE; 1162 shmd = (struct shm_data *)seg->s_data; 1163 shared_object = vmu_find_insert_object( 1164 vmu_data.vmu_all_amps_hash, (caddr_t)shmd->shm_amp, 1165 VMUSAGE_TYPE_AMP); 1166 s_start = 0; 1167 s_end = btop(seg->s_size) - 1; 1168 sptd = shmd->shm_sptseg->s_data; 1169 1170 /* ism segments are always incore and do not reserve swap */ 1171 if (sptd->spt_flags & SHM_SHARE_MMU) 1172 incore = B_TRUE; 1173 1174 } else { 1175 return; 1176 } 1177 1178 /* 1179 * If there is a private amp, count anon pages that exist. If an 1180 * anon has a refcnt > 1 (COW sharing), then save the anon in a 1181 * hash so that it is not double counted. 1182 * 1183 * If there is also a shared object, then figure out the bounds 1184 * which are not mapped by the private amp. 1185 */ 1186 if (private_amp != NULL) { 1187 1188 /* Enter as writer to prevent COW anons from being freed */ 1189 ANON_LOCK_ENTER(&private_amp->a_rwlock, RW_WRITER); 1190 1191 p_index = p_start; 1192 s_index = s_start; 1193 1194 while (p_index <= p_end) { 1195 1196 pgcnt_t p_index_next; 1197 pgcnt_t p_bound_size; 1198 int cnt; 1199 anoff_t off; 1200 struct vnode *vn; 1201 struct anon *ap; 1202 page_t *page; /* For handling of large */ 1203 pgcnt_t pgcnt = 1; /* pages */ 1204 pgcnt_t pgstart; 1205 pgcnt_t pgend; 1206 uint_t pgshft; 1207 pgcnt_t pgmsk; 1208 1209 p_index_next = p_index; 1210 ap = anon_get_next_ptr(private_amp->ahp, 1211 &p_index_next); 1212 1213 /* 1214 * If next anon is past end of mapping, simulate 1215 * end of anon so loop terminates. 1216 */ 1217 if (p_index_next > p_end) { 1218 p_index_next = p_end + 1; 1219 ap = NULL; 1220 } 1221 /* 1222 * For COW segments, keep track of bounds not 1223 * backed by private amp so they can be looked 1224 * up in the backing vnode 1225 */ 1226 if (p_index_next != p_index) { 1227 1228 /* 1229 * Compute index difference between anon and 1230 * previous anon. 
1231 */ 1232 p_bound_size = p_index_next - p_index - 1; 1233 1234 if (shared_object != NULL) { 1235 cur = vmu_alloc_bound(); 1236 cur->vmb_start = s_index; 1237 cur->vmb_end = s_index + p_bound_size; 1238 cur->vmb_type = VMUSAGE_BOUND_UNKNOWN; 1239 if (first == NULL) { 1240 first = cur; 1241 last = cur; 1242 } else { 1243 last->vmb_next = cur; 1244 last = cur; 1245 } 1246 } 1247 p_index = p_index + p_bound_size + 1; 1248 s_index = s_index + p_bound_size + 1; 1249 } 1250 1251 /* Detect end of anons in amp */ 1252 if (ap == NULL) 1253 break; 1254 1255 cnt = ap->an_refcnt; 1256 swap_xlate(ap, &vn, &off); 1257 1258 if (vn == NULL || vn->v_pages == NULL || 1259 (page = page_exists(vn, off)) == NULL) { 1260 p_index++; 1261 s_index++; 1262 continue; 1263 } 1264 1265 /* 1266 * If large page is found, compute portion of large 1267 * page in mapping, and increment indicies to the next 1268 * large page. 1269 */ 1270 if (page->p_szc > 0) { 1271 1272 pgcnt = page_get_pagecnt(page->p_szc); 1273 pgshft = page_get_shift(page->p_szc); 1274 pgmsk = (0x1 << (pgshft - PAGESHIFT)) - 1; 1275 1276 /* First page in large page */ 1277 pgstart = p_index & ~pgmsk; 1278 /* Last page in large page */ 1279 pgend = pgstart + pgcnt - 1; 1280 /* 1281 * Artifically end page if page extends past 1282 * end of mapping. 1283 */ 1284 if (pgend > p_end) 1285 pgend = p_end; 1286 1287 /* 1288 * Compute number of pages from large page 1289 * which are mapped. 1290 */ 1291 pgcnt = pgend - p_index + 1; 1292 1293 /* 1294 * Point indicies at page after large page, 1295 * or at page after end of mapping. 1296 */ 1297 p_index += pgcnt; 1298 s_index += pgcnt; 1299 } else { 1300 p_index++; 1301 s_index++; 1302 } 1303 1304 /* 1305 * Assume anon structs with a refcnt 1306 * of 1 are not COW shared, so there 1307 * is no reason to track them per entity. 1308 */ 1309 if (cnt == 1) { 1310 panon += pgcnt; 1311 continue; 1312 } 1313 for (entity = vmu_entities; entity != NULL; 1314 entity = entity->vme_next_calc) { 1315 1316 result = &entity->vme_result; 1317 /* 1318 * Track COW anons per entity so 1319 * they are not double counted. 1320 */ 1321 if (vmu_find_insert_anon(entity->vme_anon_hash, 1322 (caddr_t)ap) == 0) 1323 continue; 1324 1325 result->vmu_rss_all += (pgcnt << PAGESHIFT); 1326 result->vmu_rss_private += 1327 (pgcnt << PAGESHIFT); 1328 } 1329 } 1330 ANON_LOCK_EXIT(&private_amp->a_rwlock); 1331 } 1332 1333 /* Add up resident anon and swap reserved for private mappings */ 1334 if (swresv > 0 || panon > 0) { 1335 for (entity = vmu_entities; entity != NULL; 1336 entity = entity->vme_next_calc) { 1337 result = &entity->vme_result; 1338 result->vmu_swap_all += swresv; 1339 result->vmu_swap_private += swresv; 1340 result->vmu_rss_all += (panon << PAGESHIFT); 1341 result->vmu_rss_private += (panon << PAGESHIFT); 1342 } 1343 } 1344 1345 /* Compute resident pages backing shared amp or named vnode */ 1346 if (shared_object != NULL) { 1347 if (first == NULL) { 1348 /* 1349 * No private amp, or private amp has no anon 1350 * structs. This means entire segment is backed by 1351 * the shared object. 1352 */ 1353 first = vmu_alloc_bound(); 1354 first->vmb_start = s_start; 1355 first->vmb_end = s_end; 1356 first->vmb_type = VMUSAGE_BOUND_UNKNOWN; 1357 } 1358 /* 1359 * Iterate bounds not backed by private amp, and compute 1360 * resident pages. 
1361 */ 1362 cur = first; 1363 while (cur != NULL) { 1364 1365 if (vmu_insert_lookup_object_bounds(shared_object, 1366 cur->vmb_start, cur->vmb_end, VMUSAGE_BOUND_UNKNOWN, 1367 &first, &last) > 0) { 1368 /* new bounds, find incore/not-incore */ 1369 tree = &(shared_object->vmo_bounds); 1370 if (shared_object->vmo_type == 1371 VMUSAGE_TYPE_VNODE) { 1372 vmu_vnode_update_incore_bounds( 1373 tree, 1374 (vnode_t *) 1375 shared_object->vmo_key, &first, 1376 &last); 1377 } else { 1378 vmu_amp_update_incore_bounds( 1379 tree, 1380 (struct anon_map *) 1381 shared_object->vmo_key, &first, 1382 &last, incore); 1383 } 1384 vmu_merge_bounds(tree, &first, &last); 1385 } 1386 for (entity = vmu_entities; entity != NULL; 1387 entity = entity->vme_next_calc) { 1388 avl_tree_t *e_tree; 1389 1390 result = &entity->vme_result; 1391 1392 entity_object = vmu_find_insert_object( 1393 shared_object->vmo_type == 1394 VMUSAGE_TYPE_VNODE ? entity->vme_vnode_hash: 1395 entity->vme_amp_hash, 1396 shared_object->vmo_key, 1397 shared_object->vmo_type); 1398 1399 virt = vmu_insert_lookup_object_bounds( 1400 entity_object, cur->vmb_start, cur->vmb_end, 1401 VMUSAGE_BOUND_UNKNOWN, &e_first, &e_last); 1402 1403 if (virt == 0) 1404 continue; 1405 /* 1406 * Range visited for this entity 1407 */ 1408 e_tree = &(entity_object->vmo_bounds); 1409 rss = vmu_update_bounds(e_tree, &e_first, 1410 &e_last, tree, first, last); 1411 result->vmu_rss_all += (rss << PAGESHIFT); 1412 if (shared == B_TRUE && file == B_FALSE) { 1413 /* shared anon mapping */ 1414 result->vmu_swap_all += 1415 (virt << PAGESHIFT); 1416 result->vmu_swap_shared += 1417 (virt << PAGESHIFT); 1418 result->vmu_rss_shared += 1419 (rss << PAGESHIFT); 1420 } else if (shared == B_TRUE && file == B_TRUE) { 1421 /* shared file mapping */ 1422 result->vmu_rss_shared += 1423 (rss << PAGESHIFT); 1424 } else if (shared == B_FALSE && 1425 file == B_TRUE) { 1426 /* private file mapping */ 1427 result->vmu_rss_private += 1428 (rss << PAGESHIFT); 1429 } 1430 vmu_merge_bounds(e_tree, &e_first, &e_last); 1431 } 1432 tmp = cur; 1433 cur = cur->vmb_next; 1434 vmu_free_bound(tmp); 1435 } 1436 } 1437 } 1438 1439 /* 1440 * Based on the current calculation flags, find the relevant entities 1441 * which are relative to the process. Then calculate each segment 1442 * in the process'es address space for each relevant entity. 
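 *
 * Illustrative example (hypothetical flags and ids): with
 * VMUSAGE_ALL_ZONES | VMUSAGE_COL_PROJECTS in effect, a process in zone 2,
 * project 10 is charged to the zone 2 entity and to the collapsed project 10
 * entity; both are linked through vme_next_calc for the duration of this
 * call.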
1443 */ 1444 static void 1445 vmu_calculate_proc(proc_t *p) 1446 { 1447 vmu_entity_t *entities = NULL; 1448 vmu_zone_t *zone; 1449 vmu_entity_t *tmp; 1450 struct as *as; 1451 struct seg *seg; 1452 int ret; 1453 1454 /* Figure out which entities are being computed */ 1455 if ((vmu_data.vmu_system) != NULL) { 1456 tmp = vmu_data.vmu_system; 1457 tmp->vme_next_calc = entities; 1458 entities = tmp; 1459 } 1460 if (vmu_data.vmu_calc_flags & 1461 (VMUSAGE_ZONE | VMUSAGE_ALL_ZONES | VMUSAGE_PROJECTS | 1462 VMUSAGE_ALL_PROJECTS | VMUSAGE_TASKS | VMUSAGE_ALL_TASKS | 1463 VMUSAGE_RUSERS | VMUSAGE_ALL_RUSERS | VMUSAGE_EUSERS | 1464 VMUSAGE_ALL_EUSERS)) { 1465 ret = i_mod_hash_find_nosync(vmu_data.vmu_zones_hash, 1466 (mod_hash_key_t)(uintptr_t)p->p_zone->zone_id, 1467 (mod_hash_val_t *)&zone); 1468 if (ret != 0) { 1469 zone = vmu_alloc_zone(p->p_zone->zone_id); 1470 ret = i_mod_hash_insert_nosync(vmu_data.vmu_zones_hash, 1471 (mod_hash_key_t)(uintptr_t)p->p_zone->zone_id, 1472 (mod_hash_val_t)zone, (mod_hash_hndl_t)0); 1473 ASSERT(ret == 0); 1474 } 1475 if (zone->vmz_zone != NULL) { 1476 tmp = zone->vmz_zone; 1477 tmp->vme_next_calc = entities; 1478 entities = tmp; 1479 } 1480 if (vmu_data.vmu_calc_flags & 1481 (VMUSAGE_PROJECTS | VMUSAGE_ALL_PROJECTS)) { 1482 tmp = vmu_find_insert_entity(zone->vmz_projects_hash, 1483 p->p_task->tk_proj->kpj_id, VMUSAGE_PROJECTS, 1484 zone->vmz_id); 1485 tmp->vme_next_calc = entities; 1486 entities = tmp; 1487 } 1488 if (vmu_data.vmu_calc_flags & 1489 (VMUSAGE_TASKS | VMUSAGE_ALL_TASKS)) { 1490 tmp = vmu_find_insert_entity(zone->vmz_tasks_hash, 1491 p->p_task->tk_tkid, VMUSAGE_TASKS, zone->vmz_id); 1492 tmp->vme_next_calc = entities; 1493 entities = tmp; 1494 } 1495 if (vmu_data.vmu_calc_flags & 1496 (VMUSAGE_RUSERS | VMUSAGE_ALL_RUSERS)) { 1497 tmp = vmu_find_insert_entity(zone->vmz_rusers_hash, 1498 crgetruid(p->p_cred), VMUSAGE_RUSERS, zone->vmz_id); 1499 tmp->vme_next_calc = entities; 1500 entities = tmp; 1501 } 1502 if (vmu_data.vmu_calc_flags & 1503 (VMUSAGE_EUSERS | VMUSAGE_ALL_EUSERS)) { 1504 tmp = vmu_find_insert_entity(zone->vmz_eusers_hash, 1505 crgetuid(p->p_cred), VMUSAGE_EUSERS, zone->vmz_id); 1506 tmp->vme_next_calc = entities; 1507 entities = tmp; 1508 } 1509 } 1510 /* Entities which collapse projects and users for all zones */ 1511 if (vmu_data.vmu_calc_flags & VMUSAGE_COL_PROJECTS) { 1512 tmp = vmu_find_insert_entity(vmu_data.vmu_projects_col_hash, 1513 p->p_task->tk_proj->kpj_id, VMUSAGE_PROJECTS, ALL_ZONES); 1514 tmp->vme_next_calc = entities; 1515 entities = tmp; 1516 } 1517 if (vmu_data.vmu_calc_flags & VMUSAGE_COL_RUSERS) { 1518 tmp = vmu_find_insert_entity(vmu_data.vmu_rusers_col_hash, 1519 crgetruid(p->p_cred), VMUSAGE_RUSERS, ALL_ZONES); 1520 tmp->vme_next_calc = entities; 1521 entities = tmp; 1522 } 1523 if (vmu_data.vmu_calc_flags & VMUSAGE_COL_EUSERS) { 1524 tmp = vmu_find_insert_entity(vmu_data.vmu_eusers_col_hash, 1525 crgetuid(p->p_cred), VMUSAGE_EUSERS, ALL_ZONES); 1526 tmp->vme_next_calc = entities; 1527 entities = tmp; 1528 } 1529 1530 ASSERT(entities != NULL); 1531 /* process all segs in process's address space */ 1532 as = p->p_as; 1533 AS_LOCK_ENTER(as, &as->a_lock, RW_READER); 1534 for (seg = AS_SEGFIRST(as); seg != NULL; 1535 seg = AS_SEGNEXT(as, seg)) { 1536 vmu_calculate_seg(entities, seg); 1537 } 1538 AS_LOCK_EXIT(as, &as->a_lock); 1539 } 1540 1541 /* 1542 * Free data created by previous call to vmu_calculate(). 
1543 */ 1544 static void 1545 vmu_clear_calc() 1546 { 1547 if (vmu_data.vmu_system != NULL) 1548 vmu_free_entity(vmu_data.vmu_system); 1549 vmu_data.vmu_system = NULL; 1550 if (vmu_data.vmu_zones_hash != NULL) 1551 i_mod_hash_clear_nosync(vmu_data.vmu_zones_hash); 1552 if (vmu_data.vmu_projects_col_hash != NULL) 1553 i_mod_hash_clear_nosync(vmu_data.vmu_projects_col_hash); 1554 if (vmu_data.vmu_rusers_col_hash != NULL) 1555 i_mod_hash_clear_nosync(vmu_data.vmu_rusers_col_hash); 1556 if (vmu_data.vmu_eusers_col_hash != NULL) 1557 i_mod_hash_clear_nosync(vmu_data.vmu_eusers_col_hash); 1558 1559 i_mod_hash_clear_nosync(vmu_data.vmu_all_vnodes_hash); 1560 i_mod_hash_clear_nosync(vmu_data.vmu_all_amps_hash); 1561 } 1562 1563 /* 1564 * Free unused data structures. These can result if the system workload 1565 * decreases between calculations. 1566 */ 1567 static void 1568 vmu_free_extra() 1569 { 1570 vmu_bound_t *tb; 1571 vmu_object_t *to; 1572 vmu_entity_t *te; 1573 vmu_zone_t *tz; 1574 1575 while (vmu_data.vmu_free_bounds != NULL) { 1576 tb = vmu_data.vmu_free_bounds; 1577 vmu_data.vmu_free_bounds = vmu_data.vmu_free_bounds->vmb_next; 1578 kmem_cache_free(vmu_bound_cache, tb); 1579 } 1580 while (vmu_data.vmu_free_objects != NULL) { 1581 to = vmu_data.vmu_free_objects; 1582 vmu_data.vmu_free_objects = 1583 vmu_data.vmu_free_objects->vmo_next; 1584 kmem_cache_free(vmu_object_cache, to); 1585 } 1586 while (vmu_data.vmu_free_entities != NULL) { 1587 te = vmu_data.vmu_free_entities; 1588 vmu_data.vmu_free_entities = 1589 vmu_data.vmu_free_entities->vme_next; 1590 if (te->vme_vnode_hash != NULL) 1591 mod_hash_destroy_hash(te->vme_vnode_hash); 1592 if (te->vme_amp_hash != NULL) 1593 mod_hash_destroy_hash(te->vme_amp_hash); 1594 if (te->vme_anon_hash != NULL) 1595 mod_hash_destroy_hash(te->vme_anon_hash); 1596 kmem_free(te, sizeof (vmu_entity_t)); 1597 } 1598 while (vmu_data.vmu_free_zones != NULL) { 1599 tz = vmu_data.vmu_free_zones; 1600 vmu_data.vmu_free_zones = 1601 vmu_data.vmu_free_zones->vmz_next; 1602 if (tz->vmz_projects_hash != NULL) 1603 mod_hash_destroy_hash(tz->vmz_projects_hash); 1604 if (tz->vmz_tasks_hash != NULL) 1605 mod_hash_destroy_hash(tz->vmz_tasks_hash); 1606 if (tz->vmz_rusers_hash != NULL) 1607 mod_hash_destroy_hash(tz->vmz_rusers_hash); 1608 if (tz->vmz_eusers_hash != NULL) 1609 mod_hash_destroy_hash(tz->vmz_eusers_hash); 1610 kmem_free(tz, sizeof (vmu_zone_t)); 1611 } 1612 } 1613 1614 extern kcondvar_t *pr_pid_cv; 1615 1616 /* 1617 * Determine which entity types are relevant and allocate the hashes to 1618 * track them. Then walk the process table and count rss and swap 1619 * for each process'es address space. Address space object such as 1620 * vnodes, amps and anons are tracked per entity, so that they are 1621 * not double counted in the results. 1622 * 1623 */ 1624 static void 1625 vmu_calculate() 1626 { 1627 int i = 0; 1628 int ret; 1629 proc_t *p; 1630 1631 vmu_clear_calc(); 1632 1633 if (vmu_data.vmu_calc_flags & VMUSAGE_SYSTEM) 1634 vmu_data.vmu_system = vmu_alloc_entity(0, VMUSAGE_SYSTEM, 1635 ALL_ZONES); 1636 1637 /* 1638 * Walk process table and calculate rss of each proc. 1639 * 1640 * Pidlock and p_lock cannot be held while doing the rss calculation. 1641 * This is because: 1642 * 1. The calculation allocates using KM_SLEEP. 1643 * 2. The calculation grabs a_lock, which cannot be grabbed 1644 * after p_lock. 1645 * 1646 * Since pidlock must be dropped, we cannot simply just walk the 1647 * practive list. 
Instead, we walk the process table, and sprlock 1648 * each process to ensure that it does not exit during the 1649 * calculation. 1650 */ 1651 1652 mutex_enter(&pidlock); 1653 for (i = 0; i < v.v_proc; i++) { 1654 again: 1655 p = pid_entry(i); 1656 if (p == NULL) 1657 continue; 1658 1659 mutex_enter(&p->p_lock); 1660 mutex_exit(&pidlock); 1661 1662 if (panicstr) { 1663 mutex_exit(&p->p_lock); 1664 return; 1665 } 1666 1667 /* Try to set P_PR_LOCK */ 1668 ret = sprtrylock_proc(p); 1669 if (ret == -1) { 1670 /* Process in invalid state */ 1671 mutex_exit(&p->p_lock); 1672 mutex_enter(&pidlock); 1673 continue; 1674 } else if (ret == 1) { 1675 /* 1676 * P_PR_LOCK is already set. Wait and try again. 1677 * This also drops p_lock. 1678 */ 1679 sprwaitlock_proc(p); 1680 mutex_enter(&pidlock); 1681 goto again; 1682 } 1683 mutex_exit(&p->p_lock); 1684 1685 vmu_calculate_proc(p); 1686 1687 mutex_enter(&p->p_lock); 1688 sprunlock(p); 1689 mutex_enter(&pidlock); 1690 } 1691 mutex_exit(&pidlock); 1692 1693 vmu_free_extra(); 1694 } 1695 1696 /* 1697 * allocate a new cache for N results satisfying flags 1698 */ 1699 vmu_cache_t * 1700 vmu_cache_alloc(size_t nres, uint_t flags) 1701 { 1702 vmu_cache_t *cache; 1703 1704 cache = kmem_zalloc(sizeof (vmu_cache_t), KM_SLEEP); 1705 cache->vmc_results = kmem_zalloc(sizeof (vmusage_t) * nres, KM_SLEEP); 1706 cache->vmc_nresults = nres; 1707 cache->vmc_flags = flags; 1708 cache->vmc_refcnt = 1; 1709 return (cache); 1710 } 1711 1712 /* 1713 * Make sure cached results are not freed 1714 */ 1715 static void 1716 vmu_cache_hold(vmu_cache_t *cache) 1717 { 1718 ASSERT(MUTEX_HELD(&vmu_data.vmu_lock)); 1719 cache->vmc_refcnt++; 1720 } 1721 1722 /* 1723 * free cache data 1724 */ 1725 static void 1726 vmu_cache_rele(vmu_cache_t *cache) 1727 { 1728 ASSERT(MUTEX_HELD(&vmu_data.vmu_lock)); 1729 ASSERT(cache->vmc_refcnt > 0); 1730 cache->vmc_refcnt--; 1731 if (cache->vmc_refcnt == 0) { 1732 kmem_free(cache->vmc_results, sizeof (vmusage_t) * 1733 cache->vmc_nresults); 1734 kmem_free(cache, sizeof (vmu_cache_t)); 1735 } 1736 } 1737 1738 /* 1739 * Copy out the cached results to a caller. Inspect the callers flags 1740 * and zone to determine which cached results should be copied. 1741 */ 1742 static int 1743 vmu_copyout_results(vmu_cache_t *cache, vmusage_t *buf, size_t *nres, 1744 uint_t flags, int cpflg) 1745 { 1746 vmusage_t *result, *out_result; 1747 vmusage_t dummy; 1748 size_t i, count = 0; 1749 size_t bufsize; 1750 int ret = 0; 1751 uint_t types = 0; 1752 1753 if (nres != NULL) { 1754 if (ddi_copyin((caddr_t)nres, &bufsize, sizeof (size_t), cpflg)) 1755 return (set_errno(EFAULT)); 1756 } else { 1757 bufsize = 0; 1758 } 1759 1760 /* figure out what results the caller is interested in. 
*/ 1761 if ((flags & VMUSAGE_SYSTEM) && curproc->p_zone == global_zone) 1762 types |= VMUSAGE_SYSTEM; 1763 if (flags & (VMUSAGE_ZONE | VMUSAGE_ALL_ZONES)) 1764 types |= VMUSAGE_ZONE; 1765 if (flags & (VMUSAGE_PROJECTS | VMUSAGE_ALL_PROJECTS | 1766 VMUSAGE_COL_PROJECTS)) 1767 types |= VMUSAGE_PROJECTS; 1768 if (flags & (VMUSAGE_TASKS | VMUSAGE_ALL_TASKS)) 1769 types |= VMUSAGE_TASKS; 1770 if (flags & (VMUSAGE_RUSERS | VMUSAGE_ALL_RUSERS | VMUSAGE_COL_RUSERS)) 1771 types |= VMUSAGE_RUSERS; 1772 if (flags & (VMUSAGE_EUSERS | VMUSAGE_ALL_EUSERS | VMUSAGE_COL_EUSERS)) 1773 types |= VMUSAGE_EUSERS; 1774 1775 /* count results for current zone */ 1776 out_result = buf; 1777 for (result = cache->vmc_results, i = 0; 1778 i < cache->vmc_nresults; result++, i++) { 1779 1780 /* Do not return "other-zone" results to non-global zones */ 1781 if (curproc->p_zone != global_zone && 1782 curproc->p_zone->zone_id != result->vmu_zoneid) 1783 continue; 1784 1785 /* 1786 * If non-global zone requests VMUSAGE_SYSTEM, fake 1787 * up VMUSAGE_ZONE result as VMUSAGE_SYSTEM result. 1788 */ 1789 if (curproc->p_zone != global_zone && 1790 (flags & VMUSAGE_SYSTEM) != 0 && 1791 result->vmu_type == VMUSAGE_ZONE) { 1792 count++; 1793 if (out_result != NULL) { 1794 if (bufsize < count) { 1795 ret = set_errno(EOVERFLOW); 1796 } else { 1797 dummy = *result; 1798 dummy.vmu_zoneid = ALL_ZONES; 1799 dummy.vmu_id = 0; 1800 dummy.vmu_type = VMUSAGE_SYSTEM; 1801 if (ddi_copyout(&dummy, out_result, 1802 sizeof (vmusage_t), cpflg)) 1803 return (set_errno(EFAULT)); 1804 out_result++; 1805 } 1806 } 1807 } 1808 1809 /* Skip results that do not match requested type */ 1810 if ((result->vmu_type & types) == 0) 1811 continue; 1812 1813 /* Skip collated results if not requested */ 1814 if (result->vmu_zoneid == ALL_ZONES) { 1815 if (result->vmu_type == VMUSAGE_PROJECTS && 1816 (flags & VMUSAGE_COL_PROJECTS) == 0) 1817 continue; 1818 if (result->vmu_type == VMUSAGE_EUSERS && 1819 (flags & VMUSAGE_COL_EUSERS) == 0) 1820 continue; 1821 if (result->vmu_type == VMUSAGE_RUSERS && 1822 (flags & VMUSAGE_COL_RUSERS) == 0) 1823 continue; 1824 } 1825 1826 /* Skip "other zone" results if not requested */ 1827 if (result->vmu_zoneid != curproc->p_zone->zone_id) { 1828 if (result->vmu_type == VMUSAGE_ZONE && 1829 (flags & VMUSAGE_ALL_ZONES) == 0) 1830 continue; 1831 if (result->vmu_type == VMUSAGE_PROJECTS && 1832 (flags & (VMUSAGE_ALL_PROJECTS | 1833 VMUSAGE_COL_PROJECTS)) == 0) 1834 continue; 1835 if (result->vmu_type == VMUSAGE_TASKS && 1836 (flags & VMUSAGE_ALL_TASKS) == 0) 1837 continue; 1838 if (result->vmu_type == VMUSAGE_RUSERS && 1839 (flags & (VMUSAGE_ALL_RUSERS | 1840 VMUSAGE_COL_RUSERS)) == 0) 1841 continue; 1842 if (result->vmu_type == VMUSAGE_EUSERS && 1843 (flags & (VMUSAGE_ALL_EUSERS | 1844 VMUSAGE_COL_EUSERS)) == 0) 1845 continue; 1846 } 1847 count++; 1848 if (out_result != NULL) { 1849 if (bufsize < count) { 1850 ret = set_errno(EOVERFLOW); 1851 } else { 1852 if (ddi_copyout(result, out_result, 1853 sizeof (vmusage_t), cpflg)) 1854 return (set_errno(EFAULT)); 1855 out_result++; 1856 } 1857 } 1858 } 1859 if (nres != NULL) 1860 if (ddi_copyout(&count, (void *)nres, sizeof (size_t), cpflg)) 1861 return (set_errno(EFAULT)); 1862 1863 return (ret); 1864 } 1865 1866 /* 1867 * vm_getusage() 1868 * 1869 * Counts rss and swap by zone, project, task, and/or user. The flags argument 1870 * determines the type of results structures returned. 
Flags requesting 1871 * results from more than one zone are "flattened" to the local zone if the 1872 * caller is not the global zone. 1873 * 1874 * args: 1875 * flags: bitmap consisting of one or more of VMUSAGE_*. 1876 * age: maximum allowable age (time since counting was done) in 1877 * seconds of the results. Results from previous callers are 1878 * cached in kernel. 1879 * buf: pointer to buffer array of vmusage_t. If NULL, then only nres 1880 * is set on success. 1881 * nres: Set to number of vmusage_t structures pointed to by buf 1882 * before calling vm_getusage(). 1883 * On return 0 (success) or EOVERFLOW, nres is set to the number of result 1884 * structures returned or attempted to return. 1885 * 1886 * returns 0 on success, -1 on failure: 1887 * EINTR (interrupted) 1888 * EOVERFLOW (nres too small for results, nres set to needed value for success) 1889 * EINVAL (flags invalid) 1890 * EFAULT (bad address for buf or nres) 1891 */ 1892 int 1893 vm_getusage(uint_t flags, time_t age, vmusage_t *buf, size_t *nres, int cpflg) 1894 { 1895 vmu_entity_t *entity; 1896 vmusage_t *result; 1897 int ret = 0; 1898 int cacherecent = 0; 1899 hrtime_t now; 1900 uint_t flags_orig; 1901 1902 /* 1903 * Non-global zones cannot request system-wide and/or collated 1904 * results, or the system result, so munge the flags accordingly. 1905 */ 1906 flags_orig = flags; 1907 if (curproc->p_zone != global_zone) { 1908 if (flags & (VMUSAGE_ALL_PROJECTS | VMUSAGE_COL_PROJECTS)) { 1909 flags &= ~(VMUSAGE_ALL_PROJECTS | VMUSAGE_COL_PROJECTS); 1910 flags |= VMUSAGE_PROJECTS; 1911 } 1912 if (flags & (VMUSAGE_ALL_RUSERS | VMUSAGE_COL_RUSERS)) { 1913 flags &= ~(VMUSAGE_ALL_RUSERS | VMUSAGE_COL_RUSERS); 1914 flags |= VMUSAGE_RUSERS; 1915 } 1916 if (flags & (VMUSAGE_ALL_EUSERS | VMUSAGE_COL_EUSERS)) { 1917 flags &= ~(VMUSAGE_ALL_EUSERS | VMUSAGE_COL_EUSERS); 1918 flags |= VMUSAGE_EUSERS; 1919 } 1920 if (flags & VMUSAGE_SYSTEM) { 1921 flags &= ~VMUSAGE_SYSTEM; 1922 flags |= VMUSAGE_ZONE; 1923 } 1924 } 1925 1926 /* Check for unknown flags */ 1927 if ((flags & (~VMUSAGE_MASK)) != 0) 1928 return (set_errno(EINVAL)); 1929 1930 /* Check for no flags */ 1931 if ((flags & VMUSAGE_MASK) == 0) 1932 return (set_errno(EINVAL)); 1933 1934 mutex_enter(&vmu_data.vmu_lock); 1935 now = gethrtime(); 1936 1937 start: 1938 if (vmu_data.vmu_cache != NULL) { 1939 1940 vmu_cache_t *cache; 1941 1942 if ((vmu_data.vmu_cache->vmc_timestamp + 1943 ((hrtime_t)age * NANOSEC)) > now) 1944 cacherecent = 1; 1945 1946 if ((vmu_data.vmu_cache->vmc_flags & flags) == flags && 1947 cacherecent == 1) { 1948 cache = vmu_data.vmu_cache; 1949 vmu_cache_hold(cache); 1950 mutex_exit(&vmu_data.vmu_lock); 1951 1952 ret = vmu_copyout_results(cache, buf, nres, flags_orig, 1953 cpflg); 1954 mutex_enter(&vmu_data.vmu_lock); 1955 vmu_cache_rele(cache); 1956 if (vmu_data.vmu_pending_waiters > 0) 1957 cv_broadcast(&vmu_data.vmu_cv); 1958 mutex_exit(&vmu_data.vmu_lock); 1959 return (ret); 1960 } 1961 /* 1962 * If the cache is recent, it is likely that there are other 1963 * consumers of vm_getusage running, so add their flags to the 1964 * desired flags for the calculation.
1965 */ 1966 if (cacherecent == 1) 1967 flags = vmu_data.vmu_cache->vmc_flags | flags; 1968 } 1969 if (vmu_data.vmu_calc_thread == NULL) { 1970 1971 vmu_cache_t *cache; 1972 1973 vmu_data.vmu_calc_thread = curthread; 1974 vmu_data.vmu_calc_flags = flags; 1975 vmu_data.vmu_entities = NULL; 1976 vmu_data.vmu_nentities = 0; 1977 if (vmu_data.vmu_pending_waiters > 0) 1978 vmu_data.vmu_calc_flags |= 1979 vmu_data.vmu_pending_flags; 1980 1981 vmu_data.vmu_pending_flags = 0; 1982 mutex_exit(&vmu_data.vmu_lock); 1983 vmu_calculate(); 1984 mutex_enter(&vmu_data.vmu_lock); 1985 /* copy results to cache */ 1986 if (vmu_data.vmu_cache != NULL) 1987 vmu_cache_rele(vmu_data.vmu_cache); 1988 cache = vmu_data.vmu_cache = 1989 vmu_cache_alloc(vmu_data.vmu_nentities, 1990 vmu_data.vmu_calc_flags); 1991 1992 result = cache->vmc_results; 1993 for (entity = vmu_data.vmu_entities; entity != NULL; 1994 entity = entity->vme_next) { 1995 *result = entity->vme_result; 1996 result++; 1997 } 1998 cache->vmc_timestamp = gethrtime(); 1999 vmu_cache_hold(cache); 2000 2001 vmu_data.vmu_calc_flags = 0; 2002 vmu_data.vmu_calc_thread = NULL; 2003 2004 if (vmu_data.vmu_pending_waiters > 0) 2005 cv_broadcast(&vmu_data.vmu_cv); 2006 2007 mutex_exit(&vmu_data.vmu_lock); 2008 2009 /* copy cache */ 2010 ret = vmu_copyout_results(cache, buf, nres, flags_orig, cpflg); 2011 mutex_enter(&vmu_data.vmu_lock); 2012 vmu_cache_rele(cache); 2013 mutex_exit(&vmu_data.vmu_lock); 2014 2015 return (ret); 2016 } 2017 vmu_data.vmu_pending_flags |= flags; 2018 vmu_data.vmu_pending_waiters++; 2019 while (vmu_data.vmu_calc_thread != NULL) { 2020 if (cv_wait_sig(&vmu_data.vmu_cv, 2021 &vmu_data.vmu_lock) == 0) { 2022 vmu_data.vmu_pending_waiters--; 2023 mutex_exit(&vmu_data.vmu_lock); 2024 return (set_errno(EINTR)); 2025 } 2026 } 2027 vmu_data.vmu_pending_waiters--; 2028 goto start; 2029 } 2030
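
/*
 * Illustrative sketch of a userland consumer (not part of this file; the
 * buffer size, flag combination, and 30 second age below are hypothetical).
 * A tool such as prstat(1M) or rcapd(1M) might call the getvmusage(2)
 * wrapper roughly as follows, accepting cached results up to 30 seconds old
 * and retrying with a larger buffer if EOVERFLOW is returned:
 *
 *	vmusage_t res[64];
 *	size_t nres = 64;
 *
 *	if (getvmusage(VMUSAGE_ZONE | VMUSAGE_PROJECTS, 30, res, &nres) != 0 &&
 *	    errno == EOVERFLOW) {
 *		... nres now holds the number of result structures needed ...
 *	}
 */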