Lines Matching defs:vmp

268  * Get a vmem_seg_t from vmp's segfree list.
271 vmem_getseg(vmem_t *vmp)
275 ASSERT(vmp->vm_nsegfree > 0);
277 vsp = vmp->vm_segfree;
278 vmp->vm_segfree = vsp->vs_knext;
279 vmp->vm_nsegfree--;
285 * Put a vmem_seg_t on vmp's segfree list.
288 vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
290 vsp->vs_knext = vmp->vm_segfree;
291 vmp->vm_segfree = vsp;
292 vmp->vm_nsegfree++;
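
Taken together, vmem_getseg() and vmem_putseg() treat vm_segfree as a per-arena LIFO stack of spare segment headers, linked through vs_knext and counted by vm_nsegfree. A minimal standalone sketch of that stack, using simplified stand-in types rather than the real vmem_t/vmem_seg_t layout:

#include <assert.h>
#include <stddef.h>

typedef struct seg {
	struct seg *vs_knext;          /* next spare header on the stack */
} seg_t;

typedef struct arena {
	seg_t *vm_segfree;             /* top of the spare-header stack */
	int    vm_nsegfree;            /* number of spare headers */
} arena_t;

/* Pop a spare header; the caller must guarantee the stack is non-empty. */
static seg_t *
arena_getseg(arena_t *vmp)
{
	seg_t *vsp;

	assert(vmp->vm_nsegfree > 0);
	vsp = vmp->vm_segfree;
	vmp->vm_segfree = vsp->vs_knext;
	vmp->vm_nsegfree--;
	return (vsp);
}

/* Push a header back for reuse by a later vmem operation. */
static void
arena_putseg(arena_t *vmp, seg_t *vsp)
{
	vsp->vs_knext = vmp->vm_segfree;
	vmp->vm_segfree = vsp;
	vmp->vm_nsegfree++;
}
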
299 vmem_freelist_insert(vmem_t *vmp, vmem_seg_t *vsp)
303 ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
305 vprev = (vmem_seg_t *)&vmp->vm_freelist[highbit(VS_SIZE(vsp)) - 1];
307 vmp->vm_freemap |= VS_SIZE(vprev);
310 (void) cond_broadcast(&vmp->vm_cv);
317 vmem_freelist_delete(vmem_t *vmp, vmem_seg_t *vsp)
319 ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
327 ASSERT(vmp->vm_freemap & VS_SIZE(vsp->vs_kprev));
328 vmp->vm_freemap ^= VS_SIZE(vsp->vs_kprev);
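
vmem_freelist_insert() files a free segment on the freelist for its power-of-two size class, highbit(VS_SIZE(vsp)) - 1, and records that the class is non-empty in the vm_freemap bit vector; vmem_freelist_delete() clears that bit again when it removes the last real segment from the class (the XOR at line 328). The freelist heads appear to be sized so that VS_SIZE of a head is exactly the power of two for its list, which is why OR-ing in VS_SIZE(vprev) sets the right bit. A hedged sketch of just the size-class arithmetic, with a portable highbit() standing in for the library routine:

#include <stddef.h>
#include <stdint.h>

/* 1-based index of the highest set bit; 0 when the value is 0. */
static int
highbit(uintptr_t v)
{
	int h = 0;

	while (v != 0) {
		h++;
		v >>= 1;
	}
	return (h);
}

/* Freelist index for a free segment of 'size' bytes: floor(log2(size)). */
static int
freelist_index(size_t size)
{
	return (highbit(size) - 1);
}

/* Mark the size class non-empty when a segment is inserted... */
static void
freemap_set(uintptr_t *freemap, size_t size)
{
	*freemap |= (uintptr_t)1 << freelist_index(size);
}

/* ...and empty again when the last segment of that class is removed. */
static void
freemap_clear(uintptr_t *freemap, size_t size)
{
	*freemap &= ~((uintptr_t)1 << freelist_index(size));
}
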
337 vmem_hash_insert(vmem_t *vmp, vmem_seg_t *vsp)
342 bucket = VMEM_HASH(vmp, vsp->vs_start);
355 vmp->vm_kstat.vk_alloc++;
356 vmp->vm_kstat.vk_mem_inuse += VS_SIZE(vsp);
363 vmem_hash_delete(vmem_t *vmp, uintptr_t addr, size_t size)
367 prev_vspp = VMEM_HASH(vmp, addr);
373 vmp->vm_kstat.vk_lookup++;
379 vmp, addr, size);
383 "(expect %lu)", vmp, addr, size, VS_SIZE(vsp));
386 vmp->vm_kstat.vk_free++;
387 vmp->vm_kstat.vk_mem_inuse -= size;
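
vmem_hash_insert() and vmem_hash_delete() keep every allocated segment on a hash chain keyed by its start address, so a later free can recover the segment, and its recorded size, from just (addr, size); the panic fragments at lines 379 and 383 show the two failure modes, freeing an address that was never allocated and freeing with the wrong size. A simplified chained-hash sketch of that bookkeeping (fixed table size and an invented address hash, unlike the real VMEM_HASH macro):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 64                    /* illustrative; the real table resizes */

typedef struct aseg {
	struct aseg *vs_knext;         /* hash-chain link */
	uintptr_t    vs_start;         /* allocated address */
	size_t       vs_size;          /* allocated size */
} aseg_t;

static aseg_t *hash_table[NBUCKETS];

static aseg_t **
hash_bucket(uintptr_t addr)
{
	/* stand-in hash: the real code shifts by the arena's hash shift */
	return (&hash_table[(addr >> 4) & (NBUCKETS - 1)]);
}

static void
hash_insert(aseg_t *vsp)
{
	aseg_t **bucket = hash_bucket(vsp->vs_start);

	vsp->vs_knext = *bucket;
	*bucket = vsp;
}

static aseg_t *
hash_delete(uintptr_t addr, size_t size)
{
	aseg_t **prev_vspp = hash_bucket(addr);
	aseg_t *vsp;

	while ((vsp = *prev_vspp) != NULL) {
		if (vsp->vs_start == addr) {
			*prev_vspp = vsp->vs_knext;    /* unlink from the chain */
			break;
		}
		prev_vspp = &vsp->vs_knext;
	}
	if (vsp == NULL) {
		fprintf(stderr, "bad free: %lx never allocated\n",
		    (unsigned long)addr);
		abort();
	}
	if (vsp->vs_size != size) {
		fprintf(stderr, "bad free: %zu bytes, expected %zu\n",
		    size, vsp->vs_size);
		abort();
	}
	return (vsp);
}
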
396 vmem_seg_create(vmem_t *vmp, vmem_seg_t *vprev, uintptr_t start, uintptr_t end)
398 vmem_seg_t *newseg = vmem_getseg(vmp);
414 vmem_seg_destroy(vmem_t *vmp, vmem_seg_t *vsp)
419 vmem_putseg(vmp, vsp);
423 * Add the span [vaddr, vaddr + size) to vmp and update kstats.
426 vmem_span_create(vmem_t *vmp, void *vaddr, size_t size, uint8_t import)
433 knext = &vmp->vm_seg0;
434 if (!import && vmp->vm_source_alloc == NULL) {
443 kend = &vmp->vm_seg0;
452 ASSERT(MUTEX_HELD(&vmp->vm_lock));
454 if ((start | end) & (vmp->vm_quantum - 1)) {
456 vmp, vaddr, size);
459 span = vmem_seg_create(vmp, knext->vs_aprev, start, end);
463 newseg = vmem_seg_create(vmp, span, start, end);
464 vmem_freelist_insert(vmp, newseg);
468 vmp->vm_kstat.vk_mem_import += size;
469 vmp->vm_kstat.vk_mem_total += size;
475 * Remove span vsp from vmp and update kstats.
478 vmem_span_destroy(vmem_t *vmp, vmem_seg_t *vsp)
483 ASSERT(MUTEX_HELD(&vmp->vm_lock));
487 vmp->vm_kstat.vk_mem_import -= size;
488 vmp->vm_kstat.vk_mem_total -= size;
492 vmem_seg_destroy(vmp, vsp);
493 vmem_seg_destroy(vmp, span);
502 vmem_seg_alloc(vmem_t *vmp, vmem_seg_t *vsp, uintptr_t addr, size_t size)
507 size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
510 ASSERT(P2PHASE(vs_start, vmp->vm_quantum) == 0);
511 ASSERT(P2PHASE(addr, vmp->vm_quantum) == 0);
524 vsp = vmem_seg_create(vmp, vsp->vs_aprev, addr, addr + size);
525 vmem_hash_insert(vmp, vsp);
529 vmem_freelist_delete(vmp, vsp);
532 vmem_freelist_insert(vmp,
533 vmem_seg_create(vmp, vsp, addr_end, vs_end));
536 vmem_freelist_insert(vmp,
537 vmem_seg_create(vmp, vsp->vs_aprev, vs_start, addr));
542 vmem_hash_insert(vmp, vsp);
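
vmem_seg_alloc() carves [addr, addr + size) out of a free segment: if the allocation covers the whole segment it is simply moved from the freelist into the allocated-segment hash, otherwise new segments are created for the leftover space before and/or after the allocation (lines 532-537) and returned to the freelists. A small sketch of the split arithmetic, assuming quantum-aligned inputs:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct range {
	uintptr_t start;
	uintptr_t end;
} range_t;

/*
 * Split the free range 'freeseg' around the allocation [addr, addr + size).
 * Fills 'left'/'right' with any leftover free space and returns how many
 * leftover pieces were produced (0, 1, or 2).
 */
static int
seg_split(range_t freeseg, uintptr_t addr, size_t size,
    range_t *left, range_t *right)
{
	uintptr_t addr_end = addr + size;
	int nleft = 0;

	assert(freeseg.start <= addr && addr_end <= freeseg.end);

	if (freeseg.start < addr) {            /* free space before the allocation */
		left->start = freeseg.start;
		left->end = addr;
		nleft++;
	}
	if (addr_end < freeseg.end) {          /* free space after the allocation */
		right->start = addr_end;
		right->end = freeseg.end;
		nleft++;
	}
	return (nleft);
}
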
557 * Populate vmp's segfree list with VMEM_MINFREE vmem_seg_t structures.
560 vmem_populate(vmem_t *vmp, int vmflag)
569 while (vmp->vm_nsegfree < VMEM_MINFREE &&
571 vmem_putseg(vmp, vsp);
573 if (vmp->vm_nsegfree >= VMEM_MINFREE)
580 ASSERT(vmp->vm_cflags & VMC_POPULATOR);
584 (void) mutex_unlock(&vmp->vm_lock);
616 (void) mutex_lock(&vmp->vm_lock);
617 vmp->vm_kstat.vk_populate_fail++;
633 (void) mutex_lock(&vmp->vm_lock);
639 while (vmp->vm_nsegfree < VMEM_MINFREE)
640 vmem_putseg(vmp, (vmem_seg_t *)(p + --nseg * vmem_seg_size));
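
vmem_populate() enforces the invariant that at least VMEM_MINFREE spare segment headers are available before an operation that may consume them: it first pulls headers from a global pool, and if that is not enough it drops vm_lock, allocates a fresh batch, reacquires the lock, and pushes the new headers with vmem_putseg() (lines 639-640), bumping vk_populate_fail when the refill cannot be satisfied. A rough model of the invariant, with invented helper names and malloc() standing in for the real page carve-up:

#include <stdlib.h>

#define MINFREE 4                      /* illustrative; not the real VMEM_MINFREE */

typedef struct seg seg_t;
struct seg {
	seg_t *next;
};

typedef struct arena {
	seg_t *segfree;
	int    nsegfree;
} arena_t;

static void
putseg(arena_t *vmp, seg_t *vsp)
{
	vsp->next = vmp->segfree;
	vmp->segfree = vsp;
	vmp->nsegfree++;
}

/*
 * Top up the arena's spare-header stack to at least MINFREE entries.
 * Returns 1 on success, 0 if the refill allocation fails.
 */
static int
populate(arena_t *vmp)
{
	while (vmp->nsegfree < MINFREE) {
		seg_t *vsp = malloc(sizeof (seg_t));

		if (vsp == NULL)
			return (0);    /* caller records a populate failure */
		putseg(vmp, vsp);
	}
	return (1);
}
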
653 * Note: may drop and reacquire vmp->vm_lock.
656 vmem_advance(vmem_t *vmp, vmem_seg_t *walker, vmem_seg_t *afterme)
674 vmem_freelist_delete(vmp, vnext);
675 vmem_freelist_delete(vmp, vprev);
677 vmem_freelist_insert(vmp, vprev);
678 vmem_seg_destroy(vmp, vnext);
689 if (vsp != NULL && vsp->vs_import && vmp->vm_source_free != NULL &&
695 vmem_freelist_delete(vmp, vsp);
696 vmem_span_destroy(vmp, vsp);
697 (void) mutex_unlock(&vmp->vm_lock);
698 vmp->vm_source_free(vmp->vm_source, vaddr, size);
699 (void) mutex_lock(&vmp->vm_lock);
711 vmem_nextfit_alloc(vmem_t *vmp, size_t size, int vmflag)
715 size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
718 (void) mutex_lock(&vmp->vm_lock);
720 if (vmp->vm_nsegfree < VMEM_MINFREE && !vmem_populate(vmp, vmflag)) {
721 (void) mutex_unlock(&vmp->vm_lock);
735 rotor = &vmp->vm_rotor;
742 vmem_hash_insert(vmp,
743 vmem_seg_create(vmp, rotor->vs_aprev, addr, addr + size));
744 (void) mutex_unlock(&vmp->vm_lock);
753 vmp->vm_kstat.vk_search++;
768 vmem_advance(vmp, rotor, rotor->vs_anext);
777 if (vmp->vm_source_alloc != NULL ||
779 (void) mutex_unlock(&vmp->vm_lock);
780 return (vmem_xalloc(vmp, size, vmp->vm_quantum,
783 vmp->vm_kstat.vk_wait++;
786 (void) cond_wait(&vmp->vm_cv, &vmp->vm_lock);
796 vsp = vmem_seg_alloc(vmp, vsp, addr, size);
804 vmem_advance(vmp, rotor, vsp);
805 (void) mutex_unlock(&vmp->vm_lock);
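
For VM_NEXTFIT, vmem_nextfit_alloc() threads a special rotor segment (vm_rotor) into the arena's segment list and, after each successful allocation, advances the rotor past the segment it just consumed (vmem_advance() at lines 768 and 804). Successive allocations therefore cycle through the arena instead of immediately reusing recently freed addresses. A toy model of the same policy over a fixed array of slots rather than the real circular segment list:

#include <stdio.h>

#define NSLOTS 8

/*
 * Next-fit: scan for a free slot starting at the rotor, wrapping around,
 * and leave the rotor just past the slot that was handed out.
 * Returns the slot index, or -1 when every slot is in use.
 */
static int
nextfit_alloc(int used[NSLOTS], int *rotor)
{
	int i;

	for (i = 0; i < NSLOTS; i++) {
		int slot = (*rotor + i) % NSLOTS;

		if (!used[slot]) {
			used[slot] = 1;
			*rotor = (slot + 1) % NSLOTS;
			return (slot);
		}
	}
	return (-1);
}

int
main(void)
{
	int used[NSLOTS] = { 0 };
	int rotor = 0;
	int i;

	for (i = 0; i < 5; i++)
		printf("allocated slot %d\n", nextfit_alloc(used, &rotor));
	return (0);
}
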
815 vmem_xalloc(vmem_t *vmp, size_t size, size_t align, size_t phase,
828 (void *)vmp, size, align, phase, nocross,
832 align = vmp->vm_quantum;
834 if ((align | phase | nocross) & (vmp->vm_quantum - 1)) {
837 (void *)vmp, size, align, phase, nocross,
845 (void *)vmp, size, align, phase, nocross,
849 if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
853 (void) mutex_lock(&vmp->vm_lock);
857 if (vmp->vm_nsegfree < VMEM_MINFREE &&
858 !vmem_populate(vmp, vmflag))
880 flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
883 if ((vmp->vm_freemap >> hb) == 0 ||
887 flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
891 vmp->vm_freelist[flist - 1].vs_knext;
893 vmp->vm_kstat.vk_search++;
905 flist = lowbit(P2ALIGN(vmp->vm_freemap,
909 vsp = (vmem_seg_t *)&vmp->vm_freelist[flist];
935 if (vmp->vm_source_alloc != NULL && nocross == 0 &&
938 MAX(align, vmp->vm_source->vm_quantum));
940 (void) mutex_unlock(&vmp->vm_lock);
957 ASSERT(vmp->vm_nsegfree >= resv);
958 vmp->vm_nsegfree -= resv; /* reserve our segs */
959 (void) mutex_unlock(&vmp->vm_lock);
960 vaddr = vmp->vm_source_alloc(vmp->vm_source, asize,
962 (void) mutex_lock(&vmp->vm_lock);
963 vmp->vm_nsegfree += resv; /* claim reservation */
965 vbest = vmem_span_create(vmp, vaddr, asize, 1);
970 (void) mutex_unlock(&vmp->vm_lock);
972 (void) mutex_lock(&vmp->vm_lock);
975 vmp->vm_kstat.vk_wait++;
978 (void) cond_wait(&vmp->vm_cv, &vmp->vm_lock);
984 (void) vmem_seg_alloc(vmp, vbest, addr, size);
985 (void) mutex_unlock(&vmp->vm_lock);
992 vmp->vm_kstat.vk_fail++;
993 (void) mutex_unlock(&vmp->vm_lock);
997 (void *)vmp, size, align, phase, nocross,
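
The heart of vmem_xalloc() is how it picks a freelist. For an unconstrained power-of-two request it uses an instant fit: P2ALIGN(vm_freemap, size) masks off every size class smaller than the request, and lowbit() then names the smallest non-empty class whose segments are all guaranteed large enough (line 880); constrained or odd-sized requests instead start from highbit(size) and may have to walk a list (lines 883-893). A standalone sketch of the instant-fit bit arithmetic, with 1-based lowbit()/highbit() like the originals:

#include <stddef.h>
#include <stdint.h>

#define P2ALIGN(x, align)  ((x) & -(uintptr_t)(align))

/* 1-based index of the highest set bit; 0 for 0. */
static int
highbit(uintptr_t v)
{
	int h = 0;

	while (v != 0) {
		h++;
		v >>= 1;
	}
	return (h);
}

/* 1-based index of the lowest set bit; 0 for 0. */
static int
lowbit(uintptr_t v)
{
	return (highbit(v & (~v + 1)));
}

/*
 * Instant fit for a power-of-two 'size': returns the 1-based number of the
 * smallest populated freelist whose every segment is >= size, or 0 if no
 * such list currently holds segments.
 */
static int
instant_fit(uintptr_t freemap, size_t size)
{
	return (lowbit(P2ALIGN(freemap, size)));
}
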
1008 vmem_xfree(vmem_t *vmp, void *vaddr, size_t size)
1012 (void) mutex_lock(&vmp->vm_lock);
1014 vsp = vmem_hash_delete(vmp, (uintptr_t)vaddr, size);
1015 vsp->vs_end = P2ROUNDUP(vsp->vs_end, vmp->vm_quantum);
1023 vmem_freelist_delete(vmp, vnext);
1025 vmem_seg_destroy(vmp, vnext);
1034 vmem_freelist_delete(vmp, vprev);
1036 vmem_seg_destroy(vmp, vsp);
1043 if (vsp->vs_import && vmp->vm_source_free != NULL &&
1049 vmem_span_destroy(vmp, vsp);
1050 (void) mutex_unlock(&vmp->vm_lock);
1051 vmp->vm_source_free(vmp->vm_source, vaddr, size);
1053 vmem_freelist_insert(vmp, vsp);
1054 (void) mutex_unlock(&vmp->vm_lock);
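
vmem_xfree() coalesces before it frees: if the segment after the freed one is free it is absorbed (lines 1023-1025), likewise the one before (lines 1034-1036), and if the result is an entire imported span and the arena has a vm_source_free callback, the whole span is handed back to the source with vm_lock dropped around the call; otherwise the merged segment goes back on a freelist. A sketch of the neighbour-merging step on a simple address-ordered doubly linked list (invented types, no locking or span handling):

#include <stddef.h>
#include <stdint.h>

typedef struct fseg {
	struct fseg *prev;             /* address-ordered neighbours */
	struct fseg *next;
	uintptr_t    start;
	uintptr_t    end;
	int          is_free;
} fseg_t;

/*
 * Mark 'vsp' free and merge it with any adjacent free neighbours,
 * unlinking the absorbed segments.  Returns the surviving segment.
 */
static fseg_t *
coalesce(fseg_t *vsp)
{
	fseg_t *vnext = vsp->next;
	fseg_t *vprev = vsp->prev;

	vsp->is_free = 1;

	if (vnext != NULL && vnext->is_free && vsp->end == vnext->start) {
		vsp->end = vnext->end;         /* absorb the following segment */
		vsp->next = vnext->next;
		if (vnext->next != NULL)
			vnext->next->prev = vsp;
	}
	if (vprev != NULL && vprev->is_free && vprev->end == vsp->start) {
		vprev->end = vsp->end;         /* absorb into the preceding segment */
		vprev->next = vsp->next;
		if (vsp->next != NULL)
			vsp->next->prev = vprev;
		vsp = vprev;
	}
	return (vsp);
}
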
1059 * Allocate size bytes from arena vmp. Returns the allocated address
1066 vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
1075 if (size - 1 < vmp->vm_qcache_max) {
1077 return (_umem_cache_alloc(vmp->vm_qcache[(size - 1) >>
1078 vmp->vm_qshift], UMEM_DEFAULT));
1081 if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
1086 return (vmem_nextfit_alloc(vmp, size, vmflag));
1089 return (vmem_xalloc(vmp, size, vmp->vm_quantum, 0, 0,
1095 (void) mutex_lock(&vmp->vm_lock);
1097 if (vmp->vm_nsegfree >= VMEM_MINFREE || vmem_populate(vmp, vmflag)) {
1099 flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
1101 flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
1105 (void) mutex_unlock(&vmp->vm_lock);
1106 return (vmem_xalloc(vmp, size, vmp->vm_quantum,
1111 vsp = vmp->vm_freelist[flist].vs_knext;
1113 (void) vmem_seg_alloc(vmp, vsp, addr, size);
1114 (void) mutex_unlock(&vmp->vm_lock);
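
vmem_alloc() has a fast path: any request with size - 1 < vm_qcache_max is satisfied from one of the arena's quantum caches, chosen by ((size - 1) >> vm_qshift) so that cache i serves requests of up to (i + 1) quanta (matching the "(i + 1) * quantum" cache names created at line 1500); larger requests take the instant-fit path or fall through to vmem_xalloc(). A tiny sketch of just that routing arithmetic, with an illustrative 8-byte quantum and four caches:

#include <stddef.h>
#include <stdio.h>

#define QUANTUM     8                  /* illustrative arena quantum */
#define QSHIFT      3                  /* log2(QUANTUM) */
#define NQCACHE     4                  /* caches serve 8, 16, 24, 32 bytes */
#define QCACHE_MAX  (NQCACHE * QUANTUM)

/* Quantum-cache index for 'size', or -1 if the request bypasses the caches. */
static int
qcache_index(size_t size)
{
	if (size == 0 || size - 1 >= QCACHE_MAX)
		return (-1);
	return ((int)((size - 1) >> QSHIFT));
}

int
main(void)
{
	size_t sizes[] = { 1, 8, 9, 24, 32, 33 };
	size_t i;

	for (i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++)
		printf("size %2zu -> cache %d\n", sizes[i], qcache_index(sizes[i]));
	return (0);
}
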
1122 vmem_free(vmem_t *vmp, void *vaddr, size_t size)
1124 if (size - 1 < vmp->vm_qcache_max)
1125 _umem_cache_free(vmp->vm_qcache[(size - 1) >> vmp->vm_qshift],
1128 vmem_xfree(vmp, vaddr, size);
1132 * Determine whether arena vmp contains the segment [vaddr, vaddr + size).
1135 vmem_contains(vmem_t *vmp, void *vaddr, size_t size)
1140 vmem_seg_t *seg0 = &vmp->vm_seg0;
1142 (void) mutex_lock(&vmp->vm_lock);
1143 vmp->vm_kstat.vk_contains++;
1145 vmp->vm_kstat.vk_contains_search++;
1150 (void) mutex_unlock(&vmp->vm_lock);
1155 * Add the span [vaddr, vaddr + size) to arena vmp.
1158 vmem_add(vmem_t *vmp, void *vaddr, size_t size, int vmflag)
1162 vmp, vaddr, size);
1165 ASSERT(!vmem_contains(vmp, vaddr, size));
1167 (void) mutex_lock(&vmp->vm_lock);
1168 if (vmem_populate(vmp, vmflag))
1169 (void) vmem_span_create(vmp, vaddr, size, 0);
1172 (void) cond_broadcast(&vmp->vm_cv);
1173 (void) mutex_unlock(&vmp->vm_lock);
1178 * Adds the address range [addr, endaddr) to arena vmp, by either:
1185 * Called with vmp->vm_lock held, and a successful vmem_populate() completed.
1193 vmem_extend_unlocked(vmem_t *vmp, uintptr_t addr, uintptr_t endaddr)
1198 vmem_seg_t *end = &vmp->vm_seg0;
1200 ASSERT(MUTEX_HELD(&vmp->vm_lock));
1211 return (vmem_span_create(vmp, (void *)addr, endaddr - addr, 0));
1237 vmem_freelist_delete(vmp, prevseg);
1240 vmem_freelist_delete(vmp, nextseg);
1242 vmem_seg_destroy(vmp, nextseg);
1243 vmem_seg_destroy(vmp, span);
1253 vmem_seg_destroy(vmp, span);
1255 vmem_freelist_delete(vmp, prevseg);
1266 vmem_seg_destroy(vmp, span);
1268 vmem_freelist_delete(vmp, nextseg);
1289 vmem_freelist_delete(vmp, oldseg);
1293 vsp = vmem_seg_create(vmp, oldseg, addr, endaddr);
1302 vmem_freelist_delete(vmp, oldseg);
1306 vsp = vmem_seg_create(vmp, span, addr, endaddr);
1308 vmem_freelist_insert(vmp, vsp);
1309 vmp->vm_kstat.vk_mem_total += (endaddr - addr);
1315 * [vaddr, vaddr+size) to vmp, then allocates alloc bytes from the
1319 _vmem_extend_alloc(vmem_t *vmp, void *vaddr, size_t size, size_t alloc,
1328 ASSERT(((addr | size | alloc) & (vmp->vm_quantum - 1)) == 0);
1330 ASSERT(!vmem_contains(vmp, vaddr, size));
1332 (void) mutex_lock(&vmp->vm_lock);
1333 if (!vmem_populate(vmp, vmflag)) {
1334 (void) mutex_unlock(&vmp->vm_lock);
1340 if (vmp->vm_source_alloc != NULL)
1341 vsp = vmem_span_create(vmp, vaddr, size, 0);
1343 vsp = vmem_extend_unlocked(vmp, addr, endaddr);
1348 (void) vmem_seg_alloc(vmp, vsp, addr, alloc);
1351 (void) cond_broadcast(&vmp->vm_cv);
1352 (void) mutex_unlock(&vmp->vm_lock);
1358 * Walk the vmp arena, applying func to each segment matching typemask.
1366 vmem_walk(vmem_t *vmp, int typemask,
1370 vmem_seg_t *seg0 = &vmp->vm_seg0;
1379 (void) mutex_lock(&vmp->vm_lock);
1386 vmem_advance(vmp, &walker, vsp);
1387 (void) mutex_unlock(&vmp->vm_lock);
1389 (void) mutex_lock(&vmp->vm_lock);
1396 vmem_advance(vmp, &walker, NULL);
1397 (void) mutex_unlock(&vmp->vm_lock);
1408 vmem_size(vmem_t *vmp, int typemask)
1413 size += vmp->vm_kstat.vk_mem_inuse;
1415 size += vmp->vm_kstat.vk_mem_total -
1416 vmp->vm_kstat.vk_mem_inuse;
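
vmem_size() is computed purely from the arena kstats: VMEM_ALLOC contributes vk_mem_inuse and VMEM_FREE contributes vk_mem_total - vk_mem_inuse. For example, with vk_mem_total = 1 MB and vk_mem_inuse = 768 KB, vmem_size(vmp, VMEM_FREE) would report 256 KB, and asking for both flags returns the full 1 MB.
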
1436 vmem_t *vmp, *cur, **vmpp;
1442 vmp = vmem_alloc(vmem_vmem_arena, sizeof (vmem_t),
1446 vmp = &vmem0[id - 1];
1449 if (vmp == NULL)
1451 bzero(vmp, sizeof (vmem_t));
1453 (void) snprintf(vmp->vm_name, VMEM_NAMELEN, "%s", name);
1454 (void) mutex_init(&vmp->vm_lock, USYNC_THREAD, NULL);
1455 (void) cond_init(&vmp->vm_cv, USYNC_THREAD, NULL);
1456 vmp->vm_cflags = vmflag;
1459 vmp->vm_quantum = quantum;
1460 vmp->vm_qshift = highbit(quantum) - 1;
1461 nqcache = MIN(qcache_max >> vmp->vm_qshift, VMEM_NQCACHE_MAX);
1464 vfp = &vmp->vm_freelist[i];
1470 vmp->vm_freelist[0].vs_kprev = NULL;
1471 vmp->vm_freelist[VMEM_FREELISTS].vs_knext = NULL;
1472 vmp->vm_freelist[VMEM_FREELISTS].vs_end = 0;
1473 vmp->vm_hash_table = vmp->vm_hash0;
1474 vmp->vm_hash_mask = VMEM_HASH_INITIAL - 1;
1475 vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1477 vsp = &vmp->vm_seg0;
1484 vsp = &vmp->vm_rotor;
1486 VMEM_INSERT(&vmp->vm_seg0, vsp, a);
1488 vmp->vm_id = id;
1490 vmp->vm_kstat.vk_source_id = source->vm_id;
1491 vmp->vm_source = source;
1492 vmp->vm_source_alloc = afunc;
1493 vmp->vm_source_free = ffunc;
1496 vmp->vm_qcache_max = nqcache << vmp->vm_qshift;
1500 vmp->vm_name, (long)((i + 1) * quantum));
1501 vmp->vm_qcache[i] = umem_cache_create(buf,
1503 NULL, vmp, UMC_QCACHE | UMC_NOTOUCH);
1504 if (vmp->vm_qcache[i] == NULL) {
1505 vmp->vm_qcache_max = i * quantum;
1515 *vmpp = vmp;
1518 if (vmp->vm_cflags & VMC_POPULATOR) {
1521 vmem_populator[pop_id - 1] = vmp;
1522 (void) mutex_lock(&vmp->vm_lock);
1523 (void) vmem_populate(vmp, vmflag | VM_PANIC);
1524 (void) mutex_unlock(&vmp->vm_lock);
1527 if ((base || size) && vmem_add(vmp, base, size, vmflag) == NULL) {
1528 vmem_destroy(vmp);
1532 return (vmp);
1536 * Destroy arena vmp.
1539 vmem_destroy(vmem_t *vmp)
1542 vmem_seg_t *seg0 = &vmp->vm_seg0;
1549 while ((cur = *vmpp) != vmp)
1551 *vmpp = vmp->vm_next;
1555 if (vmp->vm_qcache[i])
1556 umem_cache_destroy(vmp->vm_qcache[i]);
1558 leaked = vmem_size(vmp, VMEM_ALLOC);
1561 vmp->vm_name, leaked);
1563 if (vmp->vm_hash_table != vmp->vm_hash0)
1564 vmem_free(vmem_hash_arena, vmp->vm_hash_table,
1565 (vmp->vm_hash_mask + 1) * sizeof (void *));
1571 VMEM_DELETE(&vmp->vm_rotor, a);
1575 while (vmp->vm_nsegfree > 0)
1576 vmem_putseg_global(vmem_getseg(vmp));
1578 (void) mutex_destroy(&vmp->vm_lock);
1579 (void) cond_destroy(&vmp->vm_cv);
1580 vmem_free(vmem_vmem_arena, vmp, sizeof (vmem_t));
1584 * Resize vmp's hash table to keep the average lookup depth near 1.0.
1587 vmem_hash_rescale(vmem_t *vmp)
1592 nseg = (size_t)(vmp->vm_kstat.vk_alloc - vmp->vm_kstat.vk_free);
1595 old_size = vmp->vm_hash_mask + 1;
1606 (void) mutex_lock(&vmp->vm_lock);
1608 old_size = vmp->vm_hash_mask + 1;
1609 old_table = vmp->vm_hash_table;
1611 vmp->vm_hash_mask = new_size - 1;
1612 vmp->vm_hash_table = new_table;
1613 vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1620 vmem_seg_t **hash_bucket = VMEM_HASH(vmp, addr);
1627 (void) mutex_unlock(&vmp->vm_lock);
1629 if (old_table != vmp->vm_hash0)
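
vmem_hash_rescale() keeps hash lookups cheap by comparing the number of live allocations (vk_alloc - vk_free) with the current table size and, when they diverge enough, allocating a new power-of-two table, rehashing every chained segment by its start address under vm_lock, and freeing the old table unless it is the embedded vm_hash0. A compact rehash sketch (the >> 3 below is an invented stand-in for the real quantum-derived hash shift):

#include <stddef.h>
#include <stdint.h>

typedef struct hseg {
	struct hseg *vs_knext;
	uintptr_t    vs_start;
} hseg_t;

/*
 * Move every segment from old_table (old_size buckets) into new_table
 * (new_size buckets, already zeroed); both sizes are powers of two and
 * segments are rehashed by start address.
 */
static void
rehash(hseg_t **old_table, size_t old_size,
    hseg_t **new_table, size_t new_size)
{
	size_t h;

	for (h = 0; h < old_size; h++) {
		hseg_t *vsp = old_table[h];

		while (vsp != NULL) {
			hseg_t *next = vsp->vs_knext;
			hseg_t **bucket =
			    &new_table[(vsp->vs_start >> 3) & (new_size - 1)];

			vsp->vs_knext = *bucket;   /* push onto the new chain */
			*bucket = vsp;
			vsp = next;
		}
	}
}
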
1641 vmem_t *vmp;
1644 for (vmp = vmem_list; vmp != NULL; vmp = vmp->vm_next) {
1650 (void) cond_broadcast(&vmp->vm_cv);
1655 vmem_hash_rescale(vmp);