Lines Matching defs:vmp

169  * of size >= 2^n reside in vmp->vm_freelist[n].  To ensure constant-time
180 * vmp->vm_freemap & (1 << n) is non-zero iff vmp->vm_freelist[n] is non-empty.
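The two comment lines above state the freelist invariant: free segments of size >= 2^n live on vm_freelist[n], and bit n of vm_freemap says whether that list is non-empty. A minimal user-space sketch of that mapping, with a locally defined highbit() standing in for the kernel helper (arena state and struct definitions are omitted; only the index and bitmap arithmetic are shown):

    #include <stdint.h>
    #include <stdio.h>

    /* highbit(x): 1-based position of the highest set bit; 0 for x == 0 */
    static int
    highbit(uintptr_t x)
    {
        int h = 0;

        while (x != 0) {
            h++;
            x >>= 1;
        }
        return (h);
    }

    int
    main(void)
    {
        uintptr_t freemap = 0;
        size_t size = 0x3000;               /* a 12K free segment */

        /* segments of size >= 2^n live on freelist[n]: n = highbit(size) - 1 */
        int n = highbit(size) - 1;
        freemap |= (uintptr_t)1 << n;       /* freelist[n] is now non-empty */

        printf("size %#zx -> freelist[%d], freemap %#lx\n",
            size, n, (unsigned long)freemap);
        return (0);
    }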
398 * Get a vmem_seg_t from vmp's segfree list.
401 vmem_getseg(vmem_t *vmp)
405 ASSERT(vmp->vm_nsegfree > 0);
407 vsp = vmp->vm_segfree;
408 vmp->vm_segfree = vsp->vs_knext;
409 vmp->vm_nsegfree--;
415 * Put a vmem_seg_t on vmp's segfree list.
418 vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
420 vsp->vs_knext = vmp->vm_segfree;
421 vmp->vm_segfree = vsp;
422 vmp->vm_nsegfree++;
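vmem_getseg() and vmem_putseg() above keep a per-arena LIFO of spare vmem_seg_t structures threaded through vs_knext, refilled by vmem_populate() so segment headers are on hand when a segment must be split or created. A stripped-down sketch of that push/pop pattern; the seg_t and arena_t types here are simplified stand-ins, not the kernel definitions:

    #include <assert.h>
    #include <stddef.h>

    typedef struct seg {
        struct seg *vs_knext;   /* next spare seg on the segfree list */
    } seg_t;

    typedef struct arena {
        seg_t *vm_segfree;      /* head of the LIFO of spare segs */
        unsigned vm_nsegfree;   /* how many are on it */
    } arena_t;

    static seg_t *
    getseg(arena_t *vmp)
    {
        assert(vmp->vm_nsegfree > 0);
        seg_t *vsp = vmp->vm_segfree;
        vmp->vm_segfree = vsp->vs_knext;    /* pop */
        vmp->vm_nsegfree--;
        return (vsp);
    }

    static void
    putseg(arena_t *vmp, seg_t *vsp)
    {
        vsp->vs_knext = vmp->vm_segfree;    /* push */
        vmp->vm_segfree = vsp;
        vmp->vm_nsegfree++;
    }

    int
    main(void)
    {
        static seg_t s1, s2;
        arena_t a = { NULL, 0 };

        putseg(&a, &s1);
        putseg(&a, &s2);
        assert(getseg(&a) == &s2 && getseg(&a) == &s1);   /* LIFO order */
        return (0);
    }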
429 vmem_freelist_insert(vmem_t *vmp, vmem_seg_t *vsp)
433 ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
435 vprev = (vmem_seg_t *)&vmp->vm_freelist[highbit(VS_SIZE(vsp)) - 1];
437 vmp->vm_freemap |= VS_SIZE(vprev);
440 cv_broadcast(&vmp->vm_cv);
447 vmem_freelist_delete(vmem_t *vmp, vmem_seg_t *vsp)
449 ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
457 ASSERT(vmp->vm_freemap & VS_SIZE(vsp->vs_kprev));
458 vmp->vm_freemap ^= VS_SIZE(vsp->vs_kprev);
467 vmem_hash_insert(vmem_t *vmp, vmem_seg_t *vsp)
472 bucket = VMEM_HASH(vmp, vsp->vs_start);
485 vmp->vm_kstat.vk_alloc.value.ui64++;
486 vmp->vm_kstat.vk_mem_inuse.value.ui64 += VS_SIZE(vsp);
493 vmem_hash_delete(vmem_t *vmp, uintptr_t addr, size_t size)
497 prev_vspp = VMEM_HASH(vmp, addr);
503 vmp->vm_kstat.vk_lookup.value.ui64++;
509 (void *)vmp, addr, size);
512 (void *)vmp, addr, size, VS_SIZE(vsp));
514 vmp->vm_kstat.vk_free.value.ui64++;
515 vmp->vm_kstat.vk_mem_inuse.value.ui64 -= size;
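vmem_hash_insert() and vmem_hash_delete() above track allocated segments in hash chains keyed by start address (VMEM_HASH picks the bucket) and maintain the vk_alloc/vk_free/vk_lookup counters; a free that finds no matching segment panics as a bad free. A hedged sketch of the chain insert and unlink, with a toy hash function in place of VMEM_HASH and no kstats:

    #include <stdint.h>
    #include <stdio.h>

    #define NBUCKETS 64     /* power of two, like vm_hash_mask + 1 */

    typedef struct seg {
        struct seg *vs_knext;
        uintptr_t vs_start;
        uintptr_t vs_end;
    } seg_t;

    static seg_t *hash_table[NBUCKETS];

    /* toy stand-in for VMEM_HASH(): index by shifted start address */
    static seg_t **
    hash_bucket(uintptr_t addr)
    {
        return (&hash_table[(addr >> 12) & (NBUCKETS - 1)]);
    }

    static void
    hash_insert(seg_t *vsp)
    {
        seg_t **bucket = hash_bucket(vsp->vs_start);

        vsp->vs_knext = *bucket;            /* push onto the chain head */
        *bucket = vsp;
    }

    static seg_t *
    hash_delete(uintptr_t addr)
    {
        seg_t **prev_vspp = hash_bucket(addr);
        seg_t *vsp;

        while ((vsp = *prev_vspp) != NULL) {
            if (vsp->vs_start == addr) {
                *prev_vspp = vsp->vs_knext; /* unlink from the chain */
                return (vsp);
            }
            prev_vspp = &vsp->vs_knext;
        }
        return (NULL);      /* the real code panics on a bad free here */
    }

    int
    main(void)
    {
        seg_t s = { NULL, 0x10000, 0x12000 };

        hash_insert(&s);
        printf("deleted %p\n", (void *)hash_delete(0x10000));
        return (0);
    }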
524 vmem_seg_create(vmem_t *vmp, vmem_seg_t *vprev, uintptr_t start, uintptr_t end)
526 vmem_seg_t *newseg = vmem_getseg(vmp);
542 vmem_seg_destroy(vmem_t *vmp, vmem_seg_t *vsp)
547 vmem_putseg(vmp, vsp);
551 * Add the span [vaddr, vaddr + size) to vmp and update kstats.
554 vmem_span_create(vmem_t *vmp, void *vaddr, size_t size, uint8_t import)
560 ASSERT(MUTEX_HELD(&vmp->vm_lock));
562 if ((start | end) & (vmp->vm_quantum - 1))
564 (void *)vmp, vaddr, size);
566 span = vmem_seg_create(vmp, vmp->vm_seg0.vs_aprev, start, end);
569 VMEM_INSERT(vmp->vm_seg0.vs_kprev, span, k);
571 newseg = vmem_seg_create(vmp, span, start, end);
572 vmem_freelist_insert(vmp, newseg);
575 vmp->vm_kstat.vk_mem_import.value.ui64 += size;
576 vmp->vm_kstat.vk_mem_total.value.ui64 += size;
582 * Remove span vsp from vmp and update kstats.
585 vmem_span_destroy(vmem_t *vmp, vmem_seg_t *vsp)
590 ASSERT(MUTEX_HELD(&vmp->vm_lock));
594 vmp->vm_kstat.vk_mem_import.value.ui64 -= size;
595 vmp->vm_kstat.vk_mem_total.value.ui64 -= size;
599 vmem_seg_destroy(vmp, vsp);
600 vmem_seg_destroy(vmp, span);
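vmem_span_create() above refuses a span whose endpoints are not quantum-aligned, using a single test on (start | end). A tiny sketch of why OR-ing the endpoints works when the quantum is a power of two (the function name below is a local stand-in):

    #include <stdint.h>
    #include <stdio.h>

    /* non-zero if either endpoint is misaligned to the quantum */
    static int
    span_misaligned(uintptr_t start, uintptr_t end, size_t quantum)
    {
        /*
         * quantum is a power of two, so quantum - 1 masks the low bits;
         * OR-ing the endpoints checks both of them with a single AND.
         */
        return (((start | end) & (quantum - 1)) != 0);
    }

    int
    main(void)
    {
        printf("%d\n", span_misaligned(0x1000, 0x3000, 0x1000));   /* 0: ok */
        printf("%d\n", span_misaligned(0x1000, 0x2800, 0x1000));   /* 1: bad end */
        return (0);
    }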
609 vmem_seg_alloc(vmem_t *vmp, vmem_seg_t *vsp, uintptr_t addr, size_t size)
614 size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
617 ASSERT(P2PHASE(vs_start, vmp->vm_quantum) == 0);
618 ASSERT(P2PHASE(addr, vmp->vm_quantum) == 0);
631 vsp = vmem_seg_create(vmp, vsp->vs_aprev, addr, addr + size);
632 vmem_hash_insert(vmp, vsp);
636 vmem_freelist_delete(vmp, vsp);
639 vmem_freelist_insert(vmp,
640 vmem_seg_create(vmp, vsp, addr_end, vs_end));
643 vmem_freelist_insert(vmp,
644 vmem_seg_create(vmp, vsp->vs_aprev, vs_start, addr));
649 vmem_hash_insert(vmp, vsp);
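vmem_seg_alloc() above carves [addr, addr + realsize) out of a free segment [vs_start, vs_end), putting any leftover space back on the freelists as up to two new free segments. A simplified sketch of the split decision only (no freelists, hash, or seg reuse; the names are local stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    #define P2ROUNDUP(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

    /* report how a free segment [vs_start, vs_end) is split around an allocation */
    static void
    split(uintptr_t vs_start, uintptr_t vs_end, uintptr_t addr, size_t size,
        size_t quantum)
    {
        uintptr_t addr_end = addr + P2ROUNDUP(size, quantum);

        if (addr_end < vs_end)
            printf("free remainder on the right: [%#lx, %#lx)\n",
                (unsigned long)addr_end, (unsigned long)vs_end);
        if (vs_start < addr)
            printf("free remainder on the left:  [%#lx, %#lx)\n",
                (unsigned long)vs_start, (unsigned long)addr);
        printf("allocated: [%#lx, %#lx)\n",
            (unsigned long)addr, (unsigned long)addr_end);
    }

    int
    main(void)
    {
        /* take 0x1800 bytes at 0x12000 out of the free segment [0x10000, 0x20000) */
        split(0x10000, 0x20000, 0x12000, 0x1800, 0x1000);
        return (0);
    }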
667 * Populate vmp's segfree list with VMEM_MINFREE vmem_seg_t structures.
670 vmem_populate(vmem_t *vmp, int vmflag)
679 while (vmp->vm_nsegfree < VMEM_MINFREE &&
681 vmem_putseg(vmp, vsp);
683 if (vmp->vm_nsegfree >= VMEM_MINFREE)
690 ASSERT(vmp->vm_cflags & VMC_POPULATOR);
694 mutex_exit(&vmp->vm_lock);
720 mutex_enter(&vmp->vm_lock);
721 vmp->vm_kstat.vk_populate_fail.value.ui64++;
737 mutex_enter(&vmp->vm_lock);
743 while (vmp->vm_nsegfree < VMEM_MINFREE)
744 vmem_putseg(vmp, (vmem_seg_t *)(p + --nseg * vmem_seg_size));
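vmem_populate() above keeps at least VMEM_MINFREE spare vmem_seg_t structures on the segfree list, dropping vm_lock while it allocates a block to carve into segs and retaking it before refilling. A rough user-space sketch of that drop-the-lock-and-refill pattern using a pthread mutex; the arena_t type, populate() helper, and sizes are all stand-ins, not the kernel code:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define VMEM_MINFREE    4   /* stand-in; the kernel keeps more in reserve */

    typedef struct arena {
        pthread_mutex_t vm_lock;
        int vm_nsegfree;        /* spare segs currently on the segfree list */
        void *vm_spare;         /* stand-in for the carved-up seg block */
    } arena_t;

    /* called with vm_lock held; returns non-zero if the reserve is topped up */
    static int
    populate(arena_t *vmp)
    {
        if (vmp->vm_nsegfree >= VMEM_MINFREE)
            return (1);

        /* drop the lock while allocating, as vmem_populate() does */
        pthread_mutex_unlock(&vmp->vm_lock);
        void *p = malloc(4096);
        pthread_mutex_lock(&vmp->vm_lock);

        if (p == NULL)
            return (0);         /* the real code bumps vk_populate_fail here */

        /*
         * The real code carves p into vmem_seg_t structures and pushes
         * each one onto vm_segfree; here we only account for them.
         */
        vmp->vm_spare = p;
        vmp->vm_nsegfree = VMEM_MINFREE;
        return (1);
    }

    int
    main(void)
    {
        arena_t a = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };

        pthread_mutex_lock(&a.vm_lock);
        int ok = populate(&a);
        printf("populated: %d, nsegfree: %d\n", ok, a.vm_nsegfree);
        pthread_mutex_unlock(&a.vm_lock);
        free(a.vm_spare);
        return (0);
    }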
757 * Note: may drop and reacquire vmp->vm_lock.
760 vmem_advance(vmem_t *vmp, vmem_seg_t *walker, vmem_seg_t *afterme)
778 vmem_freelist_delete(vmp, vnext);
779 vmem_freelist_delete(vmp, vprev);
781 vmem_freelist_insert(vmp, vprev);
782 vmem_seg_destroy(vmp, vnext);
794 vmp->vm_source_free != NULL &&
800 vmem_freelist_delete(vmp, vsp);
801 vmem_span_destroy(vmp, vsp);
802 mutex_exit(&vmp->vm_lock);
803 vmp->vm_source_free(vmp->vm_source, vaddr, size);
804 mutex_enter(&vmp->vm_lock);
816 vmem_nextfit_alloc(vmem_t *vmp, size_t size, int vmflag)
820 size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
823 mutex_enter(&vmp->vm_lock);
825 if (vmp->vm_nsegfree < VMEM_MINFREE && !vmem_populate(vmp, vmflag)) {
826 mutex_exit(&vmp->vm_lock);
840 rotor = &vmp->vm_rotor;
847 vmem_hash_insert(vmp,
848 vmem_seg_create(vmp, rotor->vs_aprev, addr, addr + size));
849 mutex_exit(&vmp->vm_lock);
858 vmp->vm_kstat.vk_search.value.ui64++;
871 vmem_advance(vmp, rotor, rotor->vs_anext);
880 if (vmp->vm_source_alloc != NULL ||
882 mutex_exit(&vmp->vm_lock);
883 return (vmem_xalloc(vmp, size, vmp->vm_quantum,
886 vmp->vm_kstat.vk_wait.value.ui64++;
887 cv_wait(&vmp->vm_cv, &vmp->vm_lock);
896 vsp = vmem_seg_alloc(vmp, vsp, addr, size);
904 vmem_advance(vmp, rotor, vsp);
905 mutex_exit(&vmp->vm_lock);
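vmem_nextfit_alloc() above services VM_NEXTFIT requests by walking a rotor segment around the arena's circular, address-ordered segment list and taking the first free segment that is large enough, so successive allocations cycle through the address space. A minimal sketch of that circular walk (simplified seg_t; no locking, importing, splitting, or waiting):

    #include <stddef.h>
    #include <stdio.h>

    typedef struct seg {
        struct seg *next;   /* circular, address-ordered (like vs_anext) */
        int free;
        size_t size;
    } seg_t;

    /* advance the rotor until a free seg of at least size bytes is found */
    static seg_t *
    nextfit(seg_t **rotor, size_t size)
    {
        seg_t *start = *rotor, *vsp = start->next;

        for (;;) {
            if (vsp->free && vsp->size >= size) {
                *rotor = vsp;       /* the next search resumes after this seg */
                return (vsp);
            }
            if (vsp == start)
                return (NULL);      /* went all the way around: no fit */
            vsp = vsp->next;
        }
    }

    int
    main(void)
    {
        seg_t a = { NULL, 0, 0x1000 };      /* rotor position (not free) */
        seg_t b = { NULL, 1, 0x800 };
        seg_t c = { NULL, 1, 0x4000 };
        a.next = &b; b.next = &c; c.next = &a;

        seg_t *rotor_pos = &a;
        seg_t *hit = nextfit(&rotor_pos, 0x1000);
        printf("found seg of size %#zx\n", hit ? hit->size : 0);
        return (0);
    }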
910 * Checks if vmp is guaranteed to have a size-byte buffer somewhere on its
917 vmem_canalloc(vmem_t *vmp, size_t size)
921 ASSERT(MUTEX_HELD(&vmp->vm_lock));
924 flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
926 flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
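vmem_canalloc() above decides, from vm_freemap alone, whether a free segment of at least size bytes is guaranteed to exist: a power-of-two size can come from its own list or any higher one, while any other size must come from a list strictly above highbit(size), since freelist[n] only guarantees segments of at least 2^n bytes. A worked sketch of the P2ALIGN/lowbit arithmetic with locally defined helpers standing in for the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    #define P2ALIGN(x, a)   ((x) & -(uintptr_t)(a)) /* clear bits below a (a = 2^k) */
    #define NLISTS          (sizeof (void *) * 8)   /* one freelist per address bit */

    static int
    lowbit(uintptr_t x)
    {
        int b = 1;

        if (x == 0)
            return (0);
        while ((x & 1) == 0) {
            x >>= 1;
            b++;
        }
        return (b);
    }

    static int
    highbit(uintptr_t x)
    {
        int b = 0;

        while (x != 0) {
            b++;
            x >>= 1;
        }
        return (b);
    }

    /* non-zero iff freemap guarantees a free segment of at least size bytes */
    static int
    canalloc(uintptr_t freemap, size_t size)
    {
        int hb, flist = 0;

        if ((size & (size - 1)) == 0)           /* power of two: its own list qualifies */
            flist = lowbit(P2ALIGN(freemap, size));
        else if ((hb = highbit(size)) < (int)NLISTS)    /* otherwise: larger lists only */
            flist = lowbit(P2ALIGN(freemap, (uintptr_t)1 << hb));
        return (flist);
    }

    int
    main(void)
    {
        uintptr_t freemap = (uintptr_t)1 << 13; /* only freelist[13] (8K..16K segs) non-empty */

        printf("%d %d %d\n",
            canalloc(freemap, 0x2000),  /* 8K, power of two: guaranteed */
            canalloc(freemap, 0x3000),  /* 12K: needs freelist[14] or above: not guaranteed */
            canalloc(freemap, 0x1000)); /* 4K: guaranteed */
        return (0);
    }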
937 vmem_xalloc(vmem_t *vmp, size_t size, size_t align_arg, size_t phase,
943 uintptr_t align = (align_arg != 0) ? align_arg : vmp->vm_quantum;
949 if ((align | phase | nocross) & (vmp->vm_quantum - 1))
952 (void *)vmp, size, align_arg, phase, nocross,
959 (void *)vmp, size, align_arg, phase, nocross,
965 (void *)vmp, size, align_arg, phase, nocross,
968 if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
972 mutex_enter(&vmp->vm_lock);
974 if (vmp->vm_nsegfree < VMEM_MINFREE &&
975 !vmem_populate(vmp, vmflag))
997 flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
1000 if ((vmp->vm_freemap >> hb) == 0 ||
1004 flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
1008 vmp->vm_freelist[flist - 1].vs_knext;
1010 vmp->vm_kstat.vk_search.value.ui64++;
1022 flist = lowbit(P2ALIGN(vmp->vm_freemap,
1026 vsp = (vmem_seg_t *)&vmp->vm_freelist[flist];
1053 if (vmp->vm_source_alloc != NULL && nocross == 0 &&
1056 size_t aquantum = MAX(vmp->vm_quantum,
1057 vmp->vm_source->vm_quantum);
1060 !(vmp->vm_cflags & VMC_XALIGN)) {
1062 align - vmp->vm_quantum : align - aquantum;
1065 aneeded = MAX(size + aphase, vmp->vm_min_import);
1076 mutex_exit(&vmp->vm_lock);
1089 if (size == asize && !(vmp->vm_cflags & VMC_XALLOC))
1093 align <= vmp->vm_source->vm_quantum)
1099 ASSERT(vmp->vm_nsegfree >= resv);
1100 vmp->vm_nsegfree -= resv; /* reserve our segs */
1101 mutex_exit(&vmp->vm_lock);
1102 if (vmp->vm_cflags & VMC_XALLOC) {
1105 vmp->vm_source_alloc)(vmp->vm_source,
1109 vmp->vm_source->vm_quantum) == 0);
1110 ASSERT(!(vmp->vm_cflags & VMC_XALIGN) ||
1113 vaddr = vmp->vm_source_alloc(vmp->vm_source,
1116 mutex_enter(&vmp->vm_lock);
1117 vmp->vm_nsegfree += resv; /* claim reservation */
1118 aneeded = size + align - vmp->vm_quantum;
1119 aneeded = P2ROUNDUP(aneeded, vmp->vm_quantum);
1130 vmp->vm_source_free != NULL &&
1131 vmem_canalloc(vmp, aneeded)) {
1138 vbest = vmem_span_create(vmp, vaddr, asize, 1);
1141 } else if (vmem_canalloc(vmp, aneeded)) {
1159 mutex_exit(&vmp->vm_lock);
1160 if (vmp->vm_cflags & VMC_IDENTIFIER)
1164 mutex_enter(&vmp->vm_lock);
1167 vmp->vm_kstat.vk_wait.value.ui64++;
1168 cv_wait(&vmp->vm_cv, &vmp->vm_lock);
1178 (void) vmem_seg_alloc(vmp, vbest, addr, size);
1179 mutex_exit(&vmp->vm_lock);
1181 vmp->vm_source_free(vmp->vm_source, xvaddr, xsize);
1188 vmp->vm_kstat.vk_fail.value.ui64++;
1189 mutex_exit(&vmp->vm_lock);
1193 (void *)vmp, size, align_arg, phase, nocross,
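vmem_xalloc() above adds align, phase, nocross, and [minaddr, maxaddr) constraints on top of size, and its sanity checks require align, phase, and nocross to be multiples of the arena quantum. A hedged sketch of what a candidate address has to satisfy, with P2PHASE/P2BOUNDARY-style macros defined locally; the satisfies() helper is an illustration, not part of vmem:

    #include <stdint.h>
    #include <stdio.h>

    #define P2PHASE(x, a)   ((x) & ((a) - 1))
    /* non-zero if [off, off + len) crosses an align-sized boundary */
    #define P2BOUNDARY(off, len, align) \
        (((off) ^ ((off) + (len) - 1)) > (align) - 1)

    /* would an allocation at addr satisfy xalloc-style constraints? */
    static int
    satisfies(uintptr_t addr, size_t size, size_t align, size_t phase, size_t nocross)
    {
        if (P2PHASE(addr, align) != phase)
            return (0);     /* wrong offset within its alignment */
        if (nocross != 0 && P2BOUNDARY(addr, size, nocross))
            return (0);     /* would straddle a nocross boundary */
        return (1);
    }

    int
    main(void)
    {
        /* 0x1800 bytes at 4K alignment plus a 0x200 phase, never crossing 64K */
        printf("%d\n", satisfies(0x10200, 0x1800, 0x1000, 0x200, 0x10000)); /* 1 */
        printf("%d\n", satisfies(0x0f200, 0x1800, 0x1000, 0x200, 0x10000)); /* 0 */
        return (0);
    }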
1205 vmem_xfree(vmem_t *vmp, void *vaddr, size_t size)
1209 mutex_enter(&vmp->vm_lock);
1211 vsp = vmem_hash_delete(vmp, (uintptr_t)vaddr, size);
1212 vsp->vs_end = P2ROUNDUP(vsp->vs_end, vmp->vm_quantum);
1220 vmem_freelist_delete(vmp, vnext);
1222 vmem_seg_destroy(vmp, vnext);
1231 vmem_freelist_delete(vmp, vprev);
1233 vmem_seg_destroy(vmp, vsp);
1240 if (vsp->vs_aprev->vs_import && vmp->vm_source_free != NULL &&
1246 vmem_span_destroy(vmp, vsp);
1247 mutex_exit(&vmp->vm_lock);
1248 vmp->vm_source_free(vmp->vm_source, vaddr, size);
1250 vmem_freelist_insert(vmp, vsp);
1251 mutex_exit(&vmp->vm_lock);
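vmem_xfree() above rounds the freed segment up to the quantum, merges it with free neighbours on the address-ordered list, and then either hands a fully free imported span back to the source arena or reinserts the merged segment on a freelist. A compact sketch of just the coalescing step over a doubly linked, address-ordered list (simplified seg_t; no spans, freelists, or hash):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct seg {
        struct seg *prev, *next;    /* address-ordered neighbours (vs_aprev/vs_anext) */
        uintptr_t start, end;
        int free;
    } seg_t;

    /* mark vsp free and absorb free neighbours into one segment */
    static void
    coalesce(seg_t *vsp)
    {
        vsp->free = 1;

        if (vsp->next != NULL && vsp->next->free) {     /* merge right neighbour */
            seg_t *vnext = vsp->next;
            vsp->end = vnext->end;
            vsp->next = vnext->next;
            if (vnext->next != NULL)
                vnext->next->prev = vsp;
        }
        if (vsp->prev != NULL && vsp->prev->free) {     /* merge left neighbour */
            seg_t *vprev = vsp->prev;
            vprev->end = vsp->end;
            vprev->next = vsp->next;
            if (vsp->next != NULL)
                vsp->next->prev = vprev;
        }
    }

    int
    main(void)
    {
        seg_t a = { NULL, NULL, 0x1000, 0x2000, 1 };
        seg_t b = { NULL, NULL, 0x2000, 0x3000, 0 };
        seg_t c = { NULL, NULL, 0x3000, 0x5000, 1 };
        a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;

        coalesce(&b);   /* freeing b merges a, b, c into one free segment */
        printf("[%#lx, %#lx)\n", (unsigned long)a.start, (unsigned long)a.end);
        return (0);
    }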
1256 * Allocate size bytes from arena vmp. Returns the allocated address
1263 vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
1271 if (size - 1 < vmp->vm_qcache_max)
1272 return (kmem_cache_alloc(vmp->vm_qcache[(size - 1) >>
1273 vmp->vm_qshift], vmflag & VM_KMFLAGS));
1275 if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
1280 return (vmem_nextfit_alloc(vmp, size, vmflag));
1283 return (vmem_xalloc(vmp, size, vmp->vm_quantum, 0, 0,
1289 mutex_enter(&vmp->vm_lock);
1291 if (vmp->vm_nsegfree >= VMEM_MINFREE || vmem_populate(vmp, vmflag)) {
1293 flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
1295 flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
1299 mutex_exit(&vmp->vm_lock);
1300 return (vmem_xalloc(vmp, size, vmp->vm_quantum,
1305 vsp = vmp->vm_freelist[flist].vs_knext;
1310 (void) vmem_seg_alloc(vmp, vsp, addr, size);
1311 mutex_exit(&vmp->vm_lock);
1319 vmem_free(vmem_t *vmp, void *vaddr, size_t size)
1321 if (size - 1 < vmp->vm_qcache_max)
1322 kmem_cache_free(vmp->vm_qcache[(size - 1) >> vmp->vm_qshift],
1325 vmem_xfree(vmp, vaddr, size);
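vmem_alloc() and vmem_free() above route requests with size - 1 < vm_qcache_max to per-size kmem quantum caches, indexed by (size - 1) >> vm_qshift, so each cache serves exactly one multiple of the quantum. A worked sketch of that index arithmetic; the quantum, shift, and cache count below are illustrative values, not taken from any particular arena:

    #include <stddef.h>
    #include <stdio.h>

    int
    main(void)
    {
        size_t quantum = 4096;              /* arena quantum (page size here) */
        int qshift = 12;                    /* highbit(quantum) - 1 */
        size_t qcache_max = 5 * quantum;    /* caches cover 1..5 quanta */

        for (size_t size = quantum; size <= 6 * quantum; size += quantum) {
            if (size - 1 < qcache_max)
                printf("size %6zu -> qcache[%zu]\n",
                    size, (size - 1) >> qshift);
            else
                printf("size %6zu -> vmem_xalloc path\n", size);
        }
        return (0);
    }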
1329 * Determine whether arena vmp contains the segment [vaddr, vaddr + size).
1332 vmem_contains(vmem_t *vmp, void *vaddr, size_t size)
1337 vmem_seg_t *seg0 = &vmp->vm_seg0;
1339 mutex_enter(&vmp->vm_lock);
1340 vmp->vm_kstat.vk_contains.value.ui64++;
1342 vmp->vm_kstat.vk_contains_search.value.ui64++;
1347 mutex_exit(&vmp->vm_lock);
1352 * Add the span [vaddr, vaddr + size) to arena vmp.
1355 vmem_add(vmem_t *vmp, void *vaddr, size_t size, int vmflag)
1359 (void *)vmp, vaddr, size);
1361 ASSERT(!vmem_contains(vmp, vaddr, size));
1363 mutex_enter(&vmp->vm_lock);
1364 if (vmem_populate(vmp, vmflag))
1365 (void) vmem_span_create(vmp, vaddr, size, 0);
1368 mutex_exit(&vmp->vm_lock);
1373 * Walk the vmp arena, applying func to each segment matching typemask.
1381 vmem_walk(vmem_t *vmp, int typemask,
1385 vmem_seg_t *seg0 = &vmp->vm_seg0;
1394 mutex_enter(&vmp->vm_lock);
1401 vmem_advance(vmp, &walker, vsp);
1402 mutex_exit(&vmp->vm_lock);
1404 mutex_enter(&vmp->vm_lock);
1411 vmem_advance(vmp, &walker, NULL);
1412 mutex_exit(&vmp->vm_lock);
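vmem_walk() above applies a callback to every segment whose type matches typemask, using a walker segment and vmem_advance() so vm_lock can be dropped around each callback. A hedged usage sketch against the public signature, vmem_walk(vmem_t *, int, void (*)(void *, void *, size_t), void *); the report_inuse() and count_seg() names are invented for illustration, and this only builds in a kernel source environment that provides the headers:

    #include <sys/types.h>
    #include <sys/vmem.h>
    #include <sys/cmn_err.h>

    /* called once per matching segment: tally segments and bytes */
    static void
    count_seg(void *arg, void *start, size_t size)
    {
        size_t *totals = arg;   /* totals[0] = segments, totals[1] = bytes */

        totals[0]++;
        totals[1] += size;
    }

    void
    report_inuse(vmem_t *vmp)
    {
        size_t totals[2] = { 0, 0 };

        /* walk only the allocated segments of the arena */
        vmem_walk(vmp, VMEM_ALLOC, count_seg, totals);
        cmn_err(CE_CONT, "%lu allocated segments, %lu bytes\n",
            (ulong_t)totals[0], (ulong_t)totals[1]);
    }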
1423 vmem_size(vmem_t *vmp, int typemask)
1428 size += vmp->vm_kstat.vk_mem_inuse.value.ui64;
1430 size += vmp->vm_kstat.vk_mem_total.value.ui64 -
1431 vmp->vm_kstat.vk_mem_inuse.value.ui64;
1452 vmem_t *vmp, *cur, **vmpp;
1458 vmp = vmem_alloc(vmem_vmem_arena, sizeof (vmem_t),
1462 vmp = &vmem0[id - 1];
1469 if (vmp == NULL)
1471 bzero(vmp, sizeof (vmem_t));
1473 (void) snprintf(vmp->vm_name, VMEM_NAMELEN, "%s", name);
1474 mutex_init(&vmp->vm_lock, NULL, MUTEX_DEFAULT, NULL);
1475 cv_init(&vmp->vm_cv, NULL, CV_DEFAULT, NULL);
1476 vmp->vm_cflags = vmflag;
1479 vmp->vm_quantum = quantum;
1480 vmp->vm_qshift = highbit(quantum) - 1;
1481 nqcache = MIN(qcache_max >> vmp->vm_qshift, VMEM_NQCACHE_MAX);
1484 vfp = &vmp->vm_freelist[i];
1490 vmp->vm_freelist[0].vs_kprev = NULL;
1491 vmp->vm_freelist[VMEM_FREELISTS].vs_knext = NULL;
1492 vmp->vm_freelist[VMEM_FREELISTS].vs_end = 0;
1493 vmp->vm_hash_table = vmp->vm_hash0;
1494 vmp->vm_hash_mask = VMEM_HASH_INITIAL - 1;
1495 vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1497 vsp = &vmp->vm_seg0;
1504 vsp = &vmp->vm_rotor;
1506 VMEM_INSERT(&vmp->vm_seg0, vsp, a);
1508 bcopy(&vmem_kstat_template, &vmp->vm_kstat, sizeof (vmem_kstat_t));
1510 vmp->vm_id = id;
1512 vmp->vm_kstat.vk_source_id.value.ui32 = source->vm_id;
1513 vmp->vm_source = source;
1514 vmp->vm_source_alloc = afunc;
1515 vmp->vm_source_free = ffunc;
1522 if (vmp->vm_cflags & VMC_NO_QCACHE) {
1523 vmp->vm_min_import =
1524 VMEM_QCACHE_SLABSIZE(nqcache << vmp->vm_qshift);
1530 vmp->vm_qcache_max = nqcache << vmp->vm_qshift;
1533 (void) sprintf(buf, "%s_%lu", vmp->vm_name,
1535 vmp->vm_qcache[i] = kmem_cache_create(buf,
1537 NULL, vmp, KMC_QCACHE | KMC_NOTOUCH);
1541 if ((vmp->vm_ksp = kstat_create("vmem", vmp->vm_id, vmp->vm_name,
1544 vmp->vm_ksp->ks_data = &vmp->vm_kstat;
1545 kstat_install(vmp->vm_ksp);
1552 *vmpp = vmp;
1555 if (vmp->vm_cflags & VMC_POPULATOR) {
1557 vmem_populator[atomic_inc_32_nv(&vmem_populators) - 1] = vmp;
1558 mutex_enter(&vmp->vm_lock);
1559 (void) vmem_populate(vmp, vmflag | VM_PANIC);
1560 mutex_exit(&vmp->vm_lock);
1563 if ((base || size) && vmem_add(vmp, base, size, vmflag) == NULL) {
1564 vmem_destroy(vmp);
1568 return (vmp);
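vmem_create() above sets up the quantum, freelist heads, initial hash table, kstats, and optional quantum caches, then adds the initial [base, base + size) span. A hedged usage sketch of the public interface for an identifier arena; the arena name, ID range, and wrapper function names are invented for illustration:

    #include <sys/types.h>
    #include <sys/vmem.h>

    /*
     * Carve a private number space 1..999999 out of a fresh arena.
     * VMC_IDENTIFIER marks the arena as holding IDs rather than memory.
     */
    vmem_t *
    example_id_arena_create(void)
    {
        return (vmem_create("example_id",       /* hypothetical arena name */
            (void *)1, 999999,                  /* base, size: IDs 1..999999 */
            1,                                  /* quantum: plain integers */
            NULL, NULL, NULL,                   /* no import source */
            0,                                  /* no quantum caches */
            VM_SLEEP | VMC_IDENTIFIER));
    }

    void
    example_id_use(vmem_t *idspace)
    {
        void *id = vmem_alloc(idspace, 1, VM_SLEEP);    /* grab one ID */
        vmem_free(idspace, id, 1);                      /* and return it */
    }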
1597 * Destroy arena vmp.
1600 vmem_destroy(vmem_t *vmp)
1603 vmem_seg_t *seg0 = &vmp->vm_seg0;
1610 while ((cur = *vmpp) != vmp)
1612 *vmpp = vmp->vm_next;
1616 if (vmp->vm_qcache[i])
1617 kmem_cache_destroy(vmp->vm_qcache[i]);
1619 leaked = vmem_size(vmp, VMEM_ALLOC);
1622 vmp->vm_name, leaked, (vmp->vm_cflags & VMC_IDENTIFIER) ?
1625 if (vmp->vm_hash_table != vmp->vm_hash0)
1626 vmem_free(vmem_hash_arena, vmp->vm_hash_table,
1627 (vmp->vm_hash_mask + 1) * sizeof (void *));
1633 VMEM_DELETE(&vmp->vm_rotor, a);
1639 while (vmp->vm_nsegfree > 0)
1640 vmem_putseg_global(vmem_getseg(vmp));
1642 kstat_delete(vmp->vm_ksp);
1644 mutex_destroy(&vmp->vm_lock);
1645 cv_destroy(&vmp->vm_cv);
1646 vmem_free(vmem_vmem_arena, vmp, sizeof (vmem_t));
1656 * Resize vmp's hash table to keep the average lookup depth near 1.0.
1659 vmem_hash_rescale(vmem_t *vmp)
1664 nseg = (size_t)(vmp->vm_kstat.vk_alloc.value.ui64 -
1665 vmp->vm_kstat.vk_free.value.ui64);
1668 old_size = vmp->vm_hash_mask + 1;
1680 mutex_enter(&vmp->vm_lock);
1682 old_size = vmp->vm_hash_mask + 1;
1683 old_table = vmp->vm_hash_table;
1685 vmp->vm_hash_mask = new_size - 1;
1686 vmp->vm_hash_table = new_table;
1687 vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1694 vmem_seg_t **hash_bucket = VMEM_HASH(vmp, addr);
1701 mutex_exit(&vmp->vm_lock);
1703 if (old_table != vmp->vm_hash0)
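vmem_hash_rescale() above sizes the table from the live allocation count (vk_alloc - vk_free) and only reallocates when the desired power-of-two size differs from the current one by more than a factor of two, then rehashes every chained segment under vm_lock. A simplified sketch of that sizing rule, assuming a target of roughly one segment per bucket (the real code uses a slightly different rounding formula):

    #include <stddef.h>
    #include <stdio.h>

    /* smallest power of two >= n (n > 0) */
    static size_t
    roundup_pow2(size_t n)
    {
        size_t p = 1;

        while (p < n)
            p <<= 1;
        return (p);
    }

    /*
     * Decide whether a table sized for ~1 entry per bucket should change:
     * only bother when the desired size differs from the current one by
     * more than a factor of two, as the real rescale does.
     */
    static size_t
    new_hash_size(size_t old_size, size_t nseg)
    {
        size_t new_size = roundup_pow2(nseg ? nseg : 1);

        if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
            return (old_size);  /* close enough: leave it alone */
        return (new_size);
    }

    int
    main(void)
    {
        printf("%zu\n", new_hash_size(16, 10));     /* 16: no change */
        printf("%zu\n", new_hash_size(16, 1000));   /* 1024: grow */
        printf("%zu\n", new_hash_size(1024, 20));   /* 32: shrink */
        return (0);
    }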
1714 vmem_t *vmp;
1717 for (vmp = vmem_list; vmp != NULL; vmp = vmp->vm_next) {
1723 cv_broadcast(&vmp->vm_cv);
1728 vmem_hash_rescale(vmp);
1736 vmem_qcache_reap(vmem_t *vmp)
1744 if (vmp->vm_qcache[i])
1745 kmem_cache_reap_now(vmp->vm_qcache[i]);