Lines Matching defs:vsp

Cross-reference hits for the segment pointer vsp in vmem.c, the Solaris/illumos
vmem arena allocator (the thr_self() call at source line 349 suggests the
libumem userland build). The leading numbers are line numbers in that source
file; lines that do not mention vsp are elided, so every excerpt below has
gaps.

222 #define	VMEM_INSERT(vprev, vsp, type)					\
225 (vsp)->vs_##type##next = (vnext); \
226 (vsp)->vs_##type##prev = (vprev); \
227 (vprev)->vs_##type##next = (vsp); \
228 (vnext)->vs_##type##prev = (vsp); \
231 #define	VMEM_DELETE(vsp, type)						\
233 vmem_seg_t *vprev = (vsp)->vs_##type##prev; \
234 vmem_seg_t *vnext = (vsp)->vs_##type##next; \
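
VMEM_INSERT and VMEM_DELETE are constant-time splices on the doubly-linked
lists that thread vmem_seg_t structures together ('a' links form the
address-ordered ring, as at line 1486; 'k' links serve the freelists, as at
line 308). The declaration of vnext inside VMEM_INSERT (source line 224) does
not mention vsp, so the match list omits it. A minimal, self-contained sketch
of the same two splices on a simplified node type, not the real vmem_seg_t:

    #include <assert.h>

    /* Stand-in for vmem_seg_t's vs_*next/vs_*prev link pair. */
    typedef struct node {
        struct node *next;
        struct node *prev;
    } node_t;

    /* The splice VMEM_INSERT(vprev, vsp, type) performs: link vsp in
     * immediately after vprev. */
    static void
    insert_after(node_t *vprev, node_t *vsp)
    {
        node_t *vnext = vprev->next;    /* the declaration elided above */

        vsp->next = vnext;
        vsp->prev = vprev;
        vprev->next = vsp;
        vnext->prev = vsp;
    }

    /* The unlink VMEM_DELETE(vsp, type) performs. */
    static void
    delete_node(node_t *vsp)
    {
        node_t *vprev = vsp->prev;
        node_t *vnext = vsp->next;

        vprev->next = vnext;
        vnext->prev = vprev;
    }

    int
    main(void)
    {
        node_t head = { &head, &head };    /* self-linked sentinel, like vm_seg0 */
        node_t a;

        insert_after(&head, &a);
        assert(head.next == &a && a.prev == &head && a.next == &head);
        delete_node(&a);
        assert(head.next == &head && head.prev == &head);
        return (0);
    }
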
245 vmem_seg_t *vsp;
248 if ((vsp = vmem_segfree) != NULL)
249 vmem_segfree = vsp->vs_knext;
252 return (vsp);
259 vmem_putseg_global(vmem_seg_t *vsp)
262 vsp->vs_knext = vmem_segfree;
263 vmem_segfree = vsp;
273 vmem_seg_t *vsp;
277 vsp = vmp->vm_segfree;
278 vmp->vm_segfree = vsp->vs_knext;
281 return (vsp);
288 vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
290 vsp->vs_knext = vmp->vm_segfree;
291 vmp->vm_segfree = vsp;
296 * Add vsp to the appropriate freelist.
299 vmem_freelist_insert(vmem_t *vmp, vmem_seg_t *vsp)
303 ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
305 vprev = (vmem_seg_t *)&vmp->vm_freelist[highbit(VS_SIZE(vsp)) - 1];
306 vsp->vs_type = VMEM_FREE;
308 VMEM_INSERT(vprev, vsp, k);
314 * Take vsp from the freelist.
317 vmem_freelist_delete(vmem_t *vmp, vmem_seg_t *vsp)
319 ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
320 ASSERT(vsp->vs_type == VMEM_FREE);
322 if (vsp->vs_knext->vs_start == 0 && vsp->vs_kprev->vs_start == 0) {
324 * The segments on both sides of 'vsp' are freelist heads,
325 * so taking vsp leaves the freelist at vsp->vs_kprev empty.
327 ASSERT(vmp->vm_freemap & VS_SIZE(vsp->vs_kprev));
328 vmp->vm_freemap ^= VS_SIZE(vsp->vs_kprev);
330 VMEM_DELETE(vsp, k);
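
vmem_freelist_insert() picks the list head by power-of-two size class:
freelist[highbit(VS_SIZE(vsp)) - 1] holds free segments whose sizes fall in
[2^(h-1), 2^h). The elided neighbor lines also maintain vm_freemap, a bitmask
with one bit per class; the XOR at source line 328 clears a class's bit when
a deletion leaves its list empty, which is what the two vs_start == 0 head
checks at line 322 detect. A sketch of the class computation, with a portable
stand-in for highbit():

    #include <stdio.h>
    #include <stddef.h>

    /* Portable stand-in for highbit(): 1-based index of the highest set
     * bit, 0 for n == 0. The real routine uses a faster bit trick. */
    static int
    highbit(size_t n)
    {
        int h = 0;

        while (n != 0) {
            h++;
            n >>= 1;
        }
        return (h);
    }

    int
    main(void)
    {
        /* A free segment of size s lands on freelist[highbit(s) - 1]. */
        size_t sizes[] = { 4096, 5000, 8192, 65536 };

        for (int i = 0; i < 4; i++)
            printf("size %zu -> freelist[%d]\n", sizes[i],
                highbit(sizes[i]) - 1);
        return (0);
    }
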
334 * Add vsp to the allocated-segment hash table and update kstats.
337 vmem_hash_insert(vmem_t *vmp, vmem_seg_t *vsp)
341 vsp->vs_type = VMEM_ALLOC;
342 bucket = VMEM_HASH(vmp, vsp->vs_start);
343 vsp->vs_knext = *bucket;
344 *bucket = vsp;
347 vsp->vs_depth = (uint8_t)getpcstack(vsp->vs_stack,
349 vsp->vs_thread = thr_self();
350 vsp->vs_timestamp = gethrtime();
352 vsp->vs_depth = 0;
356 vmp->vm_kstat.vk_mem_inuse += VS_SIZE(vsp);
360 * Remove vsp from the allocated-segment hash table and update kstats.
365 vmem_seg_t *vsp, **prev_vspp;
368 while ((vsp = *prev_vspp) != NULL) {
369 if (vsp->vs_start == addr) {
370 *prev_vspp = vsp->vs_knext;
374 prev_vspp = &vsp->vs_knext;
377 if (vsp == NULL) {
381 if (VS_SIZE(vsp) != size) {
383 "(expect %lu)", vmp, addr, size, VS_SIZE(vsp));
389 return (vsp);
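
vmem_hash_insert() pushes the segment onto the front of its bucket's chain;
vmem_hash_delete() finds it again by start address with a pointer-to-pointer
walk (prev_vspp), an idiom that unlinks head and interior nodes through the
same code path. A simplified runnable version of both (real buckets come from
VMEM_HASH, and the real segment also records its type, size, and the debug
fields captured at lines 347-350):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Simplified allocated-segment record (the real vmem_seg_t has more). */
    typedef struct seg {
        uintptr_t start;
        struct seg *knext;    /* hash-chain link */
    } seg_t;

    static void
    hash_insert(seg_t **bucket, seg_t *vsp)
    {
        vsp->knext = *bucket;
        *bucket = vsp;
    }

    static seg_t *
    hash_delete(seg_t **bucket, uintptr_t addr)
    {
        seg_t *vsp, **prev_vspp = bucket;

        while ((vsp = *prev_vspp) != NULL) {
            if (vsp->start == addr) {
                *prev_vspp = vsp->knext;    /* unlink, head or interior */
                break;
            }
            prev_vspp = &vsp->knext;
        }
        return (vsp);    /* NULL here is vmem's bad-free case (line 377) */
    }

    int
    main(void)
    {
        seg_t *bucket = NULL;
        seg_t a = { 0x1000, NULL }, b = { 0x2000, NULL };

        hash_insert(&bucket, &a);
        hash_insert(&bucket, &b);    /* chain is now b -> a */
        assert(hash_delete(&bucket, 0x1000) == &a);
        assert(bucket == &b && b.knext == NULL);
        return (0);
    }
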
411 * Remove segment vsp from the arena.
414 vmem_seg_destroy(vmem_t *vmp, vmem_seg_t *vsp)
416 ASSERT(vsp->vs_type != VMEM_ROTOR);
417 VMEM_DELETE(vsp, a);
419 vmem_putseg(vmp, vsp);
475 * Remove span vsp from vmp and update kstats.
478 vmem_span_destroy(vmem_t *vmp, vmem_seg_t *vsp)
480 vmem_seg_t *span = vsp->vs_aprev;
481 size_t size = VS_SIZE(vsp);
486 if (vsp->vs_import)
492 vmem_seg_destroy(vmp, vsp);
497 * Allocate the subrange [addr, addr + size) from segment vsp.
502 vmem_seg_alloc(vmem_t *vmp, vmem_seg_t *vsp, uintptr_t addr, size_t size)
504 uintptr_t vs_start = vsp->vs_start;
505 uintptr_t vs_end = vsp->vs_end;
512 ASSERT(vsp->vs_type == VMEM_FREE);
523 vsp->vs_start = addr_end;
524 vsp = vmem_seg_create(vmp, vsp->vs_aprev, addr, addr + size);
525 vmem_hash_insert(vmp, vsp);
526 return (vsp);
529 vmem_freelist_delete(vmp, vsp);
533 vmem_seg_create(vmp, vsp, addr_end, vs_end));
537 vmem_seg_create(vmp, vsp->vs_aprev, vs_start, addr));
539 vsp->vs_start = addr;
540 vsp->vs_end = addr + size;
542 vmem_hash_insert(vmp, vsp);
543 return (vsp);
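
vmem_seg_alloc() carves [addr, addr + size) out of the free segment
[vs_start, vs_end). Depending on where the carve lands, zero, one, or two
free remainders survive: the visible lines create a new segment for the
allocated piece or for each leftover, then hash the allocated segment. The
sketch below only classifies the split; segment creation, freelist moves,
and the surrounding asserts are omitted:

    #include <assert.h>
    #include <stdint.h>

    typedef enum { FIT_EXACT, FIT_HEAD, FIT_TAIL, FIT_MIDDLE } fit_t;

    /* Classify how [addr, addr + size) sits inside [vs_start, vs_end). */
    static fit_t
    classify_split(uintptr_t vs_start, uintptr_t vs_end,
        uintptr_t addr, uintptr_t size)
    {
        uintptr_t addr_end = addr + size;

        assert(vs_start <= addr && addr_end <= vs_end);
        if (addr == vs_start && addr_end == vs_end)
            return (FIT_EXACT);     /* whole segment consumed */
        if (addr == vs_start)
            return (FIT_HEAD);      /* one remainder, after the carve */
        if (addr_end == vs_end)
            return (FIT_TAIL);      /* one remainder, before the carve */
        return (FIT_MIDDLE);        /* remainders on both sides */
    }

    int
    main(void)
    {
        assert(classify_split(0x1000, 0x2000, 0x1000, 0x1000) == FIT_EXACT);
        assert(classify_split(0x1000, 0x2000, 0x1000, 0x400) == FIT_HEAD);
        assert(classify_split(0x1000, 0x2000, 0x1c00, 0x400) == FIT_TAIL);
        assert(classify_split(0x1000, 0x2000, 0x1400, 0x400) == FIT_MIDDLE);
        return (0);
    }
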
563 vmem_seg_t *vsp;
570 (vsp = vmem_getseg_global()) != NULL)
571 vmem_putseg(vmp, vsp);
660 vmem_seg_t *vsp = NULL;
680 vsp = vprev;
682 vsp = vnext;
686 * vsp could represent a complete imported span,
689 if (vsp != NULL && vsp->vs_import && vmp->vm_source_free != NULL &&
690 vsp->vs_aprev->vs_type == VMEM_SPAN &&
691 vsp->vs_anext->vs_type == VMEM_SPAN) {
692 void *vaddr = (void *)vsp->vs_start;
693 size_t size = VS_SIZE(vsp);
694 ASSERT(size == VS_SIZE(vsp->vs_aprev));
695 vmem_freelist_delete(vmp, vsp);
696 vmem_span_destroy(vmp, vsp);
713 vmem_seg_t *vsp, *rotor;
736 vsp = rotor->vs_anext;
737 if (vsp->vs_type == VMEM_FREE && (vs_size = VS_SIZE(vsp)) > realsize &&
740 addr = vsp->vs_start;
741 vsp->vs_start = addr + realsize;
754 if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
756 vsp = vsp->vs_anext;
757 if (vsp == rotor) {
769 vsp = rotor->vs_aprev;
770 if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
788 vsp = rotor->vs_anext;
795 addr = vsp->vs_start;
796 vsp = vmem_seg_alloc(vmp, vsp, addr, size);
797 ASSERT(vsp->vs_type == VMEM_ALLOC &&
798 vsp->vs_start == addr && vsp->vs_end == addr + size);
804 vmem_advance(vmp, rotor, vsp);
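
VM_NEXTFIT allocations pivot around a rotor segment (vs_type == VMEM_ROTOR,
created at source lines 1484-1486) that sits in the arena's address-ordered
circular list. Line 736 starts the scan just past the rotor, lines 754-757
advance until a big-enough free segment is found or the scan wraps, line 769
also considers the segment just before the rotor, and line 804 re-plants the
rotor after the new allocation so the next request resumes there. A toy
version of the scan (locking, the fast path at lines 737-741, and arena
extension on wrap are omitted):

    #include <assert.h>
    #include <stddef.h>

    /* Simplified circular segment list; the rotor is just a list node. */
    typedef struct seg {
        struct seg *anext;
        int free;
        size_t size;
    } seg_t;

    /* Scan forward from the rotor; stop at the first big-enough free
     * segment, or fail after coming all the way around. */
    static seg_t *
    nextfit_scan(seg_t *rotor, size_t size)
    {
        seg_t *vsp = rotor->anext;

        while (vsp != rotor) {
            if (vsp->free && vsp->size >= size)
                return (vsp);
            vsp = vsp->anext;
        }
        return (NULL);
    }

    int
    main(void)
    {
        seg_t rotor, a, b;

        /* rotor -> b (free, 4K) -> a (allocated) -> rotor */
        rotor = (seg_t){ &b, 0, 0 };
        b = (seg_t){ &a, 1, 4096 };
        a = (seg_t){ &rotor, 0, 4096 };

        assert(nextfit_scan(&rotor, 4096) == &b);
        assert(nextfit_scan(&rotor, 8192) == NULL);
        return (0);
    }
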
818 vmem_seg_t *vsp;
890 for (vbest = NULL, vsp = (flist == 0) ? NULL :
892 vsp != NULL; vsp = vsp->vs_knext) {
894 if (vsp->vs_start == 0) {
906 VS_SIZE(vsp)));
909 vsp = (vmem_seg_t *)&vmp->vm_freelist[flist];
910 ASSERT(vsp->vs_knext->vs_type == VMEM_FREE);
913 if (vsp->vs_end - 1 < (uintptr_t)minaddr)
915 if (vsp->vs_start > (uintptr_t)maxaddr - 1)
917 start = MAX(vsp->vs_start, (uintptr_t)minaddr);
918 end = MIN(vsp->vs_end - 1, (uintptr_t)maxaddr - 1) + 1;
924 (vbest != NULL && VS_SIZE(vsp) >= VS_SIZE(vbest)))
926 vbest = vsp;
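
The loop at source lines 890-926 is the best-fit path: walk one freelist and
remember the smallest segment that satisfies the [minaddr, maxaddr) and size
constraints (a vs_start of 0 marks the embedded freelist heads, which is how
line 894 detects the end of one class and hops to the next non-empty one).
When best fit is not required, vmem takes an "instant fit" instead: pick, via
vm_freemap, a freelist whose entire class is guaranteed big enough, with no
scanning at all. A sketch of that bit trick, assuming a 64-bit freemap in
which bit h-1 set means freelist[h-1] is non-empty (the real code expresses
this with lowbit() and P2ALIGN()):

    #include <assert.h>
    #include <stdint.h>

    /* Return the lowest non-empty class >= minclass, or -1 if none. Any
     * segment on freelist[c] has size >= 2^c, so if the request's size
     * rounds up into class minclass, the first hit is an instant fit. */
    static int
    instant_fit_class(uint64_t freemap, int minclass)
    {
        uint64_t eligible = freemap & ~((1ULL << minclass) - 1);
        int c = 0;

        if (eligible == 0)
            return (-1);
        while ((eligible & 1) == 0) {    /* index of lowest set bit */
            eligible >>= 1;
            c++;
        }
        return (c);
    }

    int
    main(void)
    {
        uint64_t freemap = (1ULL << 3) | (1ULL << 7);    /* lists 3, 7 */

        assert(instant_fit_class(freemap, 2) == 3);
        assert(instant_fit_class(freemap, 4) == 7);
        assert(instant_fit_class(freemap, 8) == -1);
        return (0);
    }
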
1010 vmem_seg_t *vsp, *vnext, *vprev;
1014 vsp = vmem_hash_delete(vmp, (uintptr_t)vaddr, size);
1015 vsp->vs_end = P2ROUNDUP(vsp->vs_end, vmp->vm_quantum);
1020 vnext = vsp->vs_anext;
1022 ASSERT(vsp->vs_end == vnext->vs_start);
1024 vsp->vs_end = vnext->vs_end;
1031 vprev = vsp->vs_aprev;
1033 ASSERT(vprev->vs_end == vsp->vs_start);
1035 vprev->vs_end = vsp->vs_end;
1036 vmem_seg_destroy(vmp, vsp);
1037 vsp = vprev;
1043 if (vsp->vs_import && vmp->vm_source_free != NULL &&
1044 vsp->vs_aprev->vs_type == VMEM_SPAN &&
1045 vsp->vs_anext->vs_type == VMEM_SPAN) {
1046 vaddr = (void *)vsp->vs_start;
1047 size = VS_SIZE(vsp);
1048 ASSERT(size == VS_SIZE(vsp->vs_aprev));
1049 vmem_span_destroy(vmp, vsp);
1053 vmem_freelist_insert(vmp, vsp);
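
This is the heart of vmem_free(): round the segment's end back up to the
quantum (line 1015), absorb a free successor (lines 1020-1024) and a free
predecessor (lines 1031-1037) by stretching boundaries and destroying the
absorbed segment, then either hand a now fully-free imported span back to
the source arena (lines 1043-1049) or put the coalesced segment on a
freelist. A sketch of just the boundary-stretching merge; back-pointer
fixups, freelist and hash bookkeeping, and the span checks are left out:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct seg {
        uintptr_t start, end;
        int free;
        struct seg *aprev, *anext;    /* address-ordered neighbors */
    } seg_t;

    /* Merge vsp with free neighbors; returns the surviving segment. */
    static seg_t *
    coalesce(seg_t *vsp)
    {
        seg_t *vnext = vsp->anext;
        seg_t *vprev = vsp->aprev;

        if (vnext != NULL && vnext->free) {
            assert(vsp->end == vnext->start);
            vsp->end = vnext->end;        /* absorb successor */
            vsp->anext = vnext->anext;    /* real code: vmem_seg_destroy() */
        }
        if (vprev != NULL && vprev->free) {
            assert(vprev->end == vsp->start);
            vprev->end = vsp->end;        /* absorb into predecessor */
            vprev->anext = vsp->anext;
            vsp = vprev;
        }
        vsp->free = 1;
        return (vsp);
    }

    int
    main(void)
    {
        seg_t a = { 0x1000, 0x2000, 1, NULL, NULL };
        seg_t b = { 0x2000, 0x3000, 0, &a, NULL };
        seg_t c = { 0x3000, 0x4000, 1, &b, NULL };

        a.anext = &b;
        b.anext = &c;

        /* freeing b melds a, b, c into one [0x1000, 0x4000) segment */
        assert(coalesce(&b) == &a);
        assert(a.start == 0x1000 && a.end == 0x4000);
        return (0);
    }
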
1068 vmem_seg_t *vsp;
1111 vsp = vmp->vm_freelist[flist].vs_knext;
1112 addr = vsp->vs_start;
1113 (void) vmem_seg_alloc(vmp, vsp, addr, size);
1139 vmem_seg_t *vsp;
1144 for (vsp = seg0->vs_knext; vsp != seg0; vsp = vsp->vs_knext) {
1146 ASSERT(vsp->vs_type == VMEM_SPAN);
1147 if (start >= vsp->vs_start && end - 1 <= vsp->vs_end - 1)
1151 return (vsp != seg0);
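
vmem_contains() walks the span list threaded off seg0 via the k links and
reports whether [start, end) lies within one span. The end - 1 comparisons
at line 1147 keep the test correct even for a span reaching the very top of
the address space, where comparing end against vs_end directly could be
foiled by wraparound. Just the predicate, as a tiny sketch:

    #include <assert.h>
    #include <stdint.h>

    /* Mirror of the line-1147 test; stays correct when vs_end has wrapped
     * to 0 for a span ending at the top of the address space. */
    static int
    span_contains(uintptr_t vs_start, uintptr_t vs_end,
        uintptr_t start, uintptr_t end)
    {
        return (start >= vs_start && end - 1 <= vs_end - 1);
    }

    int
    main(void)
    {
        assert(span_contains(0x1000, 0x5000, 0x2000, 0x3000));
        assert(!span_contains(0x1000, 0x5000, 0x4000, 0x6000));
        assert(span_contains((uintptr_t)-0x1000, 0, (uintptr_t)-0x800, 0));
        return (0);
    }
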
1196 vmem_seg_t *vsp;
1245 vsp = prevseg;
1258 vsp = prevseg;
1271 vsp = nextseg;
1280 vsp = span;
1291 vsp = oldseg;
1293 vsp = vmem_seg_create(vmp, oldseg, addr, endaddr);
1304 vsp = oldseg;
1306 vsp = vmem_seg_create(vmp, span, addr, endaddr);
1308 vmem_freelist_insert(vmp, vsp);
1310 return (vsp);
1324 vmem_seg_t *vsp;
1341 vsp = vmem_span_create(vmp, vaddr, size, 0);
1343 vsp = vmem_extend_unlocked(vmp, addr, endaddr);
1345 ASSERT(VS_SIZE(vsp) >= alloc);
1347 addr = vsp->vs_start;
1348 (void) vmem_seg_alloc(vmp, vsp, addr, alloc);
1369 vmem_seg_t *vsp;
1381 for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext) {
1382 if (vsp->vs_type & typemask) {
1383 void *start = (void *)vsp->vs_start;
1384 size_t size = VS_SIZE(vsp);
1386 vmem_advance(vmp, &walker, vsp);
1390 vsp = &walker;
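
vmem_walk() cannot hold the arena lock while it calls back into the client,
so it plants a marker segment (walker, typed VMEM_WALKER in the real code)
into the list, drops the lock around each callback, and resumes from the
marker afterward: vmem_advance() at line 1386 moves the marker past the
segment just visited, and line 1390 restarts iteration from it. A lock-free
toy of the marker mechanics:

    #include <assert.h>
    #include <stddef.h>

    typedef struct seg {
        struct seg *anext, *aprev;
    } seg_t;

    static void
    insert_after(seg_t *vprev, seg_t *vsp)
    {
        seg_t *vnext = vprev->anext;

        vsp->anext = vnext;
        vsp->aprev = vprev;
        vprev->anext = vsp;
        vnext->aprev = vsp;
    }

    /* Move the walker so it sits immediately after afterseg. */
    static void
    advance(seg_t *walker, seg_t *afterseg)
    {
        walker->aprev->anext = walker->anext;    /* unlink walker */
        walker->anext->aprev = walker->aprev;
        insert_after(afterseg, walker);          /* re-plant it */
    }

    int
    main(void)
    {
        seg_t head = { &head, &head };
        seg_t a, walker;

        insert_after(&head, &a);
        insert_after(&head, &walker);    /* walker precedes a */
        advance(&walker, &a);            /* visited a; step past it */
        assert(a.anext == &walker && walker.anext == &head);
        return (0);
    }
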
1437 vmem_seg_t *vsp;
1477 vsp = &vmp->vm_seg0;
1478 vsp->vs_anext = vsp;
1479 vsp->vs_aprev = vsp;
1480 vsp->vs_knext = vsp;
1481 vsp->vs_kprev = vsp;
1482 vsp->vs_type = VMEM_SPAN;
1484 vsp = &vmp->vm_rotor;
1485 vsp->vs_type = VMEM_ROTOR;
1486 VMEM_INSERT(&vmp->vm_seg0, vsp, a);
1543 vmem_seg_t *vsp;
1572 for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext)
1573 vmem_putseg_global(vsp);
1589 vmem_seg_t **old_table, **new_table, *vsp;
1616 vsp = old_table[h];
1617 while (vsp != NULL) {
1618 uintptr_t addr = vsp->vs_start;
1619 vmem_seg_t *next_vsp = vsp->vs_knext;
1621 vsp->vs_knext = *hash_bucket;
1622 *hash_bucket = vsp;
1623 vsp = next_vsp;
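
The rescale loop at source lines 1616-1623 migrates every allocated segment
from the old hash table to the new one: pop each entry off its old chain,
recompute the bucket from vs_start, and push it on the front. A sketch
assuming a power-of-two table and a plain mask hash (the real VMEM_HASH also
shifts out the low-order quantum bits before masking):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct seg {
        uintptr_t start;
        struct seg *knext;
    } seg_t;

    static void
    rehash(seg_t **old_table, size_t old_size,
        seg_t **new_table, size_t new_size)
    {
        for (size_t h = 0; h < old_size; h++) {
            seg_t *vsp = old_table[h];

            while (vsp != NULL) {
                seg_t *next_vsp = vsp->knext;    /* save before relinking */
                seg_t **bucket = &new_table[vsp->start & (new_size - 1)];

                vsp->knext = *bucket;    /* push-front into new bucket */
                *bucket = vsp;
                vsp = next_vsp;
            }
        }
    }

    int
    main(void)
    {
        seg_t a = { 5, NULL }, b = { 9, NULL };
        seg_t *old_table[1] = { &a };
        seg_t *new_table[4] = { NULL };

        a.knext = &b;    /* both on the single old bucket */
        rehash(old_table, 1, new_table, 4);
        assert(new_table[1] == &b && b.knext == &a);    /* 5 & 3 == 9 & 3 */
        return (0);
    }
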