/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * Copyright (c) 2013 EMC Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From:
 *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
 *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>

#include "opt_vm.h"

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>

#define	VMEM_OPTORDER		5
#define	VMEM_OPTVALUE		(1 << VMEM_OPTORDER)
#define	VMEM_MAXORDER						\
    (VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)

#define	VMEM_HASHSIZE_MIN	16
#define	VMEM_HASHSIZE_MAX	131072

#define	VMEM_QCACHE_IDX_MAX	16

#define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT)

#define	VMEM_FLAGS						\
    (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | M_BESTFIT | M_FIRSTFIT)

#define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)

#define	QC_NAME_MAX	16

/*
 * Data structures private to vmem.
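 *
 * An arena's address space is described by boundary tags (struct
 * vmem_btag below): every contiguous segment, whether an imported span,
 * a free range, or a busy allocation, is represented by one tag.  Tags
 * sit on the per-arena segment list and, in addition, on a size-indexed
 * freelist (free tags) or an address-hashed busy list (busy tags).
 *
 * Freelist indexing example: with VMEM_OPTORDER == 5 and a 64-bit
 * vmem_size_t, VMEM_MAXORDER is 31 + 64 - 5 == 90.  Sizes of 1..31
 * quanta each get a dedicated freelist (orders 0..30); larger sizes
 * share power-of-two buckets, e.g. SIZE2ORDER(100) == 32, the bucket
 * covering 64..127 quanta.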
 */
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");

typedef struct vmem_btag bt_t;

TAILQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

struct qcache {
	uma_zone_t	qc_cache;
	vmem_t		*qc_vmem;
	vmem_size_t	qc_size;
	char		qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))

#define	VMEM_NAME_MAX	16

/* vmem arena */
struct vmem {
	struct mtx_padalign	vm_lock;
	struct cv		vm_cv;
	char			vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem)	vm_alllist;
	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
	struct vmem_seglist	vm_seglist;
	struct vmem_hashlist	*vm_hashlist;
	vmem_size_t		vm_hashsize;

	/* Constant after init */
	vmem_size_t		vm_qcache_max;
	vmem_size_t		vm_quantum_mask;
	vmem_size_t		vm_import_quantum;
	int			vm_quantum_shift;

	/* Written on alloc/free */
	LIST_HEAD(, vmem_btag)	vm_freetags;
	int			vm_nfreetags;
	int			vm_nbusytag;
	vmem_size_t		vm_inuse;
	vmem_size_t		vm_size;
	vmem_size_t		vm_limit;

	/* Used on import. */
	vmem_import_t		*vm_importfn;
	vmem_release_t		*vm_releasefn;
	void			*vm_arg;

	/* Space exhaustion callback. */
	vmem_reclaim_t		*vm_reclaimfn;

	/* quantum cache */
	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
};

/* boundary tag */
struct vmem_btag {
	TAILQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t	bt_start;
	vmem_size_t	bt_size;
	int		bt_type;
};

#define	BT_TYPE_SPAN		1	/* Allocated from importfn */
#define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
#define	BT_TYPE_FREE		3	/* Available space. */
#define	BT_TYPE_BUSY		4	/* Used space. */
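
/*
 * The two span types are the smallest values so that BT_ISSPAN_P() can
 * test for either with a single comparison.  Span tags only record where
 * an imported or statically added region begins and ends; the space
 * inside a span is always covered by separate FREE/BUSY tags.
 */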
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)

#if defined(DIAGNOSTIC)
static int enable_vmem_check = 1;
SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
    &enable_vmem_check, 0, "Enable vmem check");
static void vmem_check(vmem_t *);
#endif

static struct callout	vmem_periodic_ch;
static int		vmem_periodic_interval;
static struct task	vmem_periodic_wk;

static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);

/* ---- misc */
#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)

#define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
#define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);

#define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((order) < VMEM_OPTVALUE ? ((order) + 1) : \
	(vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
#define	SIZE2ORDER(size)	((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
	(flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))

/*
 * Maximum number of boundary tags that may be required to satisfy an
 * allocation.  Two may be required to import.  Another two may be
 * required to clip edges.
 */
#define	BT_MAXALLOC	4

/*
 * Max free limits the number of locally cached boundary tags.  We
 * just want to avoid hitting the zone allocator for every call.
 */
#define	BT_MAXFREE	(BT_MAXALLOC * 8)

/* Allocator for boundary tags. */
static uma_zone_t vmem_bt_zone;

/* boot time arena storage. */
static struct vmem kernel_arena_storage;
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
/* kernel and kmem arenas are aliased for backwards KPI compat. */
vmem_t *kernel_arena = &kernel_arena_storage;
vmem_t *kmem_arena = &kernel_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;

#ifdef DEBUG_MEMGUARD
static struct vmem memguard_arena_storage;
vmem_t *memguard_arena = &memguard_arena_storage;
#endif

/*
 * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
 * allocation will not fail once bt_fill() passes.  To do so we cache
 * at least the maximum possible tag allocations in the arena.
 */
static int
bt_fill(vmem_t *vm, int flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * Only allow the kernel arena to dip into reserve tags.  It is the
	 * vmem where new tags come from.
	 */
	flags &= BT_FLAGS;
	if (vm != kernel_arena)
		flags &= ~M_USE_RESERVE;

	/*
	 * Loop until we meet the reserve.  To minimize the lock shuffle
	 * and prevent simultaneous fills we first try a NOWAIT regardless
	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
	 * holding a vmem lock.
	 */
	while (vm->vm_nfreetags < BT_MAXALLOC) {
		bt = uma_zalloc(vmem_bt_zone,
		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
		if (bt == NULL) {
			VMEM_UNLOCK(vm);
			bt = uma_zalloc(vmem_bt_zone, flags);
			VMEM_LOCK(vm);
			if (bt == NULL && (flags & M_NOWAIT) != 0)
				break;
		}
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags < BT_MAXALLOC)
		return ENOMEM;

	return 0;
}

/*
 * Pop a tag off of the freetag stack.
 */
static bt_t *
bt_alloc(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	bt = LIST_FIRST(&vm->vm_freetags);
	MPASS(bt != NULL);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return bt;
}

/*
 * Trim the per-vmem free list.  Returns with the lock released to
 * avoid allocator recursions.
 */
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	LIST_HEAD(, vmem_btag) freetags;
	bt_t *bt;

	LIST_INIT(&freetags);
	VMEM_ASSERT_LOCKED(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
	}
	VMEM_UNLOCK(vm);
	while ((bt = LIST_FIRST(&freetags)) != NULL) {
		LIST_REMOVE(bt, bt_freelist);
		uma_zfree(vmem_bt_zone, bt);
	}
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 2]
 *  :
 * freelist[29] ... [30, 30]
 * freelist[30] ... [31, 31]
 * freelist[31] ... [32, 63]
 * freelist[32] ... [64, 127]
 *  :
 * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For M_FIRSTFIT, return the list on which every block is guaranteed to be
 * large enough for the requested size.  Otherwise, return the lowest-order
 * list that may contain blocks large enough for the requested size.
 */
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);

	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), 0);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
	bt_free(vm, bt);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

/*
 * Import from the arena into the quantum cache in UMA.
 */
static int
qc_import(void *arg, void **store, int cnt, int flags)
{
	qcache_t *qc;
	vmem_addr_t addr;
	int i;

	qc = arg;
	if ((flags & VMEM_FITMASK) == 0)
		flags |= M_BESTFIT;
	for (i = 0; i < cnt; i++) {
		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
			break;
		store[i] = (void *)addr;
		/* Only guarantee one allocation. */
		flags &= ~M_WAITOK;
		flags |= M_NOWAIT;
	}
	return i;
}

/*
 * Release memory from the UMA cache to the arena.
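 * This is the inverse of qc_import(); UMA invokes it when the zone cache
 * is trimmed or drained, handing the constant-sized chunks back so they
 * become ordinary free segments again.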
 */
static void
qc_release(void *arg, void **store, int cnt)
{
	qcache_t *qc;
	int i;

	qc = arg;
	for (i = 0; i < cnt; i++)
		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
}

static void
qc_init(vmem_t *vm, vmem_size_t qcache_max)
{
	qcache_t *qc;
	vmem_size_t size;
	int qcache_idx_max;
	int i;

	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
	    VMEM_QCACHE_IDX_MAX);
	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++) {
		qc = &vm->vm_qcache[i];
		size = (i + 1) << vm->vm_quantum_shift;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_vmem = vm;
		qc->qc_size = size;
		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
		    UMA_ZONE_VM);
		MPASS(qc->qc_cache);
	}
}

static void
qc_destroy(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zdestroy(vm->vm_qcache[i].qc_cache);
}

static void
qc_drain(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		zone_drain(vm->vm_qcache[i].qc_cache);
}

#ifndef UMA_MD_SMALL_ALLOC

static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;

/*
 * vmem_bt_alloc:  Allocate a new page of boundary tags.
 *
 * On architectures with uma_small_alloc there is no recursion; no address
 * space need be allocated to allocate boundary tags.  For the others, we
 * must handle recursion.  Boundary tags are necessary to allocate new
 * boundary tags.
 *
 * UMA guarantees that enough tags are held in reserve to allocate a new
 * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
 * when allocating the page to hold new boundary tags.  In this way the
 * reserve is automatically filled by the allocation that uses the reserve.
 *
 * We still have to guarantee that the new tags are allocated atomically since
 * many threads may try concurrently.  The bt_lock provides this guarantee.
 * We convert WAITOK allocations to NOWAIT and then handle the blocking here
 * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
 * loop again after checking to see if we lost the race to allocate.
 *
 * There is a small race between vmem_bt_alloc() returning the page and the
 * zone lock being acquired to add the page to the zone.  For WAITOK
 * allocations we just pause briefly.  NOWAIT may experience a transient
 * failure.  To alleviate this we permit a small number of simultaneous
 * fills to proceed concurrently so NOWAIT is less likely to fail unless
 * we are really out of KVA.
 */
static void *
vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
{
	vmem_addr_t addr;

	*pflag = UMA_SLAB_KERNEL;

	/*
	 * Single thread boundary tag allocation so that the address space
	 * and memory are added in one atomic operation.
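	 * The KVA is carved out of kernel_arena with M_USE_RESERVE, so the
	 * boundary tag reserve can cover it, and is backed with pages
	 * before the lock is dropped.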
	 */
	mtx_lock(&vmem_bt_lock);
	if (vmem_xalloc(kernel_arena, bytes, 0, 0, 0, VMEM_ADDR_MIN,
	    VMEM_ADDR_MAX, M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT,
	    &addr) == 0) {
		if (kmem_back(kernel_object, addr, bytes,
		    M_NOWAIT | M_USE_RESERVE) == 0) {
			mtx_unlock(&vmem_bt_lock);
			return ((void *)addr);
		}
		vmem_xfree(kernel_arena, addr, bytes);
		mtx_unlock(&vmem_bt_lock);
		/*
		 * Out of memory, not address space.  This may not even be
		 * possible due to M_USE_RESERVE page allocation.
		 */
		if (wait & M_WAITOK)
			VM_WAIT;
		return (NULL);
	}
	mtx_unlock(&vmem_bt_lock);
	/*
	 * We're either out of address space or lost a fill race.
	 */
	if (wait & M_WAITOK)
		pause("btalloc", 1);

	return (NULL);
}
#endif

void
vmem_startup(void)
{

	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
	vmem_bt_zone = uma_zcreate("vmem btag",
	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);
#ifndef UMA_MD_SMALL_ALLOC
	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
	/*
	 * Reserve enough tags to allocate new tags.  We allow multiple
	 * CPUs to attempt to allocate new tags concurrently to limit
	 * false restarts in UMA.
	 */
	uma_zone_reserve(vmem_bt_zone, BT_MAXALLOC * (mp_ncpus + 1) / 2);
	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
#endif
}

/* ---- rehash */

static int
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	vmem_size_t oldhashsize;

	MPASS(newhashsize > 0);

	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
	    M_VMEM, M_NOWAIT);
	if (newhashlist == NULL)
		return ENOMEM;
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != vm->vm_hash0) {
		free(oldhashlist, M_VMEM);
	}

	return 0;
}

static void
vmem_periodic_kick(void *dummy)
{

	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
}

static void
vmem_periodic(void *unused, int pending)
{
	vmem_t *vm;
	vmem_size_t desired;
	vmem_size_t current;

	mtx_lock(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
#ifdef DIAGNOSTIC
		/* Convenient time to verify vmem state. */
		if (enable_vmem_check == 1) {
			VMEM_LOCK(vm);
			vmem_check(vm);
			VMEM_UNLOCK(vm);
		}
#endif
		desired = 1 << flsl(vm->vm_nbusytag);
		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
		    VMEM_HASHSIZE_MAX);
		current = vm->vm_hashsize;

		/* Grow in powers of two.  Shrink less aggressively. */
		if (desired >= current * 2 || desired * 4 <= current)
			vmem_rehash(vm, desired);

		/*
		 * Periodically wake up threads waiting for resources,
		 * so that they can ask for reclamation again.
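		 * vmem_xalloc() sleeps on the arena condvar when an
		 * M_WAITOK request cannot be satisfied immediately.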
		 */
		VMEM_CONDVAR_BROADCAST(vm);
	}
	mtx_unlock(&vmem_list_lock);

	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}

static void
vmem_start_callout(void *unused)
{

	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
	vmem_periodic_interval = hz * 10;
	callout_init(&vmem_periodic_ch, 1);
	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);

static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
	bt_t *btspan;
	bt_t *btfree;

	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
	MPASS((size & vm->vm_quantum_mask) == 0);

	btspan = bt_alloc(vm);
	btspan->bt_type = type;
	btspan->bt_start = addr;
	btspan->bt_size = size;
	bt_insseg_tail(vm, btspan);

	btfree = bt_alloc(vm);
	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);

	vm->vm_size += size;
}

static void
vmem_destroy1(vmem_t *vm)
{
	bt_t *bt;

	/*
	 * Drain per-cpu quantum caches.
	 */
	qc_destroy(vm);

	/*
	 * The vmem should now only contain empty segments.
	 */
	VMEM_LOCK(vm);
	MPASS(vm->vm_nbusytag == 0);

	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
		bt_remseg(vm, bt);

	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
		free(vm->vm_hashlist, M_VMEM);

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	free(vm, M_VMEM);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_addr_t addr;
	int error;

	if (vm->vm_importfn == NULL)
		return (EINVAL);

	/*
	 * To make sure we get a span that meets the alignment we double it
	 * and add the size to the tail.  This slightly overestimates.
	 */
	if (align != vm->vm_quantum_mask + 1)
		size = (align * 2) + size;
	size = roundup(size, vm->vm_import_quantum);

	if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
		return (ENOMEM);

	/*
	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
	 * span and the tag we want to allocate from it.
	 */
	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
	vm->vm_nfreetags -= BT_MAXALLOC;
	VMEM_UNLOCK(vm);
	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	VMEM_LOCK(vm);
	vm->vm_nfreetags += BT_MAXALLOC;
	if (error)
		return (ENOMEM);

	vmem_add1(vm, addr, size, BT_TYPE_SPAN);

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * It is the caller's responsibility to ensure the region is big enough
 * before calling us.
 */
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
    vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	MPASS(size > 0);
	MPASS(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integers of the same width.
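	 * The fitting checks below are written as "end - start >= size - 1"
	 * style comparisons so that they cannot overflow at the top of the
	 * address range.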
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr)
		end = maxaddr;
	if (start > end)
		return (ENOMEM);

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start)
		start += align;
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		MPASS(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		MPASS((start & (align - 1)) == phase);
		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
		MPASS(minaddr <= start);
		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
		MPASS(bt->bt_start <= start);
		MPASS(BT_END(bt) - start >= size - 1);
		*addrp = start;

		return (0);
	}
	return (ENOMEM);
}

/*
 * vmem_clip:  Trim the boundary tag edges to the requested start and size.
 */
static void
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
{
	bt_t *btnew;
	bt_t *btprev;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_FREE);
	MPASS(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btprev = bt_alloc(vm);
		btprev->bt_type = BT_TYPE_FREE;
		btprev->bt_start = bt->bt_start;
		btprev->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btprev->bt_size;
		bt_insfree(vm, btprev);
		bt_insseg(vm, btprev,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
	}
	MPASS(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew = bt_alloc(vm);
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		bt = btnew;
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
	}
	MPASS(bt->bt_size >= size);
	bt->bt_type = BT_TYPE_BUSY;
}

/* ---- vmem API */

void
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
    vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
{

	VMEM_LOCK(vm);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_import_quantum = import_quantum;
	VMEM_UNLOCK(vm);
}

void
vmem_set_limit(vmem_t *vm, vmem_size_t limit)
{

	VMEM_LOCK(vm);
	vm->vm_limit = limit;
	VMEM_UNLOCK(vm);
}

void
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
{

	VMEM_LOCK(vm);
	vm->vm_reclaimfn = reclaimfn;
	VMEM_UNLOCK(vm);
}

/*
 * vmem_init: Initializes vmem arena.
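 *
 * The quantum must be a power of two and becomes the arena's minimum
 * unit of allocation.  If size is non-zero, [base, base + size) is added
 * as an initial static span.  qcache_max bounds how large an allocation
 * may be served from the per-size UMA quantum caches; qc_init() limits it
 * to at most VMEM_QCACHE_IDX_MAX quanta.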
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	int i;

	MPASS(quantum > 0);
	MPASS((quantum & (quantum - 1)) == 0);

	bzero(vm, sizeof(*vm));

	VMEM_CONDVAR_INIT(vm, name);
	VMEM_LOCK_INIT(vm, name);
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = flsl(quantum) - 1;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_limit = 0;
	vm->vm_inuse = 0;
	qc_init(vm, qcache_max);

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
	vm->vm_hashlist = vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

	mtx_lock(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	return vm;
}

/*
 * vmem_create: create an arena.
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{

	vmem_t *vm;

	vm = malloc(sizeof(*vm), M_VMEM, flags & (M_WAITOK|M_NOWAIT));
	if (vm == NULL)
		return (NULL);
	if (vmem_init(vm, name, base, size, quantum, qcache_max,
	    flags) == NULL)
		return (NULL);
	return (vm);
}

void
vmem_destroy(vmem_t *vm)
{

	mtx_lock(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc: allocate resource from the arena.
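 *
 * flags must select exactly one strategy (M_BESTFIT or M_FIRSTFIT) and
 * should pass M_WAITOK or M_NOWAIT, not both.  Requests no larger than
 * vm_qcache_max are satisfied from the per-CPU quantum caches; everything
 * else falls through to vmem_xalloc() with no alignment or boundary
 * constraints.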
 */
int
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	const int strat __unused = flags & VMEM_FITMASK;
	qcache_t *qc;

	flags &= VMEM_FLAGS;
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, flags);
		if (*addrp == 0)
			return (ENOMEM);
		return (0);
	}

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
    vmem_addr_t *addrp)
{
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	vmem_size_t avail;
	bt_t *bt;
	int error;
	int strat;

	flags &= VMEM_FLAGS;
	strat = flags & VMEM_FITMASK;
	MPASS(size0 > 0);
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
	MPASS((align & vm->vm_quantum_mask) == 0);
	MPASS((align & (align - 1)) == 0);
	MPASS((phase & vm->vm_quantum_mask) == 0);
	MPASS((nocross & vm->vm_quantum_mask) == 0);
	MPASS((nocross & (nocross - 1)) == 0);
	MPASS((align == 0 && phase == 0) || phase < align);
	MPASS(nocross == 0 || nocross >= size);
	MPASS(minaddr <= maxaddr);
	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0)
		align = vm->vm_quantum_mask + 1;

	*addrp = 0;
	end = &vm->vm_freelist[VMEM_MAXORDER];
	/*
	 * choose a free block from which we allocate.
	 */
	first = bt_freehead_toalloc(vm, size, strat);
	VMEM_LOCK(vm);
	for (;;) {
		/*
		 * Make sure we have enough tags to complete the
		 * operation.
		 */
		if (vm->vm_nfreetags < BT_MAXALLOC &&
		    bt_fill(vm, flags) != 0) {
			error = ENOMEM;
			break;
		}
		/*
		 * Scan freelists looking for a tag that satisfies the
		 * allocation.  If we're doing BESTFIT we may encounter
		 * sizes below the request.  If we're doing FIRSTFIT we
		 * inspect only the first element from each list.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					error = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, addrp);
					if (error == 0) {
						vmem_clip(vm, bt, *addrp, size);
						goto out;
					}
				}
				/* FIRST skips to the next list. */
				if (strat == M_FIRSTFIT)
					break;
			}
		}
		/*
		 * Retry if the fast algorithm failed.
		 */
		if (strat == M_FIRSTFIT) {
			strat = M_BESTFIT;
			first = bt_freehead_toalloc(vm, size, strat);
			continue;
		}
		/*
		 * XXX it is possible to fail to meet restrictions with the
		 * imported region.  It is up to the user to specify the
		 * import quantum such that it can satisfy any allocation.
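		 * (vmem_import() pads the request by twice the alignment to
		 * make a fit likely, but strict nocross or phase constraints
		 * can still defeat it.)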
		 */
		if (vmem_import(vm, size, align, flags) == 0)
			continue;

		/*
		 * Try to free some space from the quantum cache or reclaim
		 * functions if available.
		 */
		if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
			avail = vm->vm_size - vm->vm_inuse;
			VMEM_UNLOCK(vm);
			if (vm->vm_qcache_max != 0)
				qc_drain(vm);
			if (vm->vm_reclaimfn != NULL)
				vm->vm_reclaimfn(vm, flags);
			VMEM_LOCK(vm);
			/* If we were successful retry even NOWAIT. */
			if (vm->vm_size - vm->vm_inuse > avail)
				continue;
		}
		if ((flags & M_NOWAIT) != 0) {
			error = ENOMEM;
			break;
		}
		VMEM_CONDVAR_WAIT(vm);
	}
out:
	VMEM_UNLOCK(vm);
	if (error != 0 && (flags & M_NOWAIT) == 0)
		panic("failed to allocate waiting allocation\n");

	return (error);
}

/*
 * vmem_free: free the resource to the arena.
 */
void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	qcache_t *qc;
	MPASS(size > 0);

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		uma_zfree(qc->qc_cache, (void *)addr);
	} else
		vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	MPASS(size > 0);

	VMEM_LOCK(vm);
	bt = bt_lookupbusy(vm, addr);
	MPASS(bt != NULL);
	MPASS(bt->bt_start == addr);
	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	MPASS(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}

	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	MPASS(t != NULL);
	MPASS(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		MPASS(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_remseg(vm, t);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
	}
}

/*
 * vmem_add: add a contiguous span to the arena.  The span is marked
 * static and is never handed back through the release function.
 */
int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
{
	int error;

	error = 0;
	flags &= VMEM_FLAGS;
	VMEM_LOCK(vm);
	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
	else
		error = ENOMEM;
	VMEM_UNLOCK(vm);

	return (error);
}

/*
 * vmem_size: information about an arena's size.
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{
	int i;

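	/*
	 * VMEM_ALLOC and VMEM_FREE may be or'ed together to get the total
	 * arena size; VMEM_MAXFREE reports the size class of the largest
	 * free segment and is the only case that takes the arena lock.
	 */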
	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	case VMEM_MAXFREE:
		VMEM_LOCK(vm);
		for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
			if (LIST_EMPTY(&vm->vm_freelist[i]))
				continue;
			VMEM_UNLOCK(vm);
			return ((vmem_size_t)ORDER2SIZE(i) <<
			    vm->vm_quantum_shift);
		}
		VMEM_UNLOCK(vm);
		return (0);
	default:
		panic("vmem_size");
	}
}

/* ---- debug */

#if defined(DDB) || defined(DIAGNOSTIC)

static void bt_dump(const bt_t *, int (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{

	switch (type) {
	case BT_TYPE_BUSY:
		return "busy";
	case BT_TYPE_FREE:
		return "free";
	case BT_TYPE_SPAN:
		return "span";
	case BT_TYPE_SPAN_STATIC:
		return "static span";
	default:
		break;
	}
	return "BOGUS";
}

static void
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
{

	(*pr)("\t%p: %jx %jx, %d(%s)\n",
	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, int (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(DIAGNOSTIC) */

#if defined(DDB)
#include <ddb/ddb.h>

static bt_t *
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ?
"allocated" : "free"); 1460 } 1461 } 1462 1463 void 1464 vmem_printall(const char *modif, int (*pr)(const char *, ...)) 1465 { 1466 const vmem_t *vm; 1467 1468 LIST_FOREACH(vm, &vmem_list, vm_alllist) { 1469 vmem_dump(vm, pr); 1470 } 1471 } 1472 1473 void 1474 vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...)) 1475 { 1476 const vmem_t *vm = (const void *)addr; 1477 1478 vmem_dump(vm, pr); 1479 } 1480 1481 DB_SHOW_COMMAND(vmemdump, vmemdump) 1482 { 1483 1484 if (!have_addr) { 1485 db_printf("usage: show vmemdump <addr>\n"); 1486 return; 1487 } 1488 1489 vmem_dump((const vmem_t *)addr, db_printf); 1490 } 1491 1492 DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall) 1493 { 1494 const vmem_t *vm; 1495 1496 LIST_FOREACH(vm, &vmem_list, vm_alllist) 1497 vmem_dump(vm, db_printf); 1498 } 1499 1500 DB_SHOW_COMMAND(vmem, vmem_summ) 1501 { 1502 const vmem_t *vm = (const void *)addr; 1503 const bt_t *bt; 1504 size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER]; 1505 size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER]; 1506 int ord; 1507 1508 if (!have_addr) { 1509 db_printf("usage: show vmem <addr>\n"); 1510 return; 1511 } 1512 1513 db_printf("vmem %p '%s'\n", vm, vm->vm_name); 1514 db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1); 1515 db_printf("\tsize:\t%zu\n", vm->vm_size); 1516 db_printf("\tinuse:\t%zu\n", vm->vm_inuse); 1517 db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse); 1518 db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag); 1519 db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags); 1520 1521 memset(&ft, 0, sizeof(ft)); 1522 memset(&ut, 0, sizeof(ut)); 1523 memset(&fs, 0, sizeof(fs)); 1524 memset(&us, 0, sizeof(us)); 1525 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1526 ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift); 1527 if (bt->bt_type == BT_TYPE_BUSY) { 1528 ut[ord]++; 1529 us[ord] += bt->bt_size; 1530 } else if (bt->bt_type == BT_TYPE_FREE) { 1531 ft[ord]++; 1532 fs[ord] += bt->bt_size; 1533 } 1534 } 1535 db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n"); 1536 for (ord = 0; ord < VMEM_MAXORDER; ord++) { 1537 if (ut[ord] == 0 && ft[ord] == 0) 1538 continue; 1539 db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n", 1540 ORDER2SIZE(ord) << vm->vm_quantum_shift, 1541 ut[ord], us[ord], ft[ord], fs[ord]); 1542 } 1543 } 1544 1545 DB_SHOW_ALL_COMMAND(vmem, vmem_summall) 1546 { 1547 const vmem_t *vm; 1548 1549 LIST_FOREACH(vm, &vmem_list, vm_alllist) 1550 vmem_summ((db_expr_t)vm, TRUE, count, modif); 1551 } 1552 #endif /* defined(DDB) */ 1553 1554 #define vmem_printf printf 1555 1556 #if defined(DIAGNOSTIC) 1557 1558 static bool 1559 vmem_check_sanity(vmem_t *vm) 1560 { 1561 const bt_t *bt, *bt2; 1562 1563 MPASS(vm != NULL); 1564 1565 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1566 if (bt->bt_start > BT_END(bt)) { 1567 printf("corrupted tag\n"); 1568 bt_dump(bt, vmem_printf); 1569 return false; 1570 } 1571 } 1572 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1573 TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) { 1574 if (bt == bt2) { 1575 continue; 1576 } 1577 if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) { 1578 continue; 1579 } 1580 if (bt->bt_start <= BT_END(bt2) && 1581 bt2->bt_start <= BT_END(bt)) { 1582 printf("overwrapped tags\n"); 1583 bt_dump(bt, vmem_printf); 1584 bt_dump(bt2, vmem_printf); 1585 return false; 1586 } 1587 } 1588 } 1589 1590 return true; 1591 } 1592 1593 static void 1594 vmem_check(vmem_t *vm) 1595 { 1596 1597 if (!vmem_check_sanity(vm)) { 1598 panic("insanity vmem %p", vm); 1599 } 1600 } 1601 1602 #endif /* defined(DIAGNOSTIC) */ 1603