/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * Copyright (c) 2013 EMC Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From:
 *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
 *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>

#include "opt_vm.h"

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma_int.h>

int	vmem_startup_count(void);

#define	VMEM_OPTORDER		5
#define	VMEM_OPTVALUE		(1 << VMEM_OPTORDER)
#define	VMEM_MAXORDER						\
	(VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)

#define	VMEM_HASHSIZE_MIN	16
#define	VMEM_HASHSIZE_MAX	131072

#define	VMEM_QCACHE_IDX_MAX	16

#define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT)

#define	VMEM_FLAGS						\
	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | M_BESTFIT | M_FIRSTFIT)

#define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)

#define	QC_NAME_MAX	16

/*
 * Data structures private to vmem.
 */
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");

typedef struct vmem_btag bt_t;

TAILQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

struct qcache {
	uma_zone_t	qc_cache;
	vmem_t		*qc_vmem;
	vmem_size_t	qc_size;
	char		qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))

#define	VMEM_NAME_MAX	16

/* vmem arena */
struct vmem {
	struct mtx_padalign	vm_lock;
	struct cv		vm_cv;
	char			vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem)	vm_alllist;
	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
	struct vmem_seglist	vm_seglist;
	struct vmem_hashlist	*vm_hashlist;
	vmem_size_t		vm_hashsize;

	/* Constant after init */
	vmem_size_t		vm_qcache_max;
	vmem_size_t		vm_quantum_mask;
	vmem_size_t		vm_import_quantum;
	int			vm_quantum_shift;

	/* Written on alloc/free */
	LIST_HEAD(, vmem_btag)	vm_freetags;
	int			vm_nfreetags;
	int			vm_nbusytag;
	vmem_size_t		vm_inuse;
	vmem_size_t		vm_size;
	vmem_size_t		vm_limit;

	/* Used on import. */
	vmem_import_t		*vm_importfn;
	vmem_release_t		*vm_releasefn;
	void			*vm_arg;

	/* Space exhaustion callback. */
	vmem_reclaim_t		*vm_reclaimfn;

	/* quantum cache */
	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
};

/* boundary tag */
struct vmem_btag {
	TAILQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t	bt_start;
	vmem_size_t	bt_size;
	int		bt_type;
};

#define	BT_TYPE_SPAN		1	/* Allocated from importfn */
#define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
#define	BT_TYPE_FREE		3	/* Available space. */
#define	BT_TYPE_BUSY		4	/* Used space. */
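/*
 * Illustrative sketch (not compiled as part of this file) of how boundary
 * tags describe an arena's resource; the addresses and sizes below are
 * hypothetical and assume a 4096-byte quantum.  Adding a four-page span
 * creates two tags on the segment list, and a later one-page allocation
 * clips the free tag:
 *
 *	vmem_add(vm, 0x1000, 4 * 4096, M_WAITOK);
 *	// seglist: [SPAN_STATIC 0x1000/0x4000] -> [FREE 0x1000/0x4000]
 *
 *	vmem_xalloc(vm, 4096, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
 *	    M_BESTFIT | M_WAITOK, &addr);
 *	// seglist: [SPAN_STATIC 0x1000/0x4000] -> [BUSY 0x1000/0x1000]
 *	//          -> [FREE 0x2000/0x3000]
 *
 * BUSY tags are additionally hashed by bt_start so they can be found at
 * free time, and FREE tags sit on the power-of-two freelists declared above.
 */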
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)

#if defined(DIAGNOSTIC)
static int enable_vmem_check = 1;
SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
    &enable_vmem_check, 0, "Enable vmem check");
static void vmem_check(vmem_t *);
#endif

static struct callout	vmem_periodic_ch;
static int		vmem_periodic_interval;
static struct task	vmem_periodic_wk;

static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
static uma_zone_t vmem_zone;

/* ---- misc */
#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)

#define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
#define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);

#define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((order) < VMEM_OPTVALUE ? ((order) + 1) : \
	(vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
#define	SIZE2ORDER(size)	((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
	(flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))

/*
 * Maximum number of boundary tags that may be required to satisfy an
 * allocation.  Two may be required to import.  Another two may be
 * required to clip edges.
 */
#define	BT_MAXALLOC	4

/*
 * Max free limits the number of locally cached boundary tags.  We
 * just want to avoid hitting the zone allocator for every call.
 */
#define	BT_MAXFREE	(BT_MAXALLOC * 8)

/* Allocator for boundary tags. */
static uma_zone_t vmem_bt_zone;

/* boot time arena storage. */
static struct vmem kernel_arena_storage;
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
/* kernel and kmem arenas are aliased for backwards KPI compat. */
vmem_t *kernel_arena = &kernel_arena_storage;
#if VM_NRESERVLEVEL > 0
vmem_t *kernel_rwx_arena = NULL;
#endif
vmem_t *kmem_arena = &kernel_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;

#ifdef DEBUG_MEMGUARD
static struct vmem memguard_arena_storage;
vmem_t *memguard_arena = &memguard_arena_storage;
#endif

/*
 * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
 * allocation will not fail once bt_fill() passes.  To do so we cache
 * at least the maximum possible tag allocations in the arena.
 */
static int
bt_fill(vmem_t *vm, int flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * Only allow the kernel arena and arenas derived from the kernel
	 * arena to dip into reserve tags.  They are where new tags come from.
	 */
	flags &= BT_FLAGS;
	if (vm != kernel_arena && vm->vm_arg != kernel_arena)
		flags &= ~M_USE_RESERVE;

	/*
	 * Loop until we meet the reserve.  To minimize the lock shuffle
	 * and prevent simultaneous fills we first try a NOWAIT regardless
	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
	 * holding a vmem lock.
	 */
	while (vm->vm_nfreetags < BT_MAXALLOC) {
		bt = uma_zalloc(vmem_bt_zone,
		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
		if (bt == NULL) {
			VMEM_UNLOCK(vm);
			bt = uma_zalloc(vmem_bt_zone, flags);
			VMEM_LOCK(vm);
			if (bt == NULL && (flags & M_NOWAIT) != 0)
				break;
		}
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags < BT_MAXALLOC)
		return ENOMEM;

	return 0;
}

/*
 * Pop a tag off of the freetag stack.
 */
static bt_t *
bt_alloc(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	bt = LIST_FIRST(&vm->vm_freetags);
	MPASS(bt != NULL);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return bt;
}

/*
 * Trim the per-vmem free list.  Returns with the lock released to
 * avoid allocator recursions.
 */
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	LIST_HEAD(, vmem_btag) freetags;
	bt_t *bt;

	LIST_INIT(&freetags);
	VMEM_ASSERT_LOCKED(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
	}
	VMEM_UNLOCK(vm);
	while ((bt = LIST_FIRST(&freetags)) != NULL) {
		LIST_REMOVE(bt, bt_freelist);
		uma_zfree(vmem_bt_zone, bt);
	}
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 2]
 *  :
 * freelist[29] ... [30, 30]
 * freelist[30] ... [31, 31]
 * freelist[31] ... [32, 63]
 * freelist[32] ... [64, 127]
 *  :
 * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For M_FIRSTFIT, return the list in which any blocks are large enough
 * for the requested size.  Otherwise, return the list which can have blocks
 * large enough for the requested size.
 */
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);

	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
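/*
 * Worked example of the order mapping above, assuming a 4 KB quantum (the
 * sizes are hypothetical, chosen only to exercise the macros):
 *
 *	size 96 KB  -> qsize 24  -> SIZE2ORDER(24)  = 23, ORDER2SIZE(23) = 24
 *	size 384 KB -> qsize 96  -> SIZE2ORDER(96)  = flsl(96) + 25 = 32,
 *	                            ORDER2SIZE(32)  = 64 (!= 96)
 *	size 512 KB -> qsize 128 -> SIZE2ORDER(128) = 33, ORDER2SIZE(33) = 128
 *
 * So a free 96-quantum segment is filed on freelist[32] ([64, 127] quanta).
 * A best-fit allocation of 96 quanta starts scanning at freelist[32], while
 * a first-fit allocation bumps to freelist[33] because ORDER2SIZE(32) != 96,
 * guaranteeing that any segment found there is already large enough.
 */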
/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), 0);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
	bt_free(vm, bt);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

/*
 * Import from the arena into the quantum cache in UMA.
 */
static int
qc_import(void *arg, void **store, int cnt, int domain, int flags)
{
	qcache_t *qc;
	vmem_addr_t addr;
	int i;

	qc = arg;
	if ((flags & VMEM_FITMASK) == 0)
		flags |= M_BESTFIT;
	for (i = 0; i < cnt; i++) {
		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
			break;
		store[i] = (void *)addr;
		/* Only guarantee one allocation. */
		flags &= ~M_WAITOK;
		flags |= M_NOWAIT;
	}
	return i;
}
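/*
 * Illustrative sketch of the quantum cache path; the arena name and sizes
 * are hypothetical and assume 4 KB pages.  With a 4 KB quantum and a
 * qcache_max of 16 KB, qc_init() creates UMA caches "foo-4096" through
 * "foo-16384", so
 *
 *	vmem_alloc(foo_arena, 2 * 4096, M_BESTFIT | M_WAITOK, &addr);
 *	// served by uma_zalloc() on the "foo-8192" cache; only cache
 *	// misses reach qc_import(), which backs the cache with
 *	// vmem_xalloc()'d chunks of qc_size.
 *
 * qc_import() deliberately downgrades to M_NOWAIT after its first success,
 * so a bulk refill sleeps at most once.
 */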
/*
 * Release memory from the UMA cache to the arena.
 */
static void
qc_release(void *arg, void **store, int cnt)
{
	qcache_t *qc;
	int i;

	qc = arg;
	for (i = 0; i < cnt; i++)
		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
}

static void
qc_init(vmem_t *vm, vmem_size_t qcache_max)
{
	qcache_t *qc;
	vmem_size_t size;
	int qcache_idx_max;
	int i;

	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
	    VMEM_QCACHE_IDX_MAX);
	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++) {
		qc = &vm->vm_qcache[i];
		size = (i + 1) << vm->vm_quantum_shift;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_vmem = vm;
		qc->qc_size = size;
		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
		    UMA_ZONE_VM);
		MPASS(qc->qc_cache);
	}
}

static void
qc_destroy(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zdestroy(vm->vm_qcache[i].qc_cache);
}

static void
qc_drain(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		zone_drain(vm->vm_qcache[i].qc_cache);
}

#ifndef UMA_MD_SMALL_ALLOC

static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;

/*
 * vmem_bt_alloc:  Allocate a new page of boundary tags.
 *
 * On architectures with uma_small_alloc there is no recursion; no address
 * space need be allocated to allocate boundary tags.  For the others, we
 * must handle recursion.  Boundary tags are necessary to allocate new
 * boundary tags.
 *
 * UMA guarantees that enough tags are held in reserve to allocate a new
 * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
 * when allocating the page to hold new boundary tags.  In this way the
 * reserve is automatically filled by the allocation that uses the reserve.
 *
 * We still have to guarantee that the new tags are allocated atomically since
 * many threads may try concurrently.  The bt_lock provides this guarantee.
 * We convert WAITOK allocations to NOWAIT and then handle the blocking here
 * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
 * loop again after checking to see if we lost the race to allocate.
 *
 * There is a small race between vmem_bt_alloc() returning the page and the
 * zone lock being acquired to add the page to the zone.  For WAITOK
 * allocations we just pause briefly.  NOWAIT may experience a transient
 * failure.  To alleviate this we permit a small number of simultaneous
 * fills to proceed concurrently so NOWAIT is less likely to fail unless
 * we are really out of KVA.
 */
static void *
vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	vmem_addr_t addr;

	*pflag = UMA_SLAB_KERNEL;

	/*
	 * Single thread boundary tag allocation so that the address space
	 * and memory are added in one atomic operation.
	 */
	mtx_lock(&vmem_bt_lock);
	if (vmem_xalloc(vm_dom[domain].vmd_kernel_arena, bytes, 0, 0, 0,
	    VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
		if (kmem_back_domain(domain, kernel_object, addr, bytes,
		    M_NOWAIT | M_USE_RESERVE) == 0) {
			mtx_unlock(&vmem_bt_lock);
			return ((void *)addr);
		}
		vmem_xfree(vm_dom[domain].vmd_kernel_arena, addr, bytes);
		mtx_unlock(&vmem_bt_lock);
		/*
		 * Out of memory, not address space.  This may not even be
		 * possible due to M_USE_RESERVE page allocation.
		 */
		if (wait & M_WAITOK)
			vm_wait_domain(domain);
		return (NULL);
	}
	mtx_unlock(&vmem_bt_lock);
	/*
	 * We're either out of address space or lost a fill race.
	 */
	if (wait & M_WAITOK)
		pause("btalloc", 1);

	return (NULL);
}

/*
 * How many pages do we need to startup_alloc?
 */
int
vmem_startup_count(void)
{

	return (howmany(BT_MAXALLOC,
	    UMA_SLAB_SPACE / sizeof(struct vmem_btag)));
}
#endif

void
vmem_startup(void)
{

	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
	vmem_zone = uma_zcreate("vmem",
	    sizeof(struct vmem), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);
	vmem_bt_zone = uma_zcreate("vmem btag",
	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
#ifndef UMA_MD_SMALL_ALLOC
	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
	/*
	 * Reserve enough tags to allocate new tags.  We allow multiple
	 * CPUs to attempt to allocate new tags concurrently to limit
	 * false restarts in UMA.
	 */
	uma_zone_reserve(vmem_bt_zone, BT_MAXALLOC * (mp_ncpus + 1) / 2);
	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
#endif
}

/* ---- rehash */

static int
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	vmem_size_t oldhashsize;

	MPASS(newhashsize > 0);

	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
	    M_VMEM, M_NOWAIT);
	if (newhashlist == NULL)
		return ENOMEM;
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != vm->vm_hash0) {
		free(oldhashlist, M_VMEM);
	}

	return 0;
}

static void
vmem_periodic_kick(void *dummy)
{

	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
}

static void
vmem_periodic(void *unused, int pending)
{
	vmem_t *vm;
	vmem_size_t desired;
	vmem_size_t current;

	mtx_lock(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
#ifdef DIAGNOSTIC
		/* Convenient time to verify vmem state. */
		if (enable_vmem_check == 1) {
			VMEM_LOCK(vm);
			vmem_check(vm);
			VMEM_UNLOCK(vm);
		}
#endif
		desired = 1 << flsl(vm->vm_nbusytag);
		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
		    VMEM_HASHSIZE_MAX);
		current = vm->vm_hashsize;

		/* Grow in powers of two.  Shrink less aggressively. */
		if (desired >= current * 2 || desired * 4 <= current)
			vmem_rehash(vm, desired);

		/*
		 * Periodically wake up threads waiting for resources,
		 * so they can ask for reclamation again.
		 */
		VMEM_CONDVAR_BROADCAST(vm);
	}
	mtx_unlock(&vmem_list_lock);

	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}

static void
vmem_start_callout(void *unused)
{

	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
	vmem_periodic_interval = hz * 10;
	callout_init(&vmem_periodic_ch, 1);
	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);

static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
	bt_t *btspan;
	bt_t *btfree;

	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
	MPASS((size & vm->vm_quantum_mask) == 0);

	btspan = bt_alloc(vm);
	btspan->bt_type = type;
	btspan->bt_start = addr;
	btspan->bt_size = size;
	bt_insseg_tail(vm, btspan);

	btfree = bt_alloc(vm);
	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);

	vm->vm_size += size;
}

static void
vmem_destroy1(vmem_t *vm)
{
	bt_t *bt;

	/*
	 * Drain per-cpu quantum caches.
	 */
	qc_destroy(vm);

	/*
	 * The vmem should now only contain empty segments.
	 */
	VMEM_LOCK(vm);
	MPASS(vm->vm_nbusytag == 0);

	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
		bt_remseg(vm, bt);

	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
		free(vm->vm_hashlist, M_VMEM);

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	uma_zfree(vmem_zone, vm);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_addr_t addr;
	int error;

	if (vm->vm_importfn == NULL)
		return (EINVAL);

	/*
	 * To make sure we get a span that meets the alignment we double it
	 * and add the size to the tail.  This slightly overestimates.
	 */
	if (align != vm->vm_quantum_mask + 1)
		size = (align * 2) + size;
	size = roundup(size, vm->vm_import_quantum);

	if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
		return (ENOMEM);

	/*
	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
	 * span and the tag we want to allocate from it.
	 */
	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
	vm->vm_nfreetags -= BT_MAXALLOC;
	VMEM_UNLOCK(vm);
	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	VMEM_LOCK(vm);
	vm->vm_nfreetags += BT_MAXALLOC;
	if (error)
		return (ENOMEM);

	vmem_add1(vm, addr, size, BT_TYPE_SPAN);

	return 0;
}
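/*
 * Worked example of the import sizing above (hypothetical values): with a
 * 4 KB quantum and a 1 MB vm_import_quantum, importing for a 256 KB
 * allocation with 64 KB alignment requests
 *
 *	size = 2 * 64 KB + 256 KB = 384 KB, rounded up to 1 MB,
 *
 * so the parent hands back at least one full import quantum and the aligned
 * 256 KB allocation is guaranteed to fit somewhere inside it.  When the
 * requested alignment is just the quantum, the doubling is skipped and only
 * the roundup to vm_import_quantum applies.
 */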
/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * It is the caller's responsibility to ensure the region is big enough
 * before calling us.
 */
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
    vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	MPASS(size > 0);
	MPASS(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integers of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr)
		end = maxaddr;
	if (start > end)
		return (ENOMEM);

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start)
		start += align;
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		MPASS(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		MPASS((start & (align - 1)) == phase);
		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
		MPASS(minaddr <= start);
		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
		MPASS(bt->bt_start <= start);
		MPASS(BT_END(bt) - start >= size - 1);
		*addrp = start;

		return (0);
	}
	return (ENOMEM);
}

/*
 * vmem_clip:  Trim the boundary tag edges to the requested start and size.
 */
static void
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
{
	bt_t *btnew;
	bt_t *btprev;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_FREE);
	MPASS(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btprev = bt_alloc(vm);
		btprev->bt_type = BT_TYPE_FREE;
		btprev->bt_start = bt->bt_start;
		btprev->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btprev->bt_size;
		bt_insfree(vm, btprev);
		bt_insseg(vm, btprev,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
	}
	MPASS(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew = bt_alloc(vm);
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		bt = btnew;
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
	}
	MPASS(bt->bt_size >= size);
}

/* ---- vmem API */

void
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
    vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
{

	VMEM_LOCK(vm);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_import_quantum = import_quantum;
	VMEM_UNLOCK(vm);
}

void
vmem_set_limit(vmem_t *vm, vmem_size_t limit)
{

	VMEM_LOCK(vm);
	vm->vm_limit = limit;
	VMEM_UNLOCK(vm);
}

void
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
{

	VMEM_LOCK(vm);
	vm->vm_reclaimfn = reclaimfn;
	VMEM_UNLOCK(vm);
}
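/*
 * Illustrative sketch of wiring a sub-arena to a parent with the setters
 * above.  The function names, arena and the 1 MB import quantum are
 * hypothetical; the callback shapes follow how vmem_import() invokes
 * vm_importfn and how vmem_xfree() invokes vm_releasefn in this file.
 *
 *	static int
 *	foo_import(void *arg, vmem_size_t size, int flags, vmem_addr_t *addrp)
 *	{
 *		return (vmem_xalloc((vmem_t *)arg, size, 0, 0, 0,
 *		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, addrp));
 *	}
 *
 *	static void
 *	foo_release(void *arg, vmem_addr_t addr, vmem_size_t size)
 *	{
 *		vmem_xfree((vmem_t *)arg, addr, size);
 *	}
 *
 *	vmem_set_import(foo_arena, foo_import, foo_release, kernel_arena,
 *	    1024 * 1024);
 *
 * With this in place, vmem_xalloc() on foo_arena imports 1 MB spans from
 * kernel_arena on demand, and vmem_xfree() hands a span back once it is
 * entirely free again.
 */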
/*
 * vmem_init: Initialize a vmem arena.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	int i;

	MPASS(quantum > 0);
	MPASS((quantum & (quantum - 1)) == 0);

	bzero(vm, sizeof(*vm));

	VMEM_CONDVAR_INIT(vm, name);
	VMEM_LOCK_INIT(vm, name);
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = flsl(quantum) - 1;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_limit = 0;
	vm->vm_inuse = 0;
	qc_init(vm, qcache_max);

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
	vm->vm_hashlist = vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

	mtx_lock(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	return vm;
}

/*
 * vmem_create: create an arena.
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{

	vmem_t *vm;

	vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
	if (vm == NULL)
		return (NULL);
	if (vmem_init(vm, name, base, size, quantum, qcache_max,
	    flags) == NULL)
		return (NULL);
	return (vm);
}

void
vmem_destroy(vmem_t *vm)
{

	mtx_lock(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}
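/*
 * Illustrative arena lifecycle using the functions above; the name, sizes
 * and backing range are hypothetical:
 *
 *	vmem_t *foo_arena;
 *	vmem_addr_t addr;
 *
 *	// 1 MB of resource at 0x100000, 4 KB quantum, no quantum cache.
 *	foo_arena = vmem_create("foo", 0x100000, 1024 * 1024, 4096, 0,
 *	    M_WAITOK);
 *	if (vmem_alloc(foo_arena, 64 * 1024, M_BESTFIT | M_WAITOK, &addr) == 0)
 *		vmem_free(foo_arena, addr, 64 * 1024);
 *	vmem_destroy(foo_arena);
 *
 * The "resource" is just a range of integers; nothing is mapped or touched,
 * which is what lets vmem manage KVA, device apertures, IDs and the like.
 */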
/*
 * vmem_alloc: allocate resource from the arena.
 */
int
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	const int strat __unused = flags & VMEM_FITMASK;
	qcache_t *qc;

	flags &= VMEM_FLAGS;
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, flags);
		if (*addrp == 0)
			return (ENOMEM);
		return (0);
	}

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
    vmem_addr_t *addrp)
{
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	vmem_size_t avail;
	bt_t *bt;
	int error;
	int strat;

	flags &= VMEM_FLAGS;
	strat = flags & VMEM_FITMASK;
	MPASS(size0 > 0);
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
	MPASS((align & vm->vm_quantum_mask) == 0);
	MPASS((align & (align - 1)) == 0);
	MPASS((phase & vm->vm_quantum_mask) == 0);
	MPASS((nocross & vm->vm_quantum_mask) == 0);
	MPASS((nocross & (nocross - 1)) == 0);
	MPASS((align == 0 && phase == 0) || phase < align);
	MPASS(nocross == 0 || nocross >= size);
	MPASS(minaddr <= maxaddr);
	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0)
		align = vm->vm_quantum_mask + 1;

	*addrp = 0;
	end = &vm->vm_freelist[VMEM_MAXORDER];
	/*
	 * Choose a free block from which we allocate.
	 */
	first = bt_freehead_toalloc(vm, size, strat);
	VMEM_LOCK(vm);
	for (;;) {
		/*
		 * Make sure we have enough tags to complete the
		 * operation.
		 */
		if (vm->vm_nfreetags < BT_MAXALLOC &&
		    bt_fill(vm, flags) != 0) {
			error = ENOMEM;
			break;
		}
		/*
		 * Scan freelists looking for a tag that satisfies the
		 * allocation.  If we're doing BESTFIT we may encounter
		 * sizes below the request.  If we're doing FIRSTFIT we
		 * inspect only the first element from each list.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					error = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, addrp);
					if (error == 0) {
						vmem_clip(vm, bt, *addrp, size);
						goto out;
					}
				}
				/* FIRST skips to the next list. */
				if (strat == M_FIRSTFIT)
					break;
			}
		}
		/*
		 * Retry if the fast algorithm failed.
		 */
		if (strat == M_FIRSTFIT) {
			strat = M_BESTFIT;
			first = bt_freehead_toalloc(vm, size, strat);
			continue;
		}
		/*
		 * XXX it is possible to fail to meet restrictions with the
		 * imported region.  It is up to the user to specify the
		 * import quantum such that it can satisfy any allocation.
		 */
		if (vmem_import(vm, size, align, flags) == 0)
			continue;

		/*
		 * Try to free some space from the quantum cache or reclaim
		 * functions if available.
		 */
		if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
			avail = vm->vm_size - vm->vm_inuse;
			VMEM_UNLOCK(vm);
			if (vm->vm_qcache_max != 0)
				qc_drain(vm);
			if (vm->vm_reclaimfn != NULL)
				vm->vm_reclaimfn(vm, flags);
			VMEM_LOCK(vm);
			/* If we were successful retry even NOWAIT. */
			if (vm->vm_size - vm->vm_inuse > avail)
				continue;
		}
		if ((flags & M_NOWAIT) != 0) {
			error = ENOMEM;
			break;
		}
		VMEM_CONDVAR_WAIT(vm);
	}
out:
	VMEM_UNLOCK(vm);
	if (error != 0 && (flags & M_NOWAIT) == 0)
		panic("failed to allocate waiting allocation\n");

	return (error);
}
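/*
 * Illustrative constrained allocation with vmem_xalloc(); the arena and
 * values are hypothetical.  Carve out 8 KB that is 64 KB aligned, does not
 * cross a 1 MB boundary, and lies below 4 GB:
 *
 *	error = vmem_xalloc(foo_arena, 8 * 1024,
 *	    64 * 1024,			// align
 *	    0,				// phase: offset from the alignment
 *	    1024 * 1024,		// nocross boundary
 *	    VMEM_ADDR_MIN, 0xffffffffUL,	// minaddr, maxaddr
 *	    M_BESTFIT | M_NOWAIT, &addr);
 *
 * align, phase and nocross must all be multiples of the quantum, align and
 * nocross must be powers of two, and nocross must be at least the rounded-up
 * size, per the assertions at the top of vmem_xalloc().
 */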
/*
 * vmem_free: free the resource to the arena.
 */
void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	qcache_t *qc;
	MPASS(size > 0);

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		uma_zfree(qc->qc_cache, (void *)addr);
	} else
		vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	MPASS(size > 0);

	VMEM_LOCK(vm);
	bt = bt_lookupbusy(vm, addr);
	MPASS(bt != NULL);
	MPASS(bt->bt_start == addr);
	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	MPASS(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}

	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	MPASS(t != NULL);
	MPASS(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		MPASS(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_remseg(vm, t);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
	}
}

/*
 * vmem_add: add a span of static resource to the arena.
 */
int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
{
	int error;

	error = 0;
	flags &= VMEM_FLAGS;
	VMEM_LOCK(vm);
	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
	else
		error = ENOMEM;
	VMEM_UNLOCK(vm);

	return (error);
}

/*
 * vmem_size: report information about the arena's size.
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{
	int i;

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	case VMEM_MAXFREE:
		VMEM_LOCK(vm);
		for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
			if (LIST_EMPTY(&vm->vm_freelist[i]))
				continue;
			VMEM_UNLOCK(vm);
			return ((vmem_size_t)ORDER2SIZE(i) <<
			    vm->vm_quantum_shift);
		}
		VMEM_UNLOCK(vm);
		return (0);
	default:
		panic("vmem_size");
	}
}
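/*
 * Illustrative size queries on a hypothetical arena: after the 64 KB
 * allocation in the earlier lifecycle sketch, a 1 MB arena would report
 *
 *	vmem_size(foo_arena, VMEM_ALLOC)             == 64 * 1024
 *	vmem_size(foo_arena, VMEM_FREE)              == 960 * 1024
 *	vmem_size(foo_arena, VMEM_ALLOC | VMEM_FREE) == 1024 * 1024
 *
 * while VMEM_MAXFREE reports only the ORDER2SIZE() of the highest non-empty
 * freelist, so it is a lower bound on the largest free segment rather than
 * an exact figure.
 */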
/* ---- debug */

#if defined(DDB) || defined(DIAGNOSTIC)

static void bt_dump(const bt_t *, int (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{

	switch (type) {
	case BT_TYPE_BUSY:
		return "busy";
	case BT_TYPE_FREE:
		return "free";
	case BT_TYPE_SPAN:
		return "span";
	case BT_TYPE_SPAN_STATIC:
		return "static span";
	default:
		break;
	}
	return "BOGUS";
}

static void
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
{

	(*pr)("\t%p: %jx %jx, %d(%s)\n",
	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, int (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(DIAGNOSTIC) */

#if defined(DDB)
#include <ddb/ddb.h>

static bt_t *
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}

void
vmem_printall(const char *modif, int (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}

DB_SHOW_COMMAND(vmemdump, vmemdump)
{

	if (!have_addr) {
		db_printf("usage: show vmemdump <addr>\n");
		return;
	}

	vmem_dump((const vmem_t *)addr, db_printf);
}

DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall)
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist)
		vmem_dump(vm, db_printf);
}

DB_SHOW_COMMAND(vmem, vmem_summ)
{
	const vmem_t *vm = (const void *)addr;
	const bt_t *bt;
	size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER];
	size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER];
	int ord;

	if (!have_addr) {
		db_printf("usage: show vmem <addr>\n");
		return;
	}

	db_printf("vmem %p '%s'\n", vm, vm->vm_name);
	db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1);
	db_printf("\tsize:\t%zu\n", vm->vm_size);
	db_printf("\tinuse:\t%zu\n", vm->vm_inuse);
	db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse);
	db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag);
	db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags);

	memset(&ft, 0, sizeof(ft));
	memset(&ut, 0, sizeof(ut));
	memset(&fs, 0, sizeof(fs));
	memset(&us, 0, sizeof(us));
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift);
		if (bt->bt_type == BT_TYPE_BUSY) {
			ut[ord]++;
			us[ord] += bt->bt_size;
		} else if (bt->bt_type == BT_TYPE_FREE) {
			ft[ord]++;
			fs[ord] += bt->bt_size;
		}
	}
	db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n");
	for (ord = 0; ord < VMEM_MAXORDER; ord++) {
		if (ut[ord] == 0 && ft[ord] == 0)
			continue;
		db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n",
		    ORDER2SIZE(ord) << vm->vm_quantum_shift,
		    ut[ord], us[ord], ft[ord], fs[ord]);
	}
}

DB_SHOW_ALL_COMMAND(vmem, vmem_summall)
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist)
		vmem_summ((db_expr_t)vm, TRUE, count, modif);
}
#endif /* defined(DDB) */

#define vmem_printf printf

#if defined(DIAGNOSTIC)

static bool
vmem_check_sanity(vmem_t *vm)
{
	const bt_t *bt, *bt2;

	MPASS(vm != NULL);

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_start > BT_END(bt)) {
			printf("corrupted tag\n");
			bt_dump(bt, vmem_printf);
			return false;
		}
	}
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
			if (bt == bt2) {
				continue;
			}
			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
				continue;
			}
			if (bt->bt_start <= BT_END(bt2) &&
			    bt2->bt_start <= BT_END(bt)) {
				printf("overlapping tags\n");
				bt_dump(bt, vmem_printf);
				bt_dump(bt2, vmem_printf);
				return false;
			}
		}
	}

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(DIAGNOSTIC) */