/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * Copyright (c) 2013 EMC Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From:
 *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
 *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>

#include "opt_vm.h"

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma_int.h>

int	vmem_startup_count(void);

#define	VMEM_OPTORDER		5
#define	VMEM_OPTVALUE		(1 << VMEM_OPTORDER)
#define	VMEM_MAXORDER						\
    (VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)

#define	VMEM_HASHSIZE_MIN	16
#define	VMEM_HASHSIZE_MAX	131072

#define	VMEM_QCACHE_IDX_MAX	16

#define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)

#define	VMEM_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM |	\
    M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)

#define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)

#define	QC_NAME_MAX	16
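
/*
 * Sketch of typical use of the vmem KPI (illustrative values only; the
 * arena name "example" and the address range are made up, and an arena
 * manages an abstract range of integers, not necessarily mapped memory):
 *
 *	vmem_t *arena;
 *	vmem_addr_t addr;
 *
 *	arena = vmem_create("example", 0x1000, 0x100000, PAGE_SIZE, 0,
 *	    M_WAITOK);
 *	if (vmem_alloc(arena, 2 * PAGE_SIZE, M_BESTFIT | M_WAITOK,
 *	    &addr) == 0)
 *		vmem_free(arena, addr, 2 * PAGE_SIZE);
 *	vmem_destroy(arena);
 */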

/*
 * Data structures private to vmem.
 */
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");

typedef struct vmem_btag bt_t;

TAILQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

struct qcache {
	uma_zone_t	qc_cache;
	vmem_t		*qc_vmem;
	vmem_size_t	qc_size;
	char		qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))

#define	VMEM_NAME_MAX	16

/* boundary tag */
struct vmem_btag {
	TAILQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t	bt_start;
	vmem_size_t	bt_size;
	int		bt_type;
};

/* vmem arena */
struct vmem {
	struct mtx_padalign	vm_lock;
	struct cv		vm_cv;
	char			vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem)	vm_alllist;
	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
	struct vmem_seglist	vm_seglist;
	struct vmem_hashlist	*vm_hashlist;
	vmem_size_t		vm_hashsize;

	/* Constant after init */
	vmem_size_t		vm_qcache_max;
	vmem_size_t		vm_quantum_mask;
	vmem_size_t		vm_import_quantum;
	int			vm_quantum_shift;

	/* Written on alloc/free */
	LIST_HEAD(, vmem_btag)	vm_freetags;
	int			vm_nfreetags;
	int			vm_nbusytag;
	vmem_size_t		vm_inuse;
	vmem_size_t		vm_size;
	vmem_size_t		vm_limit;
	struct vmem_btag	vm_cursor;

	/* Used on import. */
	vmem_import_t		*vm_importfn;
	vmem_release_t		*vm_releasefn;
	void			*vm_arg;

	/* Space exhaustion callback. */
	vmem_reclaim_t		*vm_reclaimfn;

	/* quantum cache */
	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
};

#define	BT_TYPE_SPAN		1	/* Allocated from importfn */
#define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
#define	BT_TYPE_FREE		3	/* Available space. */
#define	BT_TYPE_BUSY		4	/* Used space. */
#define	BT_TYPE_CURSOR		5	/* Cursor for nextfit allocations. */
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)
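
/*
 * Illustration of the segment list for a single imported span (the
 * addresses are examples): vmem_add1() inserts a SPAN tag covering the
 * whole import followed by one FREE tag, and vmem_clip() later splits
 * the FREE tag around each allocation:
 *
 *	SPAN  [0x1000, 0x4fff]		(bookkeeping only)
 *	FREE  [0x1000, 0x1fff]
 *	BUSY  [0x2000, 0x2fff]		(a vmem_alloc() result)
 *	FREE  [0x3000, 0x4fff]
 */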

#if defined(DIAGNOSTIC)
static int enable_vmem_check = 1;
SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
    &enable_vmem_check, 0, "Enable vmem check");
static void vmem_check(vmem_t *);
#endif

static struct callout	vmem_periodic_ch;
static int		vmem_periodic_interval;
static struct task	vmem_periodic_wk;

static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
static uma_zone_t vmem_zone;

/* ---- misc */
#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)

#define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
#define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);

#define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((order) < VMEM_OPTVALUE ? ((order) + 1) : \
	(vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
#define	SIZE2ORDER(size)	((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
	(flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
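
/*
 * Worked example of the size/order mapping with VMEM_OPTORDER == 5
 * (VMEM_OPTVALUE == 32): sizes of 1..32 quanta map one-to-one onto
 * orders 0..31, after which the orders grow as powers of two:
 *
 *	SIZE2ORDER(32) == 31			exact small size
 *	SIZE2ORDER(33) == flsl(33) + 25 == 31	shares freelist[31]
 *	SIZE2ORDER(64) == flsl(64) + 25 == 32
 *	ORDER2SIZE(31) == 32			smallest size in freelist[31]
 *	ORDER2SIZE(32) == 64
 */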

/*
 * Maximum number of boundary tags that may be required to satisfy an
 * allocation.  Two may be required to import.  Another two may be
 * required to clip edges.
 */
#define	BT_MAXALLOC	4

/*
 * BT_MAXFREE limits the number of locally cached boundary tags.  We
 * just want to avoid hitting the zone allocator for every call.
 */
#define	BT_MAXFREE	(BT_MAXALLOC * 8)

/* Allocator for boundary tags. */
static uma_zone_t vmem_bt_zone;

/* boot time arena storage. */
static struct vmem kernel_arena_storage;
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
/* kernel and kmem arenas are aliased for backwards KPI compat. */
vmem_t *kernel_arena = &kernel_arena_storage;
vmem_t *kmem_arena = &kernel_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;

#ifdef DEBUG_MEMGUARD
static struct vmem memguard_arena_storage;
vmem_t *memguard_arena = &memguard_arena_storage;
#endif

/*
 * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
 * allocation will not fail once bt_fill() passes.  To do so we cache
 * at least the maximum possible tag allocations in the arena.
 */
static int
bt_fill(vmem_t *vm, int flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * Only allow the kernel arena and arenas derived from the kernel
	 * arena to dip into reserve tags.  They are where new tags come from.
	 */
	flags &= BT_FLAGS;
	if (vm != kernel_arena && vm->vm_arg != kernel_arena)
		flags &= ~M_USE_RESERVE;

	/*
	 * Loop until we meet the reserve.  To minimize the lock shuffle
	 * and prevent simultaneous fills we first try a NOWAIT regardless
	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
	 * holding a vmem lock.
	 */
	while (vm->vm_nfreetags < BT_MAXALLOC) {
		bt = uma_zalloc(vmem_bt_zone,
		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
		if (bt == NULL) {
			VMEM_UNLOCK(vm);
			bt = uma_zalloc(vmem_bt_zone, flags);
			VMEM_LOCK(vm);
			if (bt == NULL)
				break;
		}
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags < BT_MAXALLOC)
		return ENOMEM;

	return 0;
}

/*
 * Pop a tag off of the freetag stack.
 */
static bt_t *
bt_alloc(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	bt = LIST_FIRST(&vm->vm_freetags);
	MPASS(bt != NULL);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return bt;
}

/*
 * Trim the per-vmem free list.  Returns with the lock released to
 * avoid allocator recursions.
 */
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	LIST_HEAD(, vmem_btag) freetags;
	bt_t *bt;

	LIST_INIT(&freetags);
	VMEM_ASSERT_LOCKED(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
	}
	VMEM_UNLOCK(vm);
	while ((bt = LIST_FIRST(&freetags)) != NULL) {
		LIST_REMOVE(bt, bt_freelist);
		uma_zfree(vmem_bt_zone, bt);
	}
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 2]
 *  :
 * freelist[29] ... [30, 30]
 * freelist[30] ... [31, 31]
 * freelist[31] ... [32, 63]
 * freelist[32] ... [64, 127]
 *  :
 * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For M_FIRSTFIT, return the list in which any blocks are large enough
 * for the requested size.  Otherwise, return the list which can have blocks
 * large enough for the requested size.
 */
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);

	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
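
/*
 * Example of the strategy split above: a request of 48 quanta has
 * SIZE2ORDER(48) == 31, whose freelist holds blocks of 32..63 quanta.
 * M_BESTFIT starts scanning at freelist[31] and checks sizes, while
 * M_FIRSTFIT advances to freelist[32] (64..127 quanta) so that the head
 * of any non-empty list is guaranteed to fit.
 */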

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), 0);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
	bt_free(vm, bt);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

/*
 * Import from the arena into the quantum cache in UMA.
 *
 * We use VMEM_ADDR_QCACHE_MIN instead of 0: uma_zalloc() returns 0 to indicate
 * failure, so UMA can't be used to cache a resource with value 0.
 */
static int
qc_import(void *arg, void **store, int cnt, int domain, int flags)
{
	qcache_t *qc;
	vmem_addr_t addr;
	int i;

	KASSERT((flags & M_WAITOK) == 0, ("blocking allocation"));

	qc = arg;
	for (i = 0; i < cnt; i++) {
		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
		    VMEM_ADDR_QCACHE_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
			break;
		store[i] = (void *)addr;
	}
	return (i);
}

/*
 * Release memory from the UMA cache to the arena.
 */
static void
qc_release(void *arg, void **store, int cnt)
{
	qcache_t *qc;
	int i;

	qc = arg;
	for (i = 0; i < cnt; i++)
		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
}

static void
qc_init(vmem_t *vm, vmem_size_t qcache_max)
{
	qcache_t *qc;
	vmem_size_t size;
	int qcache_idx_max;
	int i;

	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
	    VMEM_QCACHE_IDX_MAX);
	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++) {
		qc = &vm->vm_qcache[i];
		size = (i + 1) << vm->vm_quantum_shift;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_vmem = vm;
		qc->qc_size = size;
		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
		    UMA_ZONE_VM);
		MPASS(qc->qc_cache);
	}
}
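
/*
 * For instance (illustrative numbers), an arena created with a PAGE_SIZE
 * quantum and a qcache_max of 4 * PAGE_SIZE gets four UMA caches named
 * "<arena>-4096" through "<arena>-16384" on a 4KB-page machine; a
 * vmem_alloc() of up to four pages is then served from a per-CPU UMA
 * cache instead of the arena's freelists.
 */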

static void
qc_destroy(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zdestroy(vm->vm_qcache[i].qc_cache);
}

static void
qc_drain(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zone_reclaim(vm->vm_qcache[i].qc_cache, UMA_RECLAIM_DRAIN);
}

#ifndef UMA_MD_SMALL_ALLOC

static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;

/*
 * vmem_bt_alloc:  Allocate a new page of boundary tags.
 *
 * On architectures with uma_small_alloc there is no recursion; no address
 * space need be allocated to allocate boundary tags.  For the others, we
 * must handle recursion.  Boundary tags are necessary to allocate new
 * boundary tags.
 *
 * UMA guarantees that enough tags are held in reserve to allocate a new
 * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
 * when allocating the page to hold new boundary tags.  In this way the
 * reserve is automatically filled by the allocation that uses the reserve.
 *
 * We still have to guarantee that the new tags are allocated atomically since
 * many threads may try concurrently.  The bt_lock provides this guarantee.
 * We convert WAITOK allocations to NOWAIT and then handle the blocking here
 * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
 * loop again after checking to see if we lost the race to allocate.
 *
 * There is a small race between vmem_bt_alloc() returning the page and the
 * zone lock being acquired to add the page to the zone.  For WAITOK
 * allocations we just pause briefly.  NOWAIT may experience a transient
 * failure.  To alleviate this we permit a small number of simultaneous
 * fills to proceed concurrently so NOWAIT is less likely to fail unless
 * we are really out of KVA.
 */
static void *
vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	vmem_addr_t addr;

	*pflag = UMA_SLAB_KERNEL;

	/*
	 * Single thread boundary tag allocation so that the address space
	 * and memory are added in one atomic operation.
	 */
	mtx_lock(&vmem_bt_lock);
	if (vmem_xalloc(vm_dom[domain].vmd_kernel_arena, bytes, 0, 0, 0,
	    VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
		if (kmem_back_domain(domain, kernel_object, addr, bytes,
		    M_NOWAIT | M_USE_RESERVE) == 0) {
			mtx_unlock(&vmem_bt_lock);
			return ((void *)addr);
		}
		vmem_xfree(vm_dom[domain].vmd_kernel_arena, addr, bytes);
		mtx_unlock(&vmem_bt_lock);
		/*
		 * Out of memory, not address space.  This may not even be
		 * possible due to M_USE_RESERVE page allocation.
		 */
		if (wait & M_WAITOK)
			vm_wait_domain(domain);
		return (NULL);
	}
	mtx_unlock(&vmem_bt_lock);
	/*
	 * We're either out of address space or lost a fill race.
	 */
	if (wait & M_WAITOK)
		pause("btalloc", 1);

	return (NULL);
}

/*
 * How many pages do we need to startup_alloc.
 */
int
vmem_startup_count(void)
{

	return (howmany(BT_MAXALLOC,
	    UMA_SLAB_SPACE / sizeof(struct vmem_btag)));
}
#endif

void
vmem_startup(void)
{

	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
	vmem_zone = uma_zcreate("vmem",
	    sizeof(struct vmem), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);
	vmem_bt_zone = uma_zcreate("vmem btag",
	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
#ifndef UMA_MD_SMALL_ALLOC
	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
	/*
	 * Reserve enough tags to allocate new tags.  We allow multiple
	 * CPUs to attempt to allocate new tags concurrently to limit
	 * false restarts in UMA.  vmem_bt_alloc() allocates from a per-domain
	 * arena, which may involve importing a range from the kernel arena,
	 * so we need to keep at least 2 * BT_MAXALLOC tags reserved.
	 */
	uma_zone_reserve(vmem_bt_zone, 2 * BT_MAXALLOC * mp_ncpus);
	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
#endif
}

/* ---- rehash */

static int
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	vmem_size_t oldhashsize;

	MPASS(newhashsize > 0);

	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
	    M_VMEM, M_NOWAIT);
	if (newhashlist == NULL)
		return ENOMEM;
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != vm->vm_hash0) {
		free(oldhashlist, M_VMEM);
	}

	return 0;
}
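
/*
 * The periodic task below picks the new hash size as the power of two
 * nearest above the busy tag count, clamped to [VMEM_HASHSIZE_MIN,
 * VMEM_HASHSIZE_MAX].  For example, an arena tracking 10000 busy tags
 * yields desired == 1 << flsl(10000) == 16384; the table is rehashed
 * once desired reaches twice (growing) or a quarter of (shrinking) the
 * current size.
 */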

static void
vmem_periodic_kick(void *dummy)
{

	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
}

static void
vmem_periodic(void *unused, int pending)
{
	vmem_t *vm;
	vmem_size_t desired;
	vmem_size_t current;

	mtx_lock(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
#ifdef DIAGNOSTIC
		/* Convenient time to verify vmem state. */
		if (enable_vmem_check == 1) {
			VMEM_LOCK(vm);
			vmem_check(vm);
			VMEM_UNLOCK(vm);
		}
#endif
		desired = 1 << flsl(vm->vm_nbusytag);
		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
		    VMEM_HASHSIZE_MAX);
		current = vm->vm_hashsize;

		/* Grow in powers of two.  Shrink less aggressively. */
		if (desired >= current * 2 || desired * 4 <= current)
			vmem_rehash(vm, desired);

		/*
		 * Periodically wake up threads waiting for resources,
		 * so they can ask for reclamation again.
		 */
		VMEM_CONDVAR_BROADCAST(vm);
	}
	mtx_unlock(&vmem_list_lock);

	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}

static void
vmem_start_callout(void *unused)
{

	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
	vmem_periodic_interval = hz * 10;
	callout_init(&vmem_periodic_ch, 1);
	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);

static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
	bt_t *btspan;
	bt_t *btfree;

	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
	MPASS((size & vm->vm_quantum_mask) == 0);

	btspan = bt_alloc(vm);
	btspan->bt_type = type;
	btspan->bt_start = addr;
	btspan->bt_size = size;
	bt_insseg_tail(vm, btspan);

	btfree = bt_alloc(vm);
	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);

	vm->vm_size += size;
}

static void
vmem_destroy1(vmem_t *vm)
{
	bt_t *bt;

	/*
	 * Drain per-cpu quantum caches.
	 */
	qc_destroy(vm);

	/*
	 * The vmem should now only contain empty segments.
	 */
	VMEM_LOCK(vm);
	MPASS(vm->vm_nbusytag == 0);

	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
		bt_remseg(vm, bt);

	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
		free(vm->vm_hashlist, M_VMEM);

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	uma_zfree(vmem_zone, vm);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_addr_t addr;
	int error;

	if (vm->vm_importfn == NULL)
		return (EINVAL);

	/*
	 * To make sure we get a span that meets the alignment we double it
	 * and add the size to the tail.  This slightly overestimates.
	 */
	if (align != vm->vm_quantum_mask + 1)
		size = (align * 2) + size;
	size = roundup(size, vm->vm_import_quantum);

	if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
		return (ENOMEM);

	/*
	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
	 * span and the tag we want to allocate from it.
	 */
	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
	vm->vm_nfreetags -= BT_MAXALLOC;
	VMEM_UNLOCK(vm);
	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	VMEM_LOCK(vm);
	vm->vm_nfreetags += BT_MAXALLOC;
	if (error)
		return (ENOMEM);

	vmem_add1(vm, addr, size, BT_TYPE_SPAN);

	return 0;
}
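
/*
 * Worked example of the overestimate above (illustrative numbers): an
 * aligned request for size 0x2000 with align 0x4000 from an arena whose
 * import quantum is 0x10000 imports roundup(2 * 0x4000 + 0x2000, 0x10000)
 * == 0x10000 bytes, which is guaranteed to contain a 0x2000-byte range on
 * a 0x4000 boundary wherever the imported span happens to start.
 */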

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * It is the caller's responsibility to ensure the region is big enough
 * before calling us.
 */
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
    vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	MPASS(size > 0);
	MPASS(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr)
		end = maxaddr;
	if (start > end)
		return (ENOMEM);

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start)
		start += align;
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		MPASS(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		MPASS((start & (align - 1)) == phase);
		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
		MPASS(minaddr <= start);
		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
		MPASS(bt->bt_start <= start);
		MPASS(BT_END(bt) - start >= size - 1);
		*addrp = start;

		return (0);
	}
	return (ENOMEM);
}
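
/*
 * Worked example of the phase arithmetic above: for a free tag covering
 * [0x1200, 0x5fff] with size 0x1000, align 0x1000 and phase 0x800,
 *
 *	start = VMEM_ALIGNUP(0x1200 - 0x800, 0x1000) + 0x800 = 0x1800
 *
 * so the allocation lands at 0x1800, the first address in the tag that
 * is congruent to 0x800 modulo 0x1000 and leaves 0x1000 bytes before
 * the end of the tag.
 */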

/*
 * vmem_clip:  Trim the boundary tag edges to the requested start and size.
 */
static void
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
{
	bt_t *btnew;
	bt_t *btprev;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_FREE);
	MPASS(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btprev = bt_alloc(vm);
		btprev->bt_type = BT_TYPE_FREE;
		btprev->bt_start = bt->bt_start;
		btprev->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btprev->bt_size;
		bt_insfree(vm, btprev);
		bt_insseg(vm, btprev,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
	}
	MPASS(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew = bt_alloc(vm);
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		bt = btnew;
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
	}
	MPASS(bt->bt_size >= size);
}

static int
vmem_try_fetch(vmem_t *vm, const vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_size_t avail;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * XXX it is possible to fail to meet xalloc constraints with the
	 * imported region.  It is up to the user to specify the
	 * import quantum such that it can satisfy any allocation.
	 */
	if (vmem_import(vm, size, align, flags) == 0)
		return (1);

	/*
	 * Try to free some space from the quantum cache or reclaim
	 * functions if available.
	 */
	if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
		avail = vm->vm_size - vm->vm_inuse;
		VMEM_UNLOCK(vm);
		if (vm->vm_qcache_max != 0)
			qc_drain(vm);
		if (vm->vm_reclaimfn != NULL)
			vm->vm_reclaimfn(vm, flags);
		VMEM_LOCK(vm);
		/* If we were successful, retry even for NOWAIT. */
		if (vm->vm_size - vm->vm_inuse > avail)
			return (1);
	}
	if ((flags & M_NOWAIT) != 0)
		return (0);
	VMEM_CONDVAR_WAIT(vm);
	return (1);
}

static int
vmem_try_release(vmem_t *vm, struct vmem_btag *bt, const bool remfree)
{
	struct vmem_btag *prev;

	MPASS(bt->bt_type == BT_TYPE_FREE);

	if (vm->vm_releasefn == NULL)
		return (0);

	prev = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	MPASS(prev != NULL);
	MPASS(prev->bt_type != BT_TYPE_FREE);

	if (prev->bt_type == BT_TYPE_SPAN && prev->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		MPASS(prev->bt_start == bt->bt_start);
		spanaddr = prev->bt_start;
		spansize = prev->bt_size;
		if (remfree)
			bt_remfree(vm, bt);
		bt_remseg(vm, bt);
		bt_remseg(vm, prev);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
		vm->vm_releasefn(vm->vm_arg, spanaddr, spansize);
		return (1);
	}
	return (0);
}
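
/*
 * To illustrate vmem_clip() above with example addresses: clipping the
 * free tag [0x1000, 0x4fff] to start 0x2000 and size 0x1000 first splits
 * off a leading free tag [0x1000, 0x1fff], then a trailing free tag
 * [0x3000, 0x4fff], leaving a busy tag covering exactly [0x2000, 0x2fff].
 */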

static int
vmem_xalloc_nextfit(vmem_t *vm, const vmem_size_t size, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross, int flags,
    vmem_addr_t *addrp)
{
	struct vmem_btag *bt, *cursor, *next, *prev;
	int error;

	error = ENOMEM;
	VMEM_LOCK(vm);
retry:
	/*
	 * Make sure we have enough tags to complete the operation.
	 */
	if (vm->vm_nfreetags < BT_MAXALLOC && bt_fill(vm, flags) != 0)
		goto out;

	/*
	 * Find the next free tag meeting our constraints.  If one is found,
	 * perform the allocation.
	 */
	for (cursor = &vm->vm_cursor, bt = TAILQ_NEXT(cursor, bt_seglist);
	    bt != cursor; bt = TAILQ_NEXT(bt, bt_seglist)) {
		if (bt == NULL)
			bt = TAILQ_FIRST(&vm->vm_seglist);
		if (bt->bt_type == BT_TYPE_FREE && bt->bt_size >= size &&
		    (error = vmem_fit(bt, size, align, phase, nocross,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
			vmem_clip(vm, bt, *addrp, size);
			break;
		}
	}

	/*
	 * Try to coalesce free segments around the cursor.  If we succeed, and
	 * have not yet satisfied the allocation request, try again with the
	 * newly coalesced segment.
	 */
	if ((next = TAILQ_NEXT(cursor, bt_seglist)) != NULL &&
	    (prev = TAILQ_PREV(cursor, vmem_seglist, bt_seglist)) != NULL &&
	    next->bt_type == BT_TYPE_FREE && prev->bt_type == BT_TYPE_FREE &&
	    prev->bt_start + prev->bt_size == next->bt_start) {
		prev->bt_size += next->bt_size;
		bt_remfree(vm, next);
		bt_remseg(vm, next);

		/*
		 * The coalesced segment might be able to satisfy our request.
		 * If not, we might need to release it from the arena.
		 */
		if (error == ENOMEM && prev->bt_size >= size &&
		    (error = vmem_fit(prev, size, align, phase, nocross,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
			vmem_clip(vm, prev, *addrp, size);
			bt = prev;
		} else
			(void)vmem_try_release(vm, prev, true);
	}

	/*
	 * If the allocation was successful, advance the cursor.
	 */
	if (error == 0) {
		TAILQ_REMOVE(&vm->vm_seglist, cursor, bt_seglist);
		for (; bt != NULL && bt->bt_start < *addrp + size;
		    bt = TAILQ_NEXT(bt, bt_seglist))
			;
		if (bt != NULL)
			TAILQ_INSERT_BEFORE(bt, cursor, bt_seglist);
		else
			TAILQ_INSERT_HEAD(&vm->vm_seglist, cursor, bt_seglist);
	}

	/*
	 * Attempt to bring additional resources into the arena.  If that fails
	 * and M_WAITOK is specified, sleep waiting for resources to be freed.
	 */
	if (error == ENOMEM && vmem_try_fetch(vm, size, align, flags))
		goto retry;

out:
	VMEM_UNLOCK(vm);
	return (error);
}

/* ---- vmem API */

void
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
    vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
{

	VMEM_LOCK(vm);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_import_quantum = import_quantum;
	VMEM_UNLOCK(vm);
}

void
vmem_set_limit(vmem_t *vm, vmem_size_t limit)
{

	VMEM_LOCK(vm);
	vm->vm_limit = limit;
	VMEM_UNLOCK(vm);
}

void
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
{

	VMEM_LOCK(vm);
	vm->vm_reclaimfn = reclaimfn;
	VMEM_UNLOCK(vm);
}

/*
 * vmem_init: Initializes vmem arena.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	int i;

	MPASS(quantum > 0);
	MPASS((quantum & (quantum - 1)) == 0);

	bzero(vm, sizeof(*vm));

	VMEM_CONDVAR_INIT(vm, name);
	VMEM_LOCK_INIT(vm, name);
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = flsl(quantum) - 1;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_limit = 0;
	vm->vm_inuse = 0;
	qc_init(vm, qcache_max);

	TAILQ_INIT(&vm->vm_seglist);
	vm->vm_cursor.bt_start = vm->vm_cursor.bt_size = 0;
	vm->vm_cursor.bt_type = BT_TYPE_CURSOR;
	TAILQ_INSERT_TAIL(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);

	for (i = 0; i < VMEM_MAXORDER; i++)
		LIST_INIT(&vm->vm_freelist[i]);

	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
	vm->vm_hashlist = vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

	mtx_lock(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	return vm;
}

/*
 * vmem_create: create an arena.
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{

	vmem_t *vm;

	vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
	if (vm == NULL)
		return (NULL);
	if (vmem_init(vm, name, base, size, quantum, qcache_max,
	    flags) == NULL)
		return (NULL);
	return (vm);
}

void
vmem_destroy(vmem_t *vm)
{

	mtx_lock(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc: allocate resource from the arena.
 */
int
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	const int strat __unused = flags & VMEM_FITMASK;
	qcache_t *qc;

	flags &= VMEM_FLAGS;
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");

	if (size <= vm->vm_qcache_max) {
		/*
		 * Resource 0 cannot be cached, so avoid a blocking allocation
		 * in qc_import() and give the vmem_xalloc() call below a chance
		 * to return 0.
		 */
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache,
		    (flags & ~M_WAITOK) | M_NOWAIT);
		if (__predict_true(*addrp != 0))
			return (0);
	}

	return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp));
}
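
/*
 * vmem_xalloc() below exposes the full constraint set.  A hedged example
 * with illustrative sizes: to allocate 0x1800 bytes that must not cross
 * a 0x10000 boundary, a caller could use
 *
 *	error = vmem_xalloc(vm, 0x1800, 0, 0, 0x10000, VMEM_ADDR_MIN,
 *	    VMEM_ADDR_MAX, M_BESTFIT | M_WAITOK, &addr);
 *
 * An align and phase of 0 fall back to quantum alignment, and nocross
 * must be at least the (rounded) size, as asserted below.
 */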

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
    vmem_addr_t *addrp)
{
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	int error;
	int strat;

	flags &= VMEM_FLAGS;
	strat = flags & VMEM_FITMASK;
	MPASS(size0 > 0);
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
	MPASS((align & vm->vm_quantum_mask) == 0);
	MPASS((align & (align - 1)) == 0);
	MPASS((phase & vm->vm_quantum_mask) == 0);
	MPASS((nocross & vm->vm_quantum_mask) == 0);
	MPASS((nocross & (nocross - 1)) == 0);
	MPASS((align == 0 && phase == 0) || phase < align);
	MPASS(nocross == 0 || nocross >= size);
	MPASS(minaddr <= maxaddr);
	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
	if (strat == M_NEXTFIT)
		MPASS(minaddr == VMEM_ADDR_MIN && maxaddr == VMEM_ADDR_MAX);

	if (align == 0)
		align = vm->vm_quantum_mask + 1;
	*addrp = 0;

	/*
	 * Next-fit allocations don't use the freelists.
	 */
	if (strat == M_NEXTFIT)
		return (vmem_xalloc_nextfit(vm, size0, align, phase, nocross,
		    flags, addrp));

	end = &vm->vm_freelist[VMEM_MAXORDER];
	/*
	 * Choose a free block from which we allocate.
	 */
	first = bt_freehead_toalloc(vm, size, strat);
	VMEM_LOCK(vm);
	for (;;) {
		/*
		 * Make sure we have enough tags to complete the
		 * operation.
		 */
		if (vm->vm_nfreetags < BT_MAXALLOC &&
		    bt_fill(vm, flags) != 0) {
			error = ENOMEM;
			break;
		}

		/*
		 * Scan freelists looking for a tag that satisfies the
		 * allocation.  If we're doing BESTFIT we may encounter
		 * sizes below the request.  If we're doing FIRSTFIT we
		 * inspect only the first element from each list.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					error = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, addrp);
					if (error == 0) {
						vmem_clip(vm, bt, *addrp, size);
						goto out;
					}
				}
				/* FIRST skips to the next list. */
				if (strat == M_FIRSTFIT)
					break;
			}
		}

		/*
		 * Retry if the fast algorithm failed.
		 */
		if (strat == M_FIRSTFIT) {
			strat = M_BESTFIT;
			first = bt_freehead_toalloc(vm, size, strat);
			continue;
		}

		/*
		 * Try a few measures to bring additional resources into the
		 * arena.  If all else fails, we will sleep waiting for
		 * resources to be freed.
		 */
		if (!vmem_try_fetch(vm, size, align, flags)) {
			error = ENOMEM;
			break;
		}
	}
out:
	VMEM_UNLOCK(vm);
	if (error != 0 && (flags & M_NOWAIT) == 0)
		panic("failed to allocate waiting allocation");

	return (error);
}

/*
 * vmem_free: free the resource to the arena.
 */
void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	qcache_t *qc;
	MPASS(size > 0);

	if (size <= vm->vm_qcache_max &&
	    __predict_true(addr >= VMEM_ADDR_QCACHE_MIN)) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		uma_zfree(qc->qc_cache, (void *)addr);
	} else
		vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	MPASS(size > 0);

	VMEM_LOCK(vm);
	bt = bt_lookupbusy(vm, addr);
	MPASS(bt != NULL);
	MPASS(bt->bt_start == addr);
	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	MPASS(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}

	if (!vmem_try_release(vm, bt, false)) {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
	}
}

/*
 * vmem_add: add a span of resource to the arena as a static span.
 */
int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
{
	int error;

	error = 0;
	flags &= VMEM_FLAGS;
	VMEM_LOCK(vm);
	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
	else
		error = ENOMEM;
	VMEM_UNLOCK(vm);

	return (error);
}
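
/*
 * For example, a boot-time caller that owns the range [0x100000, 0x10ffff]
 * outright (illustrative addresses) can seed an arena with it directly,
 * with no import function involved:
 *
 *	(void)vmem_add(vm, 0x100000, 0x10000, M_WAITOK);
 */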

/*
 * vmem_size: information about the arena's size.
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{
	int i;

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	case VMEM_MAXFREE:
		VMEM_LOCK(vm);
		for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
			if (LIST_EMPTY(&vm->vm_freelist[i]))
				continue;
			VMEM_UNLOCK(vm);
			return ((vmem_size_t)ORDER2SIZE(i) <<
			    vm->vm_quantum_shift);
		}
		VMEM_UNLOCK(vm);
		return (0);
	default:
		panic("vmem_size");
	}
}

/* ---- debug */

#if defined(DDB) || defined(DIAGNOSTIC)

static void bt_dump(const bt_t *, int (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{

	switch (type) {
	case BT_TYPE_BUSY:
		return "busy";
	case BT_TYPE_FREE:
		return "free";
	case BT_TYPE_SPAN:
		return "span";
	case BT_TYPE_SPAN_STATIC:
		return "static span";
	case BT_TYPE_CURSOR:
		return "cursor";
	default:
		break;
	}
	return "BOGUS";
}

static void
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
{

	(*pr)("\t%p: %jx %jx, %d(%s)\n",
	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, int (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(DIAGNOSTIC) */

#if defined(DDB)
#include <ddb/ddb.h>

static bt_t *
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}
"allocated" : "free"); 1628 } 1629 } 1630 1631 void 1632 vmem_printall(const char *modif, int (*pr)(const char *, ...)) 1633 { 1634 const vmem_t *vm; 1635 1636 LIST_FOREACH(vm, &vmem_list, vm_alllist) { 1637 vmem_dump(vm, pr); 1638 } 1639 } 1640 1641 void 1642 vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...)) 1643 { 1644 const vmem_t *vm = (const void *)addr; 1645 1646 vmem_dump(vm, pr); 1647 } 1648 1649 DB_SHOW_COMMAND(vmemdump, vmemdump) 1650 { 1651 1652 if (!have_addr) { 1653 db_printf("usage: show vmemdump <addr>\n"); 1654 return; 1655 } 1656 1657 vmem_dump((const vmem_t *)addr, db_printf); 1658 } 1659 1660 DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall) 1661 { 1662 const vmem_t *vm; 1663 1664 LIST_FOREACH(vm, &vmem_list, vm_alllist) 1665 vmem_dump(vm, db_printf); 1666 } 1667 1668 DB_SHOW_COMMAND(vmem, vmem_summ) 1669 { 1670 const vmem_t *vm = (const void *)addr; 1671 const bt_t *bt; 1672 size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER]; 1673 size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER]; 1674 int ord; 1675 1676 if (!have_addr) { 1677 db_printf("usage: show vmem <addr>\n"); 1678 return; 1679 } 1680 1681 db_printf("vmem %p '%s'\n", vm, vm->vm_name); 1682 db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1); 1683 db_printf("\tsize:\t%zu\n", vm->vm_size); 1684 db_printf("\tinuse:\t%zu\n", vm->vm_inuse); 1685 db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse); 1686 db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag); 1687 db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags); 1688 1689 memset(&ft, 0, sizeof(ft)); 1690 memset(&ut, 0, sizeof(ut)); 1691 memset(&fs, 0, sizeof(fs)); 1692 memset(&us, 0, sizeof(us)); 1693 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1694 ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift); 1695 if (bt->bt_type == BT_TYPE_BUSY) { 1696 ut[ord]++; 1697 us[ord] += bt->bt_size; 1698 } else if (bt->bt_type == BT_TYPE_FREE) { 1699 ft[ord]++; 1700 fs[ord] += bt->bt_size; 1701 } 1702 } 1703 db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n"); 1704 for (ord = 0; ord < VMEM_MAXORDER; ord++) { 1705 if (ut[ord] == 0 && ft[ord] == 0) 1706 continue; 1707 db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n", 1708 ORDER2SIZE(ord) << vm->vm_quantum_shift, 1709 ut[ord], us[ord], ft[ord], fs[ord]); 1710 } 1711 } 1712 1713 DB_SHOW_ALL_COMMAND(vmem, vmem_summall) 1714 { 1715 const vmem_t *vm; 1716 1717 LIST_FOREACH(vm, &vmem_list, vm_alllist) 1718 vmem_summ((db_expr_t)vm, TRUE, count, modif); 1719 } 1720 #endif /* defined(DDB) */ 1721 1722 #define vmem_printf printf 1723 1724 #if defined(DIAGNOSTIC) 1725 1726 static bool 1727 vmem_check_sanity(vmem_t *vm) 1728 { 1729 const bt_t *bt, *bt2; 1730 1731 MPASS(vm != NULL); 1732 1733 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1734 if (bt->bt_start > BT_END(bt)) { 1735 printf("corrupted tag\n"); 1736 bt_dump(bt, vmem_printf); 1737 return false; 1738 } 1739 } 1740 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1741 if (bt->bt_type == BT_TYPE_CURSOR) { 1742 if (bt->bt_start != 0 || bt->bt_size != 0) { 1743 printf("corrupted cursor\n"); 1744 return false; 1745 } 1746 continue; 1747 } 1748 TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) { 1749 if (bt == bt2) { 1750 continue; 1751 } 1752 if (bt2->bt_type == BT_TYPE_CURSOR) { 1753 continue; 1754 } 1755 if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) { 1756 continue; 1757 } 1758 if (bt->bt_start <= BT_END(bt2) && 1759 bt2->bt_start <= BT_END(bt)) { 1760 printf("overwrapped tags\n"); 1761 bt_dump(bt, vmem_printf); 1762 bt_dump(bt2, vmem_printf); 1763 return false; 1764 } 1765 } 1766 } 

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(DIAGNOSTIC) */