/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * Copyright (c) 2013 EMC Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From:
 *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
 *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>

#include "opt_vm.h"

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma_int.h>

#define	VMEM_OPTORDER		5
#define	VMEM_OPTVALUE		(1 << VMEM_OPTORDER)
#define	VMEM_MAXORDER						\
	(VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)

#define	VMEM_HASHSIZE_MIN	16
#define	VMEM_HASHSIZE_MAX	131072

#define	VMEM_QCACHE_IDX_MAX	16

#define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)

#define	VMEM_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM |	\
    M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)

#define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)

#define	QC_NAME_MAX	16
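
/*
 * A worked example, assuming an LP64 platform where vmem_size_t is 64 bits
 * wide (NBBY == 8): VMEM_OPTVALUE is 32, so VMEM_MAXORDER evaluates to
 * 32 - 1 + 64 - 5 == 90 freelist orders per arena.
 */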

/*
 * Data structures private to vmem.
 */
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");

typedef struct vmem_btag bt_t;

TAILQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

struct qcache {
	uma_zone_t	qc_cache;
	vmem_t		*qc_vmem;
	vmem_size_t	qc_size;
	char		qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))

#define	VMEM_NAME_MAX	16

/* boundary tag */
struct vmem_btag {
	TAILQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t	bt_start;
	vmem_size_t	bt_size;
	int		bt_type;
};

/* vmem arena */
struct vmem {
	struct mtx_padalign	vm_lock;
	struct cv		vm_cv;
	char			vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem)	vm_alllist;
	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
	struct vmem_seglist	vm_seglist;
	struct vmem_hashlist	*vm_hashlist;
	vmem_size_t		vm_hashsize;

	/* Constant after init */
	vmem_size_t		vm_qcache_max;
	vmem_size_t		vm_quantum_mask;
	vmem_size_t		vm_import_quantum;
	int			vm_quantum_shift;

	/* Written on alloc/free */
	LIST_HEAD(, vmem_btag)	vm_freetags;
	int			vm_nfreetags;
	int			vm_nbusytag;
	vmem_size_t		vm_inuse;
	vmem_size_t		vm_size;
	vmem_size_t		vm_limit;
	struct vmem_btag	vm_cursor;

	/* Used on import. */
	vmem_import_t		*vm_importfn;
	vmem_release_t		*vm_releasefn;
	void			*vm_arg;

	/* Space exhaustion callback. */
	vmem_reclaim_t		*vm_reclaimfn;

	/* quantum cache */
	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
};

#define	BT_TYPE_SPAN		1	/* Allocated from importfn */
#define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
#define	BT_TYPE_FREE		3	/* Available space. */
#define	BT_TYPE_BUSY		4	/* Used space. */
#define	BT_TYPE_CURSOR		5	/* Cursor for nextfit allocations. */
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)
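
/*
 * Note that BT_END() is inclusive: for example, a tag with bt_start == 0x1000
 * and bt_size == 0x1000 ends at 0x1fff.  BT_ISSPAN_P() matches only the two
 * span types, BT_TYPE_SPAN and BT_TYPE_SPAN_STATIC.
 */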

#if defined(DIAGNOSTIC)
static int enable_vmem_check = 1;
SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
    &enable_vmem_check, 0, "Enable vmem check");
static void vmem_check(vmem_t *);
#endif

static struct callout	vmem_periodic_ch;
static int		vmem_periodic_interval;
static struct task	vmem_periodic_wk;

static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
static uma_zone_t vmem_zone;

/* ---- misc */
#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)

#define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, name)	mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
#define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);

#define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((order) < VMEM_OPTVALUE ? ((order) + 1) : \
	(vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
#define	SIZE2ORDER(size)	((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
	(flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
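
/*
 * Worked examples for the macros above (illustrative only, assuming 64-bit
 * address arithmetic): VMEM_ALIGNUP(0x1001, 0x1000) rounds up to 0x2000 via
 * two's complement masking; VMEM_CROSS_P(0xfff, 0x1000, 0x1000) is true
 * because the two addresses fall in different 0x1000-aligned blocks; with a
 * 4 KB quantum, a 24 KB segment has qsize 6 and SIZE2ORDER(6) == 5.
 */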

/*
 * Maximum number of boundary tags that may be required to satisfy an
 * allocation.  Two may be required to import.  Another two may be
 * required to clip edges.
 */
#define	BT_MAXALLOC	4

/*
 * Max free limits the number of locally cached boundary tags.  We
 * just want to avoid hitting the zone allocator for every call.
 */
#define	BT_MAXFREE	(BT_MAXALLOC * 8)

/* Allocator for boundary tags. */
static uma_zone_t vmem_bt_zone;

/* boot time arena storage. */
static struct vmem kernel_arena_storage;
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
/* kernel and kmem arenas are aliased for backwards KPI compat. */
vmem_t *kernel_arena = &kernel_arena_storage;
vmem_t *kmem_arena = &kernel_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;

#ifdef DEBUG_MEMGUARD
static struct vmem memguard_arena_storage;
vmem_t *memguard_arena = &memguard_arena_storage;
#endif

/*
 * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
 * allocation will not fail once bt_fill() passes.  To do so we cache
 * at least the maximum possible tag allocations in the arena.
 */
static int
bt_fill(vmem_t *vm, int flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * Only allow the kernel arena and arenas derived from kernel arena to
	 * dip into reserve tags.  They are where new tags come from.
	 */
	flags &= BT_FLAGS;
	if (vm != kernel_arena && vm->vm_arg != kernel_arena)
		flags &= ~M_USE_RESERVE;

	/*
	 * Loop until we meet the reserve.  To minimize the lock shuffle
	 * and prevent simultaneous fills we first try a NOWAIT regardless
	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
	 * holding a vmem lock.
	 */
	while (vm->vm_nfreetags < BT_MAXALLOC) {
		bt = uma_zalloc(vmem_bt_zone,
		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
		if (bt == NULL) {
			VMEM_UNLOCK(vm);
			bt = uma_zalloc(vmem_bt_zone, flags);
			VMEM_LOCK(vm);
			if (bt == NULL)
				break;
		}
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags < BT_MAXALLOC)
		return ENOMEM;

	return 0;
}

/*
 * Pop a tag off of the freetag stack.
 */
static bt_t *
bt_alloc(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	bt = LIST_FIRST(&vm->vm_freetags);
	MPASS(bt != NULL);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return bt;
}

/*
 * Trim the per-vmem free list.  Returns with the lock released to
 * avoid allocator recursions.
 */
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	LIST_HEAD(, vmem_btag) freetags;
	bt_t *bt;

	LIST_INIT(&freetags);
	VMEM_ASSERT_LOCKED(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
	}
	VMEM_UNLOCK(vm);
	while ((bt = LIST_FIRST(&freetags)) != NULL) {
		LIST_REMOVE(bt, bt_freelist);
		uma_zfree(vmem_bt_zone, bt);
	}
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 2]
 *  :
 * freelist[29] ... [30, 30]
 * freelist[30] ... [31, 31]
 * freelist[31] ... [32, 63]
 * freelist[32] ... [64, 127]
 *  :
 * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For M_FIRSTFIT, return the list in which any blocks are large enough
 * for the requested size.  Otherwise, return the list which can have blocks
 * large enough for the requested size.
 */
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);

	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), 0);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type != BT_TYPE_CURSOR);
	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
	bt_free(vm, bt);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

/*
 * Import from the arena into the quantum cache in UMA.
 *
 * We use VMEM_ADDR_QCACHE_MIN instead of 0: uma_zalloc() returns 0 to indicate
 * failure, so UMA can't be used to cache a resource with value 0.
 */
static int
qc_import(void *arg, void **store, int cnt, int domain, int flags)
{
	qcache_t *qc;
	vmem_addr_t addr;
	int i;

	KASSERT((flags & M_WAITOK) == 0, ("blocking allocation"));

	qc = arg;
	for (i = 0; i < cnt; i++) {
		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
		    VMEM_ADDR_QCACHE_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
			break;
		store[i] = (void *)addr;
	}
	return (i);
}
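
/*
 * Illustrative note: for a 4 KB-quantum arena with a 64 KB qcache_max, the
 * cache backing 8 KB requests calls into the arena here with qc_size == 8192;
 * each imported address is stored as a pointer value, which is why address 0
 * must never be handed out through this path.
 */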

/*
 * Release memory from the UMA cache to the arena.
 */
static void
qc_release(void *arg, void **store, int cnt)
{
	qcache_t *qc;
	int i;

	qc = arg;
	for (i = 0; i < cnt; i++)
		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
}

static void
qc_init(vmem_t *vm, vmem_size_t qcache_max)
{
	qcache_t *qc;
	vmem_size_t size;
	int qcache_idx_max;
	int i;

	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
	    VMEM_QCACHE_IDX_MAX);
	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++) {
		qc = &vm->vm_qcache[i];
		size = (i + 1) << vm->vm_quantum_shift;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_vmem = vm;
		qc->qc_size = size;
		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc, 0);
		MPASS(qc->qc_cache);
	}
}

static void
qc_destroy(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zdestroy(vm->vm_qcache[i].qc_cache);
}

static void
qc_drain(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zone_reclaim(vm->vm_qcache[i].qc_cache, UMA_RECLAIM_DRAIN);
}

#ifndef UMA_MD_SMALL_ALLOC

static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;

/*
 * vmem_bt_alloc:  Allocate a new page of boundary tags.
 *
 * On architectures with uma_small_alloc there is no recursion; no address
 * space need be allocated to allocate boundary tags.  For the others, we
 * must handle recursion.  Boundary tags are necessary to allocate new
 * boundary tags.
 *
 * UMA guarantees that enough tags are held in reserve to allocate a new
 * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
 * when allocating the page to hold new boundary tags.  In this way the
 * reserve is automatically filled by the allocation that uses the reserve.
 *
 * We still have to guarantee that the new tags are allocated atomically since
 * many threads may try concurrently.  The bt_lock provides this guarantee.
 * We convert WAITOK allocations to NOWAIT and then handle the blocking here
 * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
 * loop again after checking to see if we lost the race to allocate.
 *
 * There is a small race between vmem_bt_alloc() returning the page and the
 * zone lock being acquired to add the page to the zone.  For WAITOK
 * allocations we just pause briefly.  NOWAIT may experience a transient
 * failure.  To alleviate this we permit a small number of simultaneous
 * fills to proceed concurrently so NOWAIT is less likely to fail unless
 * we are really out of KVA.
 */
static void *
vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	vmem_addr_t addr;

	*pflag = UMA_SLAB_KERNEL;

	/*
	 * Single thread boundary tag allocation so that the address space
	 * and memory are added in one atomic operation.
	 */
	mtx_lock(&vmem_bt_lock);
	if (vmem_xalloc(vm_dom[domain].vmd_kernel_arena, bytes, 0, 0, 0,
	    VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
		if (kmem_back_domain(domain, kernel_object, addr, bytes,
		    M_NOWAIT | M_USE_RESERVE) == 0) {
			mtx_unlock(&vmem_bt_lock);
			return ((void *)addr);
		}
		vmem_xfree(vm_dom[domain].vmd_kernel_arena, addr, bytes);
		mtx_unlock(&vmem_bt_lock);
		/*
		 * Out of memory, not address space.  This may not even be
		 * possible due to M_USE_RESERVE page allocation.
		 */
		if (wait & M_WAITOK)
			vm_wait_domain(domain);
		return (NULL);
	}
	mtx_unlock(&vmem_bt_lock);
	/*
	 * We're either out of address space or lost a fill race.
	 */
	if (wait & M_WAITOK)
		pause("btalloc", 1);

	return (NULL);
}
#endif

void
vmem_startup(void)
{

	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
	vmem_zone = uma_zcreate("vmem",
	    sizeof(struct vmem), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
#ifdef UMA_MD_SMALL_ALLOC
	vmem_bt_zone = uma_zcreate("vmem btag",
	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);
#else
	vmem_bt_zone = uma_zcreate("vmem btag",
	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
	/*
	 * Reserve enough tags to allocate new tags.  We allow multiple
	 * CPUs to attempt to allocate new tags concurrently to limit
	 * false restarts in UMA.  vmem_bt_alloc() allocates from a per-domain
	 * arena, which may involve importing a range from the kernel arena,
	 * so we need to keep at least 2 * BT_MAXALLOC tags reserved.
	 */
	uma_zone_reserve(vmem_bt_zone, 2 * BT_MAXALLOC * mp_ncpus);
	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
#endif
}

/* ---- rehash */

static int
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	vmem_size_t oldhashsize;

	MPASS(newhashsize > 0);

	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
	    M_VMEM, M_NOWAIT);
	if (newhashlist == NULL)
		return ENOMEM;
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != vm->vm_hash0) {
		free(oldhashlist, M_VMEM);
	}

	return 0;
}
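
/*
 * Sizing example for the periodic rehash below (illustrative numbers only):
 * with roughly 3000 busy tags, flsl(3000) is 12, so the desired table size is
 * 1 << 12 == 4096 buckets, clamped to [VMEM_HASHSIZE_MIN, VMEM_HASHSIZE_MAX].
 * The table only grows once the desired size is at least twice the current
 * size, and only shrinks once it is a quarter of it or less.
 */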

static void
vmem_periodic_kick(void *dummy)
{

	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
}

static void
vmem_periodic(void *unused, int pending)
{
	vmem_t *vm;
	vmem_size_t desired;
	vmem_size_t current;

	mtx_lock(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
#ifdef DIAGNOSTIC
		/* Convenient time to verify vmem state. */
		if (enable_vmem_check == 1) {
			VMEM_LOCK(vm);
			vmem_check(vm);
			VMEM_UNLOCK(vm);
		}
#endif
		desired = 1 << flsl(vm->vm_nbusytag);
		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
		    VMEM_HASHSIZE_MAX);
		current = vm->vm_hashsize;

		/* Grow in powers of two.  Shrink less aggressively. */
		if (desired >= current * 2 || desired * 4 <= current)
			vmem_rehash(vm, desired);

		/*
		 * Periodically wake up threads waiting for resources,
		 * so they could ask for reclamation again.
		 */
		VMEM_CONDVAR_BROADCAST(vm);
	}
	mtx_unlock(&vmem_list_lock);

	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}

static void
vmem_start_callout(void *unused)
{

	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
	vmem_periodic_interval = hz * 10;
	callout_init(&vmem_periodic_ch, 1);
	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);

static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
	bt_t *btspan;
	bt_t *btfree;

	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
	MPASS((size & vm->vm_quantum_mask) == 0);

	btspan = bt_alloc(vm);
	btspan->bt_type = type;
	btspan->bt_start = addr;
	btspan->bt_size = size;
	bt_insseg_tail(vm, btspan);

	btfree = bt_alloc(vm);
	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);

	vm->vm_size += size;
}

static void
vmem_destroy1(vmem_t *vm)
{
	bt_t *bt;

	/*
	 * Drain per-cpu quantum caches.
	 */
	qc_destroy(vm);

	/*
	 * The vmem should now only contain empty segments.
	 */
	VMEM_LOCK(vm);
	MPASS(vm->vm_nbusytag == 0);

	TAILQ_REMOVE(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
		bt_remseg(vm, bt);

	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
		free(vm->vm_hashlist, M_VMEM);

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	uma_zfree(vmem_zone, vm);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_addr_t addr;
	int error;

	if (vm->vm_importfn == NULL)
		return (EINVAL);

	/*
	 * To make sure we get a span that meets the alignment we double it
	 * and add the size to the tail.  This slightly overestimates.
	 */
	if (align != vm->vm_quantum_mask + 1)
		size = (align * 2) + size;
	size = roundup(size, vm->vm_import_quantum);

	if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
		return (ENOMEM);

	/*
	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
	 * span and the tag we want to allocate from it.
	 */
	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
	vm->vm_nfreetags -= BT_MAXALLOC;
	VMEM_UNLOCK(vm);
	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	VMEM_LOCK(vm);
	vm->vm_nfreetags += BT_MAXALLOC;
	if (error)
		return (ENOMEM);

	vmem_add1(vm, addr, size, BT_TYPE_SPAN);

	return 0;
}
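
/*
 * A worked example of the over-request above (illustrative, with assumed
 * numbers): asking for 16 KB aligned to 8 KB from a 4 KB-quantum arena whose
 * import quantum is 64 KB first inflates the request to 2 * 8 KB + 16 KB ==
 * 32 KB, then rounds it up to a 64 KB span, so an 8 KB-aligned 16 KB range is
 * guaranteed to fit somewhere inside the imported span.
 */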

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * it's a caller's responsibility to ensure the region is big enough
 * before calling us.
 */
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
    vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	MPASS(size > 0);
	MPASS(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr)
		end = maxaddr;
	if (start > end)
		return (ENOMEM);

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start)
		start += align;
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		MPASS(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		MPASS((start & (align - 1)) == phase);
		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
		MPASS(minaddr <= start);
		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
		MPASS(bt->bt_start <= start);
		MPASS(BT_END(bt) - start >= size - 1);
		*addrp = start;

		return (0);
	}
	return (ENOMEM);
}
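
/*
 * A worked example (illustrative numbers only): given a free tag covering
 * [0x10000, 0x1ffff], a request of size 0x1000 with align 0x2000, phase 0x800
 * and no nocross constraint computes start = VMEM_ALIGNUP(0x10000 - 0x800,
 * 0x2000) + 0x800 == 0x10800, which lies inside the tag and satisfies
 * (start & (align - 1)) == phase, so vmem_fit() returns 0 with
 * *addrp == 0x10800.
 */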

/*
 * vmem_clip:  Trim the boundary tag edges to the requested start and size.
 */
static void
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
{
	bt_t *btnew;
	bt_t *btprev;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_FREE);
	MPASS(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btprev = bt_alloc(vm);
		btprev->bt_type = BT_TYPE_FREE;
		btprev->bt_start = bt->bt_start;
		btprev->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btprev->bt_size;
		bt_insfree(vm, btprev);
		bt_insseg(vm, btprev,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
	}
	MPASS(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew = bt_alloc(vm);
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		bt = btnew;
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
	}
	MPASS(bt->bt_size >= size);
}

static int
vmem_try_fetch(vmem_t *vm, const vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_size_t avail;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * XXX it is possible to fail to meet xalloc constraints with the
	 * imported region.  It is up to the user to specify the
	 * import quantum such that it can satisfy any allocation.
	 */
	if (vmem_import(vm, size, align, flags) == 0)
		return (1);

	/*
	 * Try to free some space from the quantum cache or reclaim
	 * functions if available.
	 */
	if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
		avail = vm->vm_size - vm->vm_inuse;
		VMEM_UNLOCK(vm);
		if (vm->vm_qcache_max != 0)
			qc_drain(vm);
		if (vm->vm_reclaimfn != NULL)
			vm->vm_reclaimfn(vm, flags);
		VMEM_LOCK(vm);
		/* If we were successful retry even NOWAIT. */
		if (vm->vm_size - vm->vm_inuse > avail)
			return (1);
	}
	if ((flags & M_NOWAIT) != 0)
		return (0);
	VMEM_CONDVAR_WAIT(vm);
	return (1);
}

static int
vmem_try_release(vmem_t *vm, struct vmem_btag *bt, const bool remfree)
{
	struct vmem_btag *prev;

	MPASS(bt->bt_type == BT_TYPE_FREE);

	if (vm->vm_releasefn == NULL)
		return (0);

	prev = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	MPASS(prev != NULL);
	MPASS(prev->bt_type != BT_TYPE_FREE);

	if (prev->bt_type == BT_TYPE_SPAN && prev->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		MPASS(prev->bt_start == bt->bt_start);
		spanaddr = prev->bt_start;
		spansize = prev->bt_size;
		if (remfree)
			bt_remfree(vm, bt);
		bt_remseg(vm, bt);
		bt_remseg(vm, prev);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
		vm->vm_releasefn(vm->vm_arg, spanaddr, spansize);
		return (1);
	}
	return (0);
}

static int
vmem_xalloc_nextfit(vmem_t *vm, const vmem_size_t size, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross, int flags,
    vmem_addr_t *addrp)
{
	struct vmem_btag *bt, *cursor, *next, *prev;
	int error;

	error = ENOMEM;
	VMEM_LOCK(vm);
retry:
	/*
	 * Make sure we have enough tags to complete the operation.
	 */
	if (vm->vm_nfreetags < BT_MAXALLOC && bt_fill(vm, flags) != 0)
		goto out;

	/*
	 * Find the next free tag meeting our constraints.  If one is found,
	 * perform the allocation.
	 */
	for (cursor = &vm->vm_cursor, bt = TAILQ_NEXT(cursor, bt_seglist);
	    bt != cursor; bt = TAILQ_NEXT(bt, bt_seglist)) {
		if (bt == NULL)
			bt = TAILQ_FIRST(&vm->vm_seglist);
		if (bt->bt_type == BT_TYPE_FREE && bt->bt_size >= size &&
		    (error = vmem_fit(bt, size, align, phase, nocross,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
			vmem_clip(vm, bt, *addrp, size);
			break;
		}
	}

	/*
	 * Try to coalesce free segments around the cursor.  If we succeed, and
	 * have not yet satisfied the allocation request, try again with the
	 * newly coalesced segment.
	 */
	if ((next = TAILQ_NEXT(cursor, bt_seglist)) != NULL &&
	    (prev = TAILQ_PREV(cursor, vmem_seglist, bt_seglist)) != NULL &&
	    next->bt_type == BT_TYPE_FREE && prev->bt_type == BT_TYPE_FREE &&
	    prev->bt_start + prev->bt_size == next->bt_start) {
		prev->bt_size += next->bt_size;
		bt_remfree(vm, next);
		bt_remseg(vm, next);

		/*
		 * The coalesced segment might be able to satisfy our request.
		 * If not, we might need to release it from the arena.
		 */
		if (error == ENOMEM && prev->bt_size >= size &&
		    (error = vmem_fit(prev, size, align, phase, nocross,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
			vmem_clip(vm, prev, *addrp, size);
			bt = prev;
		} else
			(void)vmem_try_release(vm, prev, true);
	}

	/*
	 * If the allocation was successful, advance the cursor.
	 */
	if (error == 0) {
		TAILQ_REMOVE(&vm->vm_seglist, cursor, bt_seglist);
		for (; bt != NULL && bt->bt_start < *addrp + size;
		    bt = TAILQ_NEXT(bt, bt_seglist))
			;
		if (bt != NULL)
			TAILQ_INSERT_BEFORE(bt, cursor, bt_seglist);
		else
			TAILQ_INSERT_HEAD(&vm->vm_seglist, cursor, bt_seglist);
	}

	/*
	 * Attempt to bring additional resources into the arena.  If that fails
	 * and M_WAITOK is specified, sleep waiting for resources to be freed.
	 */
	if (error == ENOMEM && vmem_try_fetch(vm, size, align, flags))
		goto retry;

out:
	VMEM_UNLOCK(vm);
	return (error);
}

/* ---- vmem API */

void
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
    vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
{

	VMEM_LOCK(vm);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_import_quantum = import_quantum;
	VMEM_UNLOCK(vm);
}

void
vmem_set_limit(vmem_t *vm, vmem_size_t limit)
{

	VMEM_LOCK(vm);
	vm->vm_limit = limit;
	VMEM_UNLOCK(vm);
}

void
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
{

	VMEM_LOCK(vm);
	vm->vm_reclaimfn = reclaimfn;
	VMEM_UNLOCK(vm);
}

/*
 * vmem_init: Initializes vmem arena.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	int i;

	MPASS(quantum > 0);
	MPASS((quantum & (quantum - 1)) == 0);

	bzero(vm, sizeof(*vm));

	VMEM_CONDVAR_INIT(vm, name);
	VMEM_LOCK_INIT(vm, name);
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = flsl(quantum) - 1;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_limit = 0;
	vm->vm_inuse = 0;
	qc_init(vm, qcache_max);

	TAILQ_INIT(&vm->vm_seglist);
	vm->vm_cursor.bt_start = vm->vm_cursor.bt_size = 0;
	vm->vm_cursor.bt_type = BT_TYPE_CURSOR;
	TAILQ_INSERT_TAIL(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);

	for (i = 0; i < VMEM_MAXORDER; i++)
		LIST_INIT(&vm->vm_freelist[i]);

	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
	vm->vm_hashlist = vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

	mtx_lock(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	return vm;
}

/*
 * vmem_create: create an arena.
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{

	vmem_t *vm;

	vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
	if (vm == NULL)
		return (NULL);
	if (vmem_init(vm, name, base, size, quantum, qcache_max,
	    flags) == NULL)
		return (NULL);
	return (vm);
}

void
vmem_destroy(vmem_t *vm)
{

	mtx_lock(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc: allocate resource from the arena.
 */
int
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	const int strat __unused = flags & VMEM_FITMASK;
	qcache_t *qc;

	flags &= VMEM_FLAGS;
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");

	if (size <= vm->vm_qcache_max) {
		/*
		 * Resource 0 cannot be cached, so avoid a blocking allocation
		 * in qc_import() and give the vmem_xalloc() call below a chance
		 * to return 0.
		 */
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache,
		    (flags & ~M_WAITOK) | M_NOWAIT);
		if (__predict_true(*addrp != 0))
			return (0);
	}

	return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp));
}
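
/*
 * Usage sketch for the public API in this file (illustrative only; the base,
 * size and quantum cache values below are made up):
 *
 *	vmem_t *arena;
 *	vmem_addr_t addr;
 *
 *	arena = vmem_create("example", 0x100000, 0x400000, PAGE_SIZE,
 *	    PAGE_SIZE * 8, M_WAITOK);
 *	if (vmem_alloc(arena, 3 * PAGE_SIZE, M_BESTFIT | M_WAITOK, &addr) == 0)
 *		vmem_free(arena, addr, 3 * PAGE_SIZE);
 *	vmem_destroy(arena);
 */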
1345 */ 1346 first = bt_freehead_toalloc(vm, size, strat); 1347 VMEM_LOCK(vm); 1348 for (;;) { 1349 /* 1350 * Make sure we have enough tags to complete the 1351 * operation. 1352 */ 1353 if (vm->vm_nfreetags < BT_MAXALLOC && 1354 bt_fill(vm, flags) != 0) { 1355 error = ENOMEM; 1356 break; 1357 } 1358 1359 /* 1360 * Scan freelists looking for a tag that satisfies the 1361 * allocation. If we're doing BESTFIT we may encounter 1362 * sizes below the request. If we're doing FIRSTFIT we 1363 * inspect only the first element from each list. 1364 */ 1365 for (list = first; list < end; list++) { 1366 LIST_FOREACH(bt, list, bt_freelist) { 1367 if (bt->bt_size >= size) { 1368 error = vmem_fit(bt, size, align, phase, 1369 nocross, minaddr, maxaddr, addrp); 1370 if (error == 0) { 1371 vmem_clip(vm, bt, *addrp, size); 1372 goto out; 1373 } 1374 } 1375 /* FIRST skips to the next list. */ 1376 if (strat == M_FIRSTFIT) 1377 break; 1378 } 1379 } 1380 1381 /* 1382 * Retry if the fast algorithm failed. 1383 */ 1384 if (strat == M_FIRSTFIT) { 1385 strat = M_BESTFIT; 1386 first = bt_freehead_toalloc(vm, size, strat); 1387 continue; 1388 } 1389 1390 /* 1391 * Try a few measures to bring additional resources into the 1392 * arena. If all else fails, we will sleep waiting for 1393 * resources to be freed. 1394 */ 1395 if (!vmem_try_fetch(vm, size, align, flags)) { 1396 error = ENOMEM; 1397 break; 1398 } 1399 } 1400 out: 1401 VMEM_UNLOCK(vm); 1402 if (error != 0 && (flags & M_NOWAIT) == 0) 1403 panic("failed to allocate waiting allocation\n"); 1404 1405 return (error); 1406 } 1407 1408 /* 1409 * vmem_free: free the resource to the arena. 1410 */ 1411 void 1412 vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size) 1413 { 1414 qcache_t *qc; 1415 MPASS(size > 0); 1416 1417 if (size <= vm->vm_qcache_max && 1418 __predict_true(addr >= VMEM_ADDR_QCACHE_MIN)) { 1419 qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift]; 1420 uma_zfree(qc->qc_cache, (void *)addr); 1421 } else 1422 vmem_xfree(vm, addr, size); 1423 } 1424 1425 void 1426 vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size) 1427 { 1428 bt_t *bt; 1429 bt_t *t; 1430 1431 MPASS(size > 0); 1432 1433 VMEM_LOCK(vm); 1434 bt = bt_lookupbusy(vm, addr); 1435 MPASS(bt != NULL); 1436 MPASS(bt->bt_start == addr); 1437 MPASS(bt->bt_size == vmem_roundup_size(vm, size) || 1438 bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask); 1439 MPASS(bt->bt_type == BT_TYPE_BUSY); 1440 bt_rembusy(vm, bt); 1441 bt->bt_type = BT_TYPE_FREE; 1442 1443 /* coalesce */ 1444 t = TAILQ_NEXT(bt, bt_seglist); 1445 if (t != NULL && t->bt_type == BT_TYPE_FREE) { 1446 MPASS(BT_END(bt) < t->bt_start); /* YYY */ 1447 bt->bt_size += t->bt_size; 1448 bt_remfree(vm, t); 1449 bt_remseg(vm, t); 1450 } 1451 t = TAILQ_PREV(bt, vmem_seglist, bt_seglist); 1452 if (t != NULL && t->bt_type == BT_TYPE_FREE) { 1453 MPASS(BT_END(t) < bt->bt_start); /* YYY */ 1454 bt->bt_size += t->bt_size; 1455 bt->bt_start = t->bt_start; 1456 bt_remfree(vm, t); 1457 bt_remseg(vm, t); 1458 } 1459 1460 if (!vmem_try_release(vm, bt, false)) { 1461 bt_insfree(vm, bt); 1462 VMEM_CONDVAR_BROADCAST(vm); 1463 bt_freetrim(vm, BT_MAXFREE); 1464 } 1465 } 1466 1467 /* 1468 * vmem_add: 1469 * 1470 */ 1471 int 1472 vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags) 1473 { 1474 int error; 1475 1476 error = 0; 1477 flags &= VMEM_FLAGS; 1478 VMEM_LOCK(vm); 1479 if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0) 1480 vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC); 1481 else 1482 error = 
	VMEM_UNLOCK(vm);

	return (error);
}

/*
 * vmem_size: information about the arena's size
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{
	int i;

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	case VMEM_MAXFREE:
		VMEM_LOCK(vm);
		for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
			if (LIST_EMPTY(&vm->vm_freelist[i]))
				continue;
			VMEM_UNLOCK(vm);
			return ((vmem_size_t)ORDER2SIZE(i) <<
			    vm->vm_quantum_shift);
		}
		VMEM_UNLOCK(vm);
		return (0);
	default:
		panic("vmem_size");
	}
}

/* ---- debug */

#if defined(DDB) || defined(DIAGNOSTIC)

static void bt_dump(const bt_t *, int (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{

	switch (type) {
	case BT_TYPE_BUSY:
		return "busy";
	case BT_TYPE_FREE:
		return "free";
	case BT_TYPE_SPAN:
		return "span";
	case BT_TYPE_SPAN_STATIC:
		return "static span";
	case BT_TYPE_CURSOR:
		return "cursor";
	default:
		break;
	}
	return "BOGUS";
}

static void
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
{

	(*pr)("\t%p: %jx %jx, %d(%s)\n",
	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, int (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(DIAGNOSTIC) */

#if defined(DDB)
#include <ddb/ddb.h>

static bt_t *
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
"allocated" : "free"); 1619 } 1620 } 1621 1622 void 1623 vmem_printall(const char *modif, int (*pr)(const char *, ...)) 1624 { 1625 const vmem_t *vm; 1626 1627 LIST_FOREACH(vm, &vmem_list, vm_alllist) { 1628 vmem_dump(vm, pr); 1629 } 1630 } 1631 1632 void 1633 vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...)) 1634 { 1635 const vmem_t *vm = (const void *)addr; 1636 1637 vmem_dump(vm, pr); 1638 } 1639 1640 DB_SHOW_COMMAND(vmemdump, vmemdump) 1641 { 1642 1643 if (!have_addr) { 1644 db_printf("usage: show vmemdump <addr>\n"); 1645 return; 1646 } 1647 1648 vmem_dump((const vmem_t *)addr, db_printf); 1649 } 1650 1651 DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall) 1652 { 1653 const vmem_t *vm; 1654 1655 LIST_FOREACH(vm, &vmem_list, vm_alllist) 1656 vmem_dump(vm, db_printf); 1657 } 1658 1659 DB_SHOW_COMMAND(vmem, vmem_summ) 1660 { 1661 const vmem_t *vm = (const void *)addr; 1662 const bt_t *bt; 1663 size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER]; 1664 size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER]; 1665 int ord; 1666 1667 if (!have_addr) { 1668 db_printf("usage: show vmem <addr>\n"); 1669 return; 1670 } 1671 1672 db_printf("vmem %p '%s'\n", vm, vm->vm_name); 1673 db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1); 1674 db_printf("\tsize:\t%zu\n", vm->vm_size); 1675 db_printf("\tinuse:\t%zu\n", vm->vm_inuse); 1676 db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse); 1677 db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag); 1678 db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags); 1679 1680 memset(&ft, 0, sizeof(ft)); 1681 memset(&ut, 0, sizeof(ut)); 1682 memset(&fs, 0, sizeof(fs)); 1683 memset(&us, 0, sizeof(us)); 1684 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1685 ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift); 1686 if (bt->bt_type == BT_TYPE_BUSY) { 1687 ut[ord]++; 1688 us[ord] += bt->bt_size; 1689 } else if (bt->bt_type == BT_TYPE_FREE) { 1690 ft[ord]++; 1691 fs[ord] += bt->bt_size; 1692 } 1693 } 1694 db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n"); 1695 for (ord = 0; ord < VMEM_MAXORDER; ord++) { 1696 if (ut[ord] == 0 && ft[ord] == 0) 1697 continue; 1698 db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n", 1699 ORDER2SIZE(ord) << vm->vm_quantum_shift, 1700 ut[ord], us[ord], ft[ord], fs[ord]); 1701 } 1702 } 1703 1704 DB_SHOW_ALL_COMMAND(vmem, vmem_summall) 1705 { 1706 const vmem_t *vm; 1707 1708 LIST_FOREACH(vm, &vmem_list, vm_alllist) 1709 vmem_summ((db_expr_t)vm, TRUE, count, modif); 1710 } 1711 #endif /* defined(DDB) */ 1712 1713 #define vmem_printf printf 1714 1715 #if defined(DIAGNOSTIC) 1716 1717 static bool 1718 vmem_check_sanity(vmem_t *vm) 1719 { 1720 const bt_t *bt, *bt2; 1721 1722 MPASS(vm != NULL); 1723 1724 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1725 if (bt->bt_start > BT_END(bt)) { 1726 printf("corrupted tag\n"); 1727 bt_dump(bt, vmem_printf); 1728 return false; 1729 } 1730 } 1731 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1732 if (bt->bt_type == BT_TYPE_CURSOR) { 1733 if (bt->bt_start != 0 || bt->bt_size != 0) { 1734 printf("corrupted cursor\n"); 1735 return false; 1736 } 1737 continue; 1738 } 1739 TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) { 1740 if (bt == bt2) { 1741 continue; 1742 } 1743 if (bt2->bt_type == BT_TYPE_CURSOR) { 1744 continue; 1745 } 1746 if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) { 1747 continue; 1748 } 1749 if (bt->bt_start <= BT_END(bt2) && 1750 bt2->bt_start <= BT_END(bt)) { 1751 printf("overwrapped tags\n"); 1752 bt_dump(bt, vmem_printf); 1753 bt_dump(bt2, vmem_printf); 1754 return false; 1755 } 1756 } 1757 } 

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(DIAGNOSTIC) */