/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * Copyright (c) 2013 EMC Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From:
 *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
 *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>

#include "opt_vm.h"

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma_int.h>

#define	VMEM_OPTORDER		5
#define	VMEM_OPTVALUE		(1 << VMEM_OPTORDER)
#define	VMEM_MAXORDER						\
	(VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)

#define	VMEM_HASHSIZE_MIN	16
#define	VMEM_HASHSIZE_MAX	131072

#define	VMEM_QCACHE_IDX_MAX	16

#define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)

#define	VMEM_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | \
    M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)

#define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)

#define	QC_NAME_MAX	16

/*
 * Data structures private to vmem.
 */
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");

typedef struct vmem_btag bt_t;

TAILQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

struct qcache {
	uma_zone_t	qc_cache;
	vmem_t		*qc_vmem;
	vmem_size_t	qc_size;
	char		qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))

#define	VMEM_NAME_MAX	16

/* boundary tag */
struct vmem_btag {
	TAILQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t	bt_start;
	vmem_size_t	bt_size;
	int		bt_type;
};

/* vmem arena */
struct vmem {
	struct mtx_padalign	vm_lock;
	struct cv		vm_cv;
	char			vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem)	vm_alllist;
	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
	struct vmem_seglist	vm_seglist;
	struct vmem_hashlist	*vm_hashlist;
	vmem_size_t		vm_hashsize;

	/* Constant after init */
	vmem_size_t		vm_qcache_max;
	vmem_size_t		vm_quantum_mask;
	vmem_size_t		vm_import_quantum;
	int			vm_quantum_shift;

	/* Written on alloc/free */
	LIST_HEAD(, vmem_btag)	vm_freetags;
	int			vm_nfreetags;
	int			vm_nbusytag;
	vmem_size_t		vm_inuse;
	vmem_size_t		vm_size;
	vmem_size_t		vm_limit;
	struct vmem_btag	vm_cursor;

	/* Used on import. */
	vmem_import_t		*vm_importfn;
	vmem_release_t		*vm_releasefn;
	void			*vm_arg;

	/* Space exhaustion callback. */
	vmem_reclaim_t		*vm_reclaimfn;

	/* quantum cache */
	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
};

#define	BT_TYPE_SPAN		1	/* Allocated from importfn */
#define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
#define	BT_TYPE_FREE		3	/* Available space. */
#define	BT_TYPE_BUSY		4	/* Used space. */
#define	BT_TYPE_CURSOR		5	/* Cursor for nextfit allocations. */
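
/*
 * BT_ISSPAN_P() below relies on the two span types having the lowest
 * values, so keep BT_TYPE_SPAN and BT_TYPE_SPAN_STATIC first if new tag
 * types are added.
 */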
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)

#if defined(DIAGNOSTIC)
static int enable_vmem_check = 1;
SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
    &enable_vmem_check, 0, "Enable vmem check");
static void vmem_check(vmem_t *);
#endif

static struct callout	vmem_periodic_ch;
static int		vmem_periodic_interval;
static struct task	vmem_periodic_wk;

static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
static uma_zone_t vmem_zone;

/* ---- misc */
#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)


#define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
#define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);

#define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((order) < VMEM_OPTVALUE ? ((order) + 1) : \
	(vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
#define	SIZE2ORDER(size)	((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
	(flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))

/*
 * Maximum number of boundary tags that may be required to satisfy an
 * allocation.  Two may be required to import.  Another two may be
 * required to clip edges.
 */
#define	BT_MAXALLOC	4

/*
 * Max free limits the number of locally cached boundary tags.  We
 * just want to avoid hitting the zone allocator for every call.
 */
#define	BT_MAXFREE	(BT_MAXALLOC * 8)

/* Allocator for boundary tags. */
static uma_zone_t vmem_bt_zone;

/* boot time arena storage. */
static struct vmem kernel_arena_storage;
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
/* kernel and kmem arenas are aliased for backwards KPI compat. */
vmem_t *kernel_arena = &kernel_arena_storage;
vmem_t *kmem_arena = &kernel_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;

#ifdef DEBUG_MEMGUARD
static struct vmem memguard_arena_storage;
vmem_t *memguard_arena = &memguard_arena_storage;
#endif

/*
 * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
 * allocation will not fail once bt_fill() passes.  To do so we cache
 * at least the maximum possible tag allocations in the arena.
 */
static int
bt_fill(vmem_t *vm, int flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * Only allow the kernel arena and arenas derived from kernel arena to
	 * dip into reserve tags.  They are where new tags come from.
	 */
	flags &= BT_FLAGS;
	if (vm != kernel_arena && vm->vm_arg != kernel_arena)
		flags &= ~M_USE_RESERVE;

	/*
	 * Loop until we meet the reserve.  To minimize the lock shuffle
	 * and prevent simultaneous fills we first try a NOWAIT regardless
	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
	 * holding a vmem lock.
	 */
	while (vm->vm_nfreetags < BT_MAXALLOC) {
		bt = uma_zalloc(vmem_bt_zone,
		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
		if (bt == NULL) {
			VMEM_UNLOCK(vm);
			bt = uma_zalloc(vmem_bt_zone, flags);
			VMEM_LOCK(vm);
			if (bt == NULL)
				break;
		}
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags < BT_MAXALLOC)
		return ENOMEM;

	return 0;
}

/*
 * Pop a tag off of the freetag stack.
 */
static bt_t *
bt_alloc(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	bt = LIST_FIRST(&vm->vm_freetags);
	MPASS(bt != NULL);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return bt;
}

/*
 * Trim the per-vmem free list.  Returns with the lock released to
 * avoid allocator recursions.
 */
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	LIST_HEAD(, vmem_btag) freetags;
	bt_t *bt;

	LIST_INIT(&freetags);
	VMEM_ASSERT_LOCKED(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
	}
	VMEM_UNLOCK(vm);
	while ((bt = LIST_FIRST(&freetags)) != NULL) {
		LIST_REMOVE(bt, bt_freelist);
		uma_zfree(vmem_bt_zone, bt);
	}
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 2]
 *  :
 * freelist[29] ... [30, 30]
 * freelist[30] ... [31, 31]
 * freelist[31] ... [32, 63]
 * freelist[32] ... [64, 127]
 *  :
 * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For M_FIRSTFIT, return the list in which any blocks are large enough
 * for the requested size.  Otherwise, return the list which can have blocks
 * large enough for the requested size.
 */
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);

	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
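		/*
		 * Rounding up to the next size class here means every block
		 * on the chosen list is large enough for the request, so a
		 * first-fit scan never has to skip undersized blocks.
		 */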
	}
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), 0);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type != BT_TYPE_CURSOR);
	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
	bt_free(vm, bt);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

/*
 * Import from the arena into the quantum cache in UMA.
 *
 * We use VMEM_ADDR_QCACHE_MIN instead of 0: uma_zalloc() returns 0 to indicate
 * failure, so UMA can't be used to cache a resource with value 0.
 */
static int
qc_import(void *arg, void **store, int cnt, int domain, int flags)
{
	qcache_t *qc;
	vmem_addr_t addr;
	int i;

	KASSERT((flags & M_WAITOK) == 0, ("blocking allocation"));

	qc = arg;
	for (i = 0; i < cnt; i++) {
		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
		    VMEM_ADDR_QCACHE_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
			break;
		store[i] = (void *)addr;
	}
	return (i);
}

/*
 * Release memory from the UMA cache to the arena.
 */
static void
qc_release(void *arg, void **store, int cnt)
{
	qcache_t *qc;
	int i;

	qc = arg;
	for (i = 0; i < cnt; i++)
		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
}

static void
qc_init(vmem_t *vm, vmem_size_t qcache_max)
{
	qcache_t *qc;
	vmem_size_t size;
	int qcache_idx_max;
	int i;

	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
	    VMEM_QCACHE_IDX_MAX);
	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++) {
		qc = &vm->vm_qcache[i];
		size = (i + 1) << vm->vm_quantum_shift;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_vmem = vm;
		qc->qc_size = size;
		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
		    UMA_ZONE_VM);
		MPASS(qc->qc_cache);
	}
}

static void
qc_destroy(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zdestroy(vm->vm_qcache[i].qc_cache);
}

static void
qc_drain(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zone_reclaim(vm->vm_qcache[i].qc_cache, UMA_RECLAIM_DRAIN);
}

#ifndef UMA_MD_SMALL_ALLOC

static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;

/*
 * vmem_bt_alloc:  Allocate a new page of boundary tags.
 *
 * On architectures with uma_small_alloc there is no recursion; no address
 * space need be allocated to allocate boundary tags.  For the others, we
 * must handle recursion.  Boundary tags are necessary to allocate new
 * boundary tags.
 *
 * UMA guarantees that enough tags are held in reserve to allocate a new
 * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
 * when allocating the page to hold new boundary tags.  In this way the
 * reserve is automatically filled by the allocation that uses the reserve.
 *
 * We still have to guarantee that the new tags are allocated atomically since
 * many threads may try concurrently.  The bt_lock provides this guarantee.
 * We convert WAITOK allocations to NOWAIT and then handle the blocking here
 * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
 * loop again after checking to see if we lost the race to allocate.
 *
 * There is a small race between vmem_bt_alloc() returning the page and the
 * zone lock being acquired to add the page to the zone.  For WAITOK
 * allocations we just pause briefly.  NOWAIT may experience a transient
 * failure.  To alleviate this we permit a small number of simultaneous
 * fills to proceed concurrently so NOWAIT is less likely to fail unless
 * we are really out of KVA.
 */
static void *
vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	vmem_addr_t addr;

	*pflag = UMA_SLAB_KERNEL;

	/*
	 * Single thread boundary tag allocation so that the address space
	 * and memory are added in one atomic operation.
	 */
	mtx_lock(&vmem_bt_lock);
	if (vmem_xalloc(vm_dom[domain].vmd_kernel_arena, bytes, 0, 0, 0,
	    VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
		if (kmem_back_domain(domain, kernel_object, addr, bytes,
		    M_NOWAIT | M_USE_RESERVE) == 0) {
			mtx_unlock(&vmem_bt_lock);
			return ((void *)addr);
		}
		vmem_xfree(vm_dom[domain].vmd_kernel_arena, addr, bytes);
		mtx_unlock(&vmem_bt_lock);
		/*
		 * Out of memory, not address space.  This may not even be
		 * possible due to M_USE_RESERVE page allocation.
		 */
		if (wait & M_WAITOK)
			vm_wait_domain(domain);
		return (NULL);
	}
	mtx_unlock(&vmem_bt_lock);
	/*
	 * We're either out of address space or lost a fill race.
	 */
	if (wait & M_WAITOK)
		pause("btalloc", 1);

	return (NULL);
}
#endif

void
vmem_startup(void)
{

	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
	vmem_zone = uma_zcreate("vmem",
	    sizeof(struct vmem), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);
	vmem_bt_zone = uma_zcreate("vmem btag",
	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
#ifndef UMA_MD_SMALL_ALLOC
	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
	/*
	 * Reserve enough tags to allocate new tags.  We allow multiple
	 * CPUs to attempt to allocate new tags concurrently to limit
	 * false restarts in UMA.  vmem_bt_alloc() allocates from a per-domain
	 * arena, which may involve importing a range from the kernel arena,
	 * so we need to keep at least 2 * BT_MAXALLOC tags reserved.
	 */
	uma_zone_reserve(vmem_bt_zone, 2 * BT_MAXALLOC * mp_ncpus);
	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
#endif
}

/* ---- rehash */

static int
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	vmem_size_t oldhashsize;

	MPASS(newhashsize > 0);

	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
	    M_VMEM, M_NOWAIT);
	if (newhashlist == NULL)
		return ENOMEM;
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != vm->vm_hash0) {
		free(oldhashlist, M_VMEM);
	}

	return 0;
}

static void
vmem_periodic_kick(void *dummy)
{

	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
}

static void
vmem_periodic(void *unused, int pending)
{
	vmem_t *vm;
	vmem_size_t desired;
	vmem_size_t current;

	mtx_lock(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
#ifdef DIAGNOSTIC
		/* Convenient time to verify vmem state. */
		if (enable_vmem_check == 1) {
			VMEM_LOCK(vm);
			vmem_check(vm);
			VMEM_UNLOCK(vm);
		}
#endif
		desired = 1 << flsl(vm->vm_nbusytag);
		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
		    VMEM_HASHSIZE_MAX);
		current = vm->vm_hashsize;

		/* Grow in powers of two.  Shrink less aggressively. */
		if (desired >= current * 2 || desired * 4 <= current)
			vmem_rehash(vm, desired);

		/*
		 * Periodically wake up threads waiting for resources,
		 * so they can ask for reclamation again.
		 */
		VMEM_CONDVAR_BROADCAST(vm);
	}
	mtx_unlock(&vmem_list_lock);

	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}

static void
vmem_start_callout(void *unused)
{

	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
	vmem_periodic_interval = hz * 10;
	callout_init(&vmem_periodic_ch, 1);
	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);

static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
	bt_t *btspan;
	bt_t *btfree;

	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
	MPASS((size & vm->vm_quantum_mask) == 0);

	btspan = bt_alloc(vm);
	btspan->bt_type = type;
	btspan->bt_start = addr;
	btspan->bt_size = size;
	bt_insseg_tail(vm, btspan);

	btfree = bt_alloc(vm);
	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);

	vm->vm_size += size;
}

static void
vmem_destroy1(vmem_t *vm)
{
	bt_t *bt;

	/*
	 * Drain per-cpu quantum caches.
	 */
	qc_destroy(vm);

	/*
	 * The vmem should now only contain empty segments.
	 */
	VMEM_LOCK(vm);
	MPASS(vm->vm_nbusytag == 0);

	TAILQ_REMOVE(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
		bt_remseg(vm, bt);

	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
		free(vm->vm_hashlist, M_VMEM);

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	uma_zfree(vmem_zone, vm);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_addr_t addr;
	int error;

	if (vm->vm_importfn == NULL)
		return (EINVAL);

	/*
	 * To make sure we get a span that meets the alignment we double it
	 * and add the size to the tail.  This slightly overestimates.
	 */
	if (align != vm->vm_quantum_mask + 1)
		size = (align * 2) + size;
	size = roundup(size, vm->vm_import_quantum);

	if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
		return (ENOMEM);

	/*
	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
	 * span and the tag we want to allocate from it.
	 */
	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
	vm->vm_nfreetags -= BT_MAXALLOC;
	VMEM_UNLOCK(vm);
	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	VMEM_LOCK(vm);
	vm->vm_nfreetags += BT_MAXALLOC;
	if (error)
		return (ENOMEM);

	vmem_add1(vm, addr, size, BT_TYPE_SPAN);

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
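 *
 * On success the returned address addr satisfies (addr & (align - 1)) ==
 * phase, lies within [minaddr, maxaddr], and, when nocross != 0, the range
 * [addr, addr + size - 1] does not cross a nocross-aligned boundary.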
 *
 * It is the caller's responsibility to ensure the region is big enough
 * before calling us.
 */
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
    vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	MPASS(size > 0);
	MPASS(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integers of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr)
		end = maxaddr;
	if (start > end)
		return (ENOMEM);

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start)
		start += align;
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		MPASS(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		MPASS((start & (align - 1)) == phase);
		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
		MPASS(minaddr <= start);
		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
		MPASS(bt->bt_start <= start);
		MPASS(BT_END(bt) - start >= size - 1);
		*addrp = start;

		return (0);
	}
	return (ENOMEM);
}

/*
 * vmem_clip:  Trim the boundary tag edges to the requested start and size.
 */
static void
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
{
	bt_t *btnew;
	bt_t *btprev;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_FREE);
	MPASS(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btprev = bt_alloc(vm);
		btprev->bt_type = BT_TYPE_FREE;
		btprev->bt_start = bt->bt_start;
		btprev->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btprev->bt_size;
		bt_insfree(vm, btprev);
		bt_insseg(vm, btprev,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
	}
	MPASS(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew = bt_alloc(vm);
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		bt = btnew;
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
	}
	MPASS(bt->bt_size >= size);
}

static int
vmem_try_fetch(vmem_t *vm, const vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_size_t avail;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * XXX it is possible to fail to meet xalloc constraints with the
	 * imported region.  It is up to the user to specify the
	 * import quantum such that it can satisfy any allocation.
	 */
	if (vmem_import(vm, size, align, flags) == 0)
		return (1);

	/*
	 * Try to free some space from the quantum cache or reclaim
	 * functions if available.
	 */
	if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
		avail = vm->vm_size - vm->vm_inuse;
		VMEM_UNLOCK(vm);
		if (vm->vm_qcache_max != 0)
			qc_drain(vm);
		if (vm->vm_reclaimfn != NULL)
			vm->vm_reclaimfn(vm, flags);
		VMEM_LOCK(vm);
		/* If we were successful, retry even NOWAIT. */
		if (vm->vm_size - vm->vm_inuse > avail)
			return (1);
	}
	if ((flags & M_NOWAIT) != 0)
		return (0);
	VMEM_CONDVAR_WAIT(vm);
	return (1);
}

static int
vmem_try_release(vmem_t *vm, struct vmem_btag *bt, const bool remfree)
{
	struct vmem_btag *prev;

	MPASS(bt->bt_type == BT_TYPE_FREE);

	if (vm->vm_releasefn == NULL)
		return (0);

	prev = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	MPASS(prev != NULL);
	MPASS(prev->bt_type != BT_TYPE_FREE);

	if (prev->bt_type == BT_TYPE_SPAN && prev->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		MPASS(prev->bt_start == bt->bt_start);
		spanaddr = prev->bt_start;
		spansize = prev->bt_size;
		if (remfree)
			bt_remfree(vm, bt);
		bt_remseg(vm, bt);
		bt_remseg(vm, prev);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
		vm->vm_releasefn(vm->vm_arg, spanaddr, spansize);
		return (1);
	}
	return (0);
}

static int
vmem_xalloc_nextfit(vmem_t *vm, const vmem_size_t size, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross, int flags,
    vmem_addr_t *addrp)
{
	struct vmem_btag *bt, *cursor, *next, *prev;
	int error;

	error = ENOMEM;
	VMEM_LOCK(vm);
retry:
	/*
	 * Make sure we have enough tags to complete the operation.
	 */
	if (vm->vm_nfreetags < BT_MAXALLOC && bt_fill(vm, flags) != 0)
		goto out;

	/*
	 * Find the next free tag meeting our constraints.  If one is found,
	 * perform the allocation.
	 */
	for (cursor = &vm->vm_cursor, bt = TAILQ_NEXT(cursor, bt_seglist);
	    bt != cursor; bt = TAILQ_NEXT(bt, bt_seglist)) {
		if (bt == NULL)
			bt = TAILQ_FIRST(&vm->vm_seglist);
		if (bt->bt_type == BT_TYPE_FREE && bt->bt_size >= size &&
		    (error = vmem_fit(bt, size, align, phase, nocross,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
			vmem_clip(vm, bt, *addrp, size);
			break;
		}
	}

	/*
	 * Try to coalesce free segments around the cursor.  If we succeed, and
	 * have not yet satisfied the allocation request, try again with the
	 * newly coalesced segment.
	 */
	if ((next = TAILQ_NEXT(cursor, bt_seglist)) != NULL &&
	    (prev = TAILQ_PREV(cursor, vmem_seglist, bt_seglist)) != NULL &&
	    next->bt_type == BT_TYPE_FREE && prev->bt_type == BT_TYPE_FREE &&
	    prev->bt_start + prev->bt_size == next->bt_start) {
		prev->bt_size += next->bt_size;
		bt_remfree(vm, next);
		bt_remseg(vm, next);

		/*
		 * The coalesced segment might be able to satisfy our request.
		 * If not, we might need to release it from the arena.
		 */
		if (error == ENOMEM && prev->bt_size >= size &&
		    (error = vmem_fit(prev, size, align, phase, nocross,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
			vmem_clip(vm, prev, *addrp, size);
			bt = prev;
		} else
			(void)vmem_try_release(vm, prev, true);
	}

	/*
	 * If the allocation was successful, advance the cursor.
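	 * The cursor is reinserted just before the first segment that starts
	 * at or beyond the end of the new allocation, so the next M_NEXTFIT
	 * search resumes from there.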
	 */
	if (error == 0) {
		TAILQ_REMOVE(&vm->vm_seglist, cursor, bt_seglist);
		for (; bt != NULL && bt->bt_start < *addrp + size;
		    bt = TAILQ_NEXT(bt, bt_seglist))
			;
		if (bt != NULL)
			TAILQ_INSERT_BEFORE(bt, cursor, bt_seglist);
		else
			TAILQ_INSERT_HEAD(&vm->vm_seglist, cursor, bt_seglist);
	}

	/*
	 * Attempt to bring additional resources into the arena.  If that fails
	 * and M_WAITOK is specified, sleep waiting for resources to be freed.
	 */
	if (error == ENOMEM && vmem_try_fetch(vm, size, align, flags))
		goto retry;

out:
	VMEM_UNLOCK(vm);
	return (error);
}

/* ---- vmem API */

void
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
    vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
{

	VMEM_LOCK(vm);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_import_quantum = import_quantum;
	VMEM_UNLOCK(vm);
}

void
vmem_set_limit(vmem_t *vm, vmem_size_t limit)
{

	VMEM_LOCK(vm);
	vm->vm_limit = limit;
	VMEM_UNLOCK(vm);
}

void
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
{

	VMEM_LOCK(vm);
	vm->vm_reclaimfn = reclaimfn;
	VMEM_UNLOCK(vm);
}

/*
 * vmem_init: Initializes vmem arena.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	int i;

	MPASS(quantum > 0);
	MPASS((quantum & (quantum - 1)) == 0);

	bzero(vm, sizeof(*vm));

	VMEM_CONDVAR_INIT(vm, name);
	VMEM_LOCK_INIT(vm, name);
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = flsl(quantum) - 1;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_limit = 0;
	vm->vm_inuse = 0;
	qc_init(vm, qcache_max);

	TAILQ_INIT(&vm->vm_seglist);
	vm->vm_cursor.bt_start = vm->vm_cursor.bt_size = 0;
	vm->vm_cursor.bt_type = BT_TYPE_CURSOR;
	TAILQ_INSERT_TAIL(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);

	for (i = 0; i < VMEM_MAXORDER; i++)
		LIST_INIT(&vm->vm_freelist[i]);

	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
	vm->vm_hashlist = vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

	mtx_lock(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	return vm;
}

/*
 * vmem_create: create an arena.
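 *
 * Illustrative usage sketch (not taken from this file; the arena name,
 * range, and sizes below are made-up examples):
 *
 *	vmem_t *arena;
 *	vmem_addr_t addr;
 *
 *	arena = vmem_create("example", 0x1000, 0x10000, PAGE_SIZE, 0,
 *	    M_WAITOK);
 *	if (vmem_alloc(arena, 2 * PAGE_SIZE, M_BESTFIT | M_WAITOK, &addr) == 0)
 *		vmem_free(arena, addr, 2 * PAGE_SIZE);
 *	vmem_destroy(arena);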
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{

	vmem_t *vm;

	vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
	if (vm == NULL)
		return (NULL);
	if (vmem_init(vm, name, base, size, quantum, qcache_max,
	    flags) == NULL)
		return (NULL);
	return (vm);
}

void
vmem_destroy(vmem_t *vm)
{

	mtx_lock(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc: allocate resource from the arena.
 */
int
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	const int strat __unused = flags & VMEM_FITMASK;
	qcache_t *qc;

	flags &= VMEM_FLAGS;
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");

	if (size <= vm->vm_qcache_max) {
		/*
		 * Resource 0 cannot be cached, so avoid a blocking allocation
		 * in qc_import() and give the vmem_xalloc() call below a
		 * chance to return 0.
		 */
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache,
		    (flags & ~M_WAITOK) | M_NOWAIT);
		if (__predict_true(*addrp != 0))
			return (0);
	}

	return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp));
}

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
    vmem_addr_t *addrp)
{
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	int error;
	int strat;

	flags &= VMEM_FLAGS;
	strat = flags & VMEM_FITMASK;
	MPASS(size0 > 0);
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
	MPASS((align & vm->vm_quantum_mask) == 0);
	MPASS((align & (align - 1)) == 0);
	MPASS((phase & vm->vm_quantum_mask) == 0);
	MPASS((nocross & vm->vm_quantum_mask) == 0);
	MPASS((nocross & (nocross - 1)) == 0);
	MPASS((align == 0 && phase == 0) || phase < align);
	MPASS(nocross == 0 || nocross >= size);
	MPASS(minaddr <= maxaddr);
	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
	if (strat == M_NEXTFIT)
		MPASS(minaddr == VMEM_ADDR_MIN && maxaddr == VMEM_ADDR_MAX);

	if (align == 0)
		align = vm->vm_quantum_mask + 1;
	*addrp = 0;

	/*
	 * Next-fit allocations don't use the freelists.
	 */
	if (strat == M_NEXTFIT)
		return (vmem_xalloc_nextfit(vm, size0, align, phase, nocross,
		    flags, addrp));

	end = &vm->vm_freelist[VMEM_MAXORDER];
	/*
	 * choose a free block from which we allocate.
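	 * The search starts from the list returned by bt_freehead_toalloc()
	 * and walks toward larger size classes until a usable segment is
	 * found.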
	 */
	first = bt_freehead_toalloc(vm, size, strat);
	VMEM_LOCK(vm);
	for (;;) {
		/*
		 * Make sure we have enough tags to complete the
		 * operation.
		 */
		if (vm->vm_nfreetags < BT_MAXALLOC &&
		    bt_fill(vm, flags) != 0) {
			error = ENOMEM;
			break;
		}

		/*
		 * Scan freelists looking for a tag that satisfies the
		 * allocation.  If we're doing BESTFIT we may encounter
		 * sizes below the request.  If we're doing FIRSTFIT we
		 * inspect only the first element from each list.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					error = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, addrp);
					if (error == 0) {
						vmem_clip(vm, bt, *addrp, size);
						goto out;
					}
				}
				/* FIRST skips to the next list. */
				if (strat == M_FIRSTFIT)
					break;
			}
		}

		/*
		 * Retry if the fast algorithm failed.
		 */
		if (strat == M_FIRSTFIT) {
			strat = M_BESTFIT;
			first = bt_freehead_toalloc(vm, size, strat);
			continue;
		}

		/*
		 * Try a few measures to bring additional resources into the
		 * arena.  If all else fails, we will sleep waiting for
		 * resources to be freed.
		 */
		if (!vmem_try_fetch(vm, size, align, flags)) {
			error = ENOMEM;
			break;
		}
	}
out:
	VMEM_UNLOCK(vm);
	if (error != 0 && (flags & M_NOWAIT) == 0)
		panic("failed to allocate waiting allocation\n");

	return (error);
}

/*
 * vmem_free: free the resource to the arena.
 */
void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	qcache_t *qc;
	MPASS(size > 0);

	if (size <= vm->vm_qcache_max &&
	    __predict_true(addr >= VMEM_ADDR_QCACHE_MIN)) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		uma_zfree(qc->qc_cache, (void *)addr);
	} else
		vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	MPASS(size > 0);

	VMEM_LOCK(vm);
	bt = bt_lookupbusy(vm, addr);
	MPASS(bt != NULL);
	MPASS(bt->bt_start == addr);
	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	MPASS(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}

	if (!vmem_try_release(vm, bt, false)) {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
	}
}

/*
 * vmem_add: add a span of resource to the arena.
 */
int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
{
	int error;

	error = 0;
	flags &= VMEM_FLAGS;
	VMEM_LOCK(vm);
	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
	else
		error = ENOMEM;
	VMEM_UNLOCK(vm);

	return (error);
}

/*
 * vmem_size: information about the arena's size.
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{
	int i;

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	case VMEM_MAXFREE:
		VMEM_LOCK(vm);
		for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
			if (LIST_EMPTY(&vm->vm_freelist[i]))
				continue;
			VMEM_UNLOCK(vm);
			return ((vmem_size_t)ORDER2SIZE(i) <<
			    vm->vm_quantum_shift);
		}
		VMEM_UNLOCK(vm);
		return (0);
	default:
		panic("vmem_size");
	}
}

/* ---- debug */

#if defined(DDB) || defined(DIAGNOSTIC)

static void bt_dump(const bt_t *, int (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{

	switch (type) {
	case BT_TYPE_BUSY:
		return "busy";
	case BT_TYPE_FREE:
		return "free";
	case BT_TYPE_SPAN:
		return "span";
	case BT_TYPE_SPAN_STATIC:
		return "static span";
	case BT_TYPE_CURSOR:
		return "cursor";
	default:
		break;
	}
	return "BOGUS";
}

static void
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
{

	(*pr)("\t%p: %jx %jx, %d(%s)\n",
	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, int (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(DIAGNOSTIC) */

#if defined(DDB)
#include <ddb/ddb.h>

static bt_t *
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
"allocated" : "free"); 1617 } 1618 } 1619 1620 void 1621 vmem_printall(const char *modif, int (*pr)(const char *, ...)) 1622 { 1623 const vmem_t *vm; 1624 1625 LIST_FOREACH(vm, &vmem_list, vm_alllist) { 1626 vmem_dump(vm, pr); 1627 } 1628 } 1629 1630 void 1631 vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...)) 1632 { 1633 const vmem_t *vm = (const void *)addr; 1634 1635 vmem_dump(vm, pr); 1636 } 1637 1638 DB_SHOW_COMMAND(vmemdump, vmemdump) 1639 { 1640 1641 if (!have_addr) { 1642 db_printf("usage: show vmemdump <addr>\n"); 1643 return; 1644 } 1645 1646 vmem_dump((const vmem_t *)addr, db_printf); 1647 } 1648 1649 DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall) 1650 { 1651 const vmem_t *vm; 1652 1653 LIST_FOREACH(vm, &vmem_list, vm_alllist) 1654 vmem_dump(vm, db_printf); 1655 } 1656 1657 DB_SHOW_COMMAND(vmem, vmem_summ) 1658 { 1659 const vmem_t *vm = (const void *)addr; 1660 const bt_t *bt; 1661 size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER]; 1662 size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER]; 1663 int ord; 1664 1665 if (!have_addr) { 1666 db_printf("usage: show vmem <addr>\n"); 1667 return; 1668 } 1669 1670 db_printf("vmem %p '%s'\n", vm, vm->vm_name); 1671 db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1); 1672 db_printf("\tsize:\t%zu\n", vm->vm_size); 1673 db_printf("\tinuse:\t%zu\n", vm->vm_inuse); 1674 db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse); 1675 db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag); 1676 db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags); 1677 1678 memset(&ft, 0, sizeof(ft)); 1679 memset(&ut, 0, sizeof(ut)); 1680 memset(&fs, 0, sizeof(fs)); 1681 memset(&us, 0, sizeof(us)); 1682 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1683 ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift); 1684 if (bt->bt_type == BT_TYPE_BUSY) { 1685 ut[ord]++; 1686 us[ord] += bt->bt_size; 1687 } else if (bt->bt_type == BT_TYPE_FREE) { 1688 ft[ord]++; 1689 fs[ord] += bt->bt_size; 1690 } 1691 } 1692 db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n"); 1693 for (ord = 0; ord < VMEM_MAXORDER; ord++) { 1694 if (ut[ord] == 0 && ft[ord] == 0) 1695 continue; 1696 db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n", 1697 ORDER2SIZE(ord) << vm->vm_quantum_shift, 1698 ut[ord], us[ord], ft[ord], fs[ord]); 1699 } 1700 } 1701 1702 DB_SHOW_ALL_COMMAND(vmem, vmem_summall) 1703 { 1704 const vmem_t *vm; 1705 1706 LIST_FOREACH(vm, &vmem_list, vm_alllist) 1707 vmem_summ((db_expr_t)vm, TRUE, count, modif); 1708 } 1709 #endif /* defined(DDB) */ 1710 1711 #define vmem_printf printf 1712 1713 #if defined(DIAGNOSTIC) 1714 1715 static bool 1716 vmem_check_sanity(vmem_t *vm) 1717 { 1718 const bt_t *bt, *bt2; 1719 1720 MPASS(vm != NULL); 1721 1722 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1723 if (bt->bt_start > BT_END(bt)) { 1724 printf("corrupted tag\n"); 1725 bt_dump(bt, vmem_printf); 1726 return false; 1727 } 1728 } 1729 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1730 if (bt->bt_type == BT_TYPE_CURSOR) { 1731 if (bt->bt_start != 0 || bt->bt_size != 0) { 1732 printf("corrupted cursor\n"); 1733 return false; 1734 } 1735 continue; 1736 } 1737 TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) { 1738 if (bt == bt2) { 1739 continue; 1740 } 1741 if (bt2->bt_type == BT_TYPE_CURSOR) { 1742 continue; 1743 } 1744 if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) { 1745 continue; 1746 } 1747 if (bt->bt_start <= BT_END(bt2) && 1748 bt2->bt_start <= BT_END(bt)) { 1749 printf("overwrapped tags\n"); 1750 bt_dump(bt, vmem_printf); 1751 bt_dump(bt2, vmem_printf); 1752 return false; 1753 } 1754 } 1755 } 

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(DIAGNOSTIC) */