/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>
#include <sys/seq.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <vm/vm_domain.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

#ifdef VM_NUMA_ALLOC
struct mem_affinity *mem_affinity;
int *mem_locality;
#endif

int vm_ndomains = 1;

struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
int vm_phys_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(_vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
        RB_ENTRY(vm_phys_fictitious_seg) node;
        /* Memory region data */
        vm_paddr_t      start;
        vm_paddr_t      end;
        vm_page_t       first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists;

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_ISADMA
#define VM_ISADMA_BOUNDARY      16777216
#endif
#ifdef VM_FREELIST_DMA32
#define VM_DMA32_BOUNDARY       ((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
#endif
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

#ifdef VM_NUMA_ALLOC
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_locality, "A", "Phys Locality Info");
#endif

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

/*
 * Default to first-touch + round-robin.
 */
static struct mtx vm_default_policy_mtx;
MTX_SYSINIT(vm_default_policy, &vm_default_policy_mtx, "default policy mutex",
    MTX_DEF);
#ifdef VM_NUMA_ALLOC
static struct vm_domain_policy vm_default_policy =
    VM_DOMAIN_POLICY_STATIC_INITIALISER(VM_POLICY_FIRST_TOUCH_ROUND_ROBIN, 0);
#else
/* Use round-robin so the domain policy code will only try once per allocation */
static struct vm_domain_policy vm_default_policy =
    VM_DOMAIN_POLICY_STATIC_INITIALISER(VM_POLICY_ROUND_ROBIN, 0);
#endif

static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool,
    int order);
static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);
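/*
 * Report and set the default domain allocation policy via the
 * vm.default_policy sysctl.  Illustrative usage from userland:
 *
 *      sysctl vm.default_policy=first-touch-rr
 */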
static int
sysctl_vm_default_policy(SYSCTL_HANDLER_ARGS)
{
        char policy_name[32];
        int error;

        mtx_lock(&vm_default_policy_mtx);

        /* Map policy to output string */
        switch (vm_default_policy.p.policy) {
        case VM_POLICY_FIRST_TOUCH:
                strcpy(policy_name, "first-touch");
                break;
        case VM_POLICY_FIRST_TOUCH_ROUND_ROBIN:
                strcpy(policy_name, "first-touch-rr");
                break;
        case VM_POLICY_ROUND_ROBIN:
        default:
                strcpy(policy_name, "rr");
                break;
        }
        mtx_unlock(&vm_default_policy_mtx);

        error = sysctl_handle_string(oidp, &policy_name[0],
            sizeof(policy_name), req);
        if (error != 0 || req->newptr == NULL)
                return (error);

        mtx_lock(&vm_default_policy_mtx);
        /* Set: match on the subset of policies that make sense as a default */
        if (strcmp("first-touch-rr", policy_name) == 0) {
                vm_domain_policy_set(&vm_default_policy,
                    VM_POLICY_FIRST_TOUCH_ROUND_ROBIN, 0);
        } else if (strcmp("first-touch", policy_name) == 0) {
                vm_domain_policy_set(&vm_default_policy,
                    VM_POLICY_FIRST_TOUCH, 0);
        } else if (strcmp("rr", policy_name) == 0) {
                vm_domain_policy_set(&vm_default_policy,
                    VM_POLICY_ROUND_ROBIN, 0);
        } else {
                error = EINVAL;
                goto finish;
        }

        error = 0;
finish:
        mtx_unlock(&vm_default_policy_mtx);
        return (error);
}

SYSCTL_PROC(_vm, OID_AUTO, default_policy, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_vm_default_policy, "A",
    "Default policy (rr, first-touch, first-touch-rr)");

/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

        KASSERT(range->start != 0 && range->end != 0,
            ("Invalid range passed on search for vm_fictitious page"));
        if (p->start >= range->end)
                return (1);
        if (p->start < range->start)
                return (-1);

        return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

        /* Check if this is a search for a page */
        if (p1->end == 0)
                return (vm_phys_fictitious_in_range(p1, p2));

        KASSERT(p2->end != 0,
            ("Invalid range passed as second parameter to vm fictitious comparison"));

        /* Searching to add a new range */
        if (p1->end <= p2->start)
                return (-1);
        if (p1->start >= p2->end)
                return (1);

        panic("Trying to add overlapping vm fictitious ranges:\n"
            "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
            (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}
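/*
 * Note that a point lookup is encoded as a degenerate range: callers
 * such as vm_phys_fictitious_to_vm_page() build a key with start = pa
 * and end = 0, which makes vm_phys_fictitious_cmp() defer to
 * vm_phys_fictitious_in_range() above.
 */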

#ifdef notyet
static __inline int
vm_rr_selectdomain(void)
{
#ifdef VM_NUMA_ALLOC
        struct thread *td;

        td = curthread;

        td->td_dom_rr_idx++;
        td->td_dom_rr_idx %= vm_ndomains;
        return (td->td_dom_rr_idx);
#else
        return (0);
#endif
}
#endif /* notyet */

/*
 * Initialise a VM domain iterator.
 *
 * Check the thread policy, then the proc policy,
 * then default to the system policy.
 *
 * Later on the various layers will have this logic
 * plumbed into them and the phys code will be explicitly
 * handed a VM domain policy to use.
 */
static void
vm_policy_iterator_init(struct vm_domain_iterator *vi)
{
#ifdef VM_NUMA_ALLOC
        struct vm_domain_policy lcl;
#endif

        vm_domain_iterator_init(vi);

#ifdef VM_NUMA_ALLOC
        /* Copy out the thread policy */
        vm_domain_policy_localcopy(&lcl, &curthread->td_vm_dom_policy);
        if (lcl.p.policy != VM_POLICY_NONE) {
                /* Thread policy is present; use it */
                vm_domain_iterator_set_policy(vi, &lcl);
                return;
        }

        vm_domain_policy_localcopy(&lcl,
            &curthread->td_proc->p_vm_dom_policy);
        if (lcl.p.policy != VM_POLICY_NONE) {
                /* Process policy is present; use it */
                vm_domain_iterator_set_policy(vi, &lcl);
                return;
        }
#endif
        /* Use system default policy */
        vm_domain_iterator_set_policy(vi, &vm_default_policy);
}

static void
vm_policy_iterator_finish(struct vm_domain_iterator *vi)
{

        vm_domain_iterator_cleanup(vi);
}

boolean_t
vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high)
{
        struct vm_phys_seg *s;
        int idx;

        while ((idx = ffsl(mask)) != 0) {
                idx--;  /* ffsl counts from 1 */
                mask &= ~(1UL << idx);
                s = &vm_phys_segs[idx];
                if (low < s->end && high > s->start)
                        return (TRUE);
        }
        return (FALSE);
}
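/*
 * For example, a mask with bits 0 and 2 set asks whether either
 * vm_phys_segs[0] or vm_phys_segs[2] overlaps the physical address
 * range [low, high).
 */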
/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        struct vm_freelist *fl;
        int dom, error, flind, oind, pind;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
        for (dom = 0; dom < vm_ndomains; dom++) {
                sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
                            "\n  ORDER (SIZE)  |  NUMBER"
                            "\n              ", flind);
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                sbuf_printf(&sbuf, "  |  POOL %d", pind);
                        sbuf_printf(&sbuf, "\n--            ");
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                sbuf_printf(&sbuf, "-- --      ");
                        sbuf_printf(&sbuf, "--\n");
                        for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
                                sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
                                    1 << (PAGE_SHIFT - 10 + oind));
                                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                        fl = vm_phys_free_queues[dom][flind][pind];
                                        sbuf_printf(&sbuf, "  |  %6d",
                                            fl[oind].lcnt);
                                }
                                sbuf_printf(&sbuf, "\n");
                        }
                }
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}
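/*
 * For example, with 4KB pages a row of the above table labelled
 * "12 ( 16384K)" gives, for each pool, the number of free order-12
 * (16MB) blocks on that free list.
 */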
/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        struct vm_phys_seg *seg;
        int error, segind;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
                seg = &vm_phys_segs[segind];
                sbuf_printf(&sbuf, "start:     %#jx\n",
                    (uintmax_t)seg->start);
                sbuf_printf(&sbuf, "end:       %#jx\n",
                    (uintmax_t)seg->end);
                sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
                sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}

/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f, int t)
{

#ifdef VM_NUMA_ALLOC
        if (mem_locality == NULL)
                return (-1);
        if (f >= vm_ndomains || t >= vm_ndomains)
                return (-1);
        return (mem_locality[f * vm_ndomains + t]);
#else
        return (-1);
#endif
}

#ifdef VM_NUMA_ALLOC
/*
 * Outputs the VM locality table.
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        int error, i, j;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

        sbuf_printf(&sbuf, "\n");

        for (i = 0; i < vm_ndomains; i++) {
                sbuf_printf(&sbuf, "%d: ", i);
                for (j = 0; j < vm_ndomains; j++) {
                        sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
                }
                sbuf_printf(&sbuf, "\n");
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}
#endif

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

        m->order = order;
        if (tail)
                TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
        else
                TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
        fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

        TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
        fl[order].lcnt--;
        m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
        struct vm_phys_seg *seg;

        KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
            ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
        KASSERT(domain < vm_ndomains,
            ("vm_phys_create_seg: invalid domain provided"));
        seg = &vm_phys_segs[vm_phys_nsegs++];
        while (seg > vm_phys_segs && (seg - 1)->start >= end) {
                *seg = *(seg - 1);
                seg--;
        }
        seg->start = start;
        seg->end = end;
        seg->domain = domain;
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
#ifdef VM_NUMA_ALLOC
        int i;

        if (mem_affinity == NULL) {
                _vm_phys_create_seg(start, end, 0);
                return;
        }

        for (i = 0;; i++) {
                if (mem_affinity[i].end == 0)
                        panic("Reached end of affinity info");
                if (mem_affinity[i].end <= start)
                        continue;
                if (mem_affinity[i].start > start)
                        panic("No affinity info for start %jx",
                            (uintmax_t)start);
                if (mem_affinity[i].end >= end) {
                        _vm_phys_create_seg(start, end,
                            mem_affinity[i].domain);
                        break;
                }
                _vm_phys_create_seg(start, mem_affinity[i].end,
                    mem_affinity[i].domain);
                start = mem_affinity[i].end;
        }
#else
        _vm_phys_create_seg(start, end, 0);
#endif
}
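/*
 * For illustration, suppose mem_affinity reports [0, 2G) as domain 0
 * and [2G, 4G) as domain 1.  vm_phys_create_seg(1G, 3G) then creates
 * two segments: [1G, 2G) in domain 0 and [2G, 3G) in domain 1.
 */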
/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
        vm_paddr_t paddr;

        KASSERT((start & PAGE_MASK) == 0,
            ("vm_phys_define_seg: start is not page aligned"));
        KASSERT((end & PAGE_MASK) == 0,
            ("vm_phys_define_seg: end is not page aligned"));

        /*
         * Split the physical memory segment if it spans two or more free
         * list boundaries.
         */
        paddr = start;
#ifdef VM_FREELIST_ISADMA
        if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
                vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
                paddr = VM_ISADMA_BOUNDARY;
        }
#endif
#ifdef VM_FREELIST_LOWMEM
        if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
                vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
                paddr = VM_LOWMEM_BOUNDARY;
        }
#endif
#ifdef VM_FREELIST_DMA32
        if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
                vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
                paddr = VM_DMA32_BOUNDARY;
        }
#endif
        vm_phys_create_seg(paddr, end);
}

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        u_long npages;
        int dom, flind, freelist, oind, pind, segind;

        /*
         * Compute the number of free lists, and generate the mapping from the
         * manifest constants VM_FREELIST_* to the free list indices.
         *
         * Initially, the entries of vm_freelist_to_flind[] are set to either
         * 0 or 1 to indicate which free lists should be created.
         */
        npages = 0;
        for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
                seg = &vm_phys_segs[segind];
#ifdef VM_FREELIST_ISADMA
                if (seg->end <= VM_ISADMA_BOUNDARY)
                        vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
                else
#endif
#ifdef VM_FREELIST_LOWMEM
                if (seg->end <= VM_LOWMEM_BOUNDARY)
                        vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
                else
#endif
#ifdef VM_FREELIST_DMA32
                if (
#ifdef VM_DMA32_NPAGES_THRESHOLD
                    /*
                     * Create the DMA32 free list only if the amount of
                     * physical memory above physical address 4G exceeds the
                     * given threshold.
                     */
                    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
                    seg->end <= VM_DMA32_BOUNDARY)
                        vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
                else
#endif
                {
                        npages += atop(seg->end - seg->start);
                        vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
                }
        }
        /* Change each entry into a running total of the free lists. */
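        /*
         * For example, if the loop above set the DEFAULT, DMA32 and
         * LOWMEM entries to { 1, 1, 1 } (in constant order), the running
         * totals become { 1, 2, 3 }, vm_nfreelists is set to 3 below, and
         * the final decrement turns the entries into the flinds
         * { 0, 1, 2 }.
         */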
        for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
                vm_freelist_to_flind[freelist] +=
                    vm_freelist_to_flind[freelist - 1];
        }
        vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
        KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
        /* Change each entry into a free list index. */
        for (freelist = 0; freelist < VM_NFREELIST; freelist++)
                vm_freelist_to_flind[freelist]--;

        /*
         * Initialize the first_page and free_queues fields of each physical
         * memory segment.
         */
#ifdef VM_PHYSSEG_SPARSE
        npages = 0;
#endif
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
                seg->first_page = &vm_page_array[npages];
                npages += atop(seg->end - seg->start);
#else
                seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef VM_FREELIST_ISADMA
                if (seg->end <= VM_ISADMA_BOUNDARY) {
                        flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
                        KASSERT(flind >= 0,
                            ("vm_phys_init: ISADMA flind < 0"));
                } else
#endif
#ifdef VM_FREELIST_LOWMEM
                if (seg->end <= VM_LOWMEM_BOUNDARY) {
                        flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
                        KASSERT(flind >= 0,
                            ("vm_phys_init: LOWMEM flind < 0"));
                } else
#endif
#ifdef VM_FREELIST_DMA32
                if (seg->end <= VM_DMA32_BOUNDARY) {
                        flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
                        KASSERT(flind >= 0,
                            ("vm_phys_init: DMA32 flind < 0"));
                } else
#endif
                {
                        flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
                        KASSERT(flind >= 0,
                            ("vm_phys_init: DEFAULT flind < 0"));
                }
                seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
        }

        /*
         * Initialize the free queues.
         */
        for (dom = 0; dom < vm_ndomains; dom++) {
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                fl = vm_phys_free_queues[dom][flind][pind];
                                for (oind = 0; oind < VM_NFREEORDER; oind++)
                                        TAILQ_INIT(&fl[oind].pl);
                        }
                }
        }

        rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}
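/*
 * A free block is thereafter addressed as
 * vm_phys_free_queues[domain][flind][pool][order]; for example, on a
 * single-domain system with only the default free list, the order-0
 * default-pool queue is vm_phys_free_queues[0][0][VM_FREEPOOL_DEFAULT][0].
 */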
/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
        vm_page_t m_buddy;

        while (oind > order) {
                oind--;
                m_buddy = &m[1 << oind];
                KASSERT(m_buddy->order == VM_NFREEORDER,
                    ("vm_phys_split_pages: page %p has unexpected order %d",
                    m_buddy, m_buddy->order));
                vm_freelist_add(fl, m_buddy, oind, 0);
        }
}

/*
 * Initialize a physical page in preparation for adding it to the free
 * lists.
 */
void
vm_phys_init_page(vm_paddr_t pa)
{
        vm_page_t m;

        m = vm_phys_paddr_to_vm_page(pa);
        m->object = NULL;
        m->wire_count = 0;
        m->busy_lock = VPB_UNBUSIED;
        m->hold_count = 0;
        m->flags = m->aflags = m->oflags = 0;
        m->phys_addr = pa;
        m->queue = PQ_NONE;
        m->psind = 0;
        m->segind = vm_phys_paddr_to_segind(pa);
        m->order = VM_NFREEORDER;
        m->pool = VM_FREEPOOL_DEFAULT;
        m->valid = m->dirty = 0;
        pmap_page_init(m);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
        vm_page_t m;
        int domain, flind;
        struct vm_domain_iterator vi;

        KASSERT(pool < VM_NFREEPOOL,
            ("vm_phys_alloc_pages: pool %d is out of range", pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_alloc_pages: order %d is out of range", order));

        vm_policy_iterator_init(&vi);

        while ((vm_domain_iterator_run(&vi, &domain)) == 0) {
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        m = vm_phys_alloc_domain_pages(domain, flind, pool,
                            order);
                        if (m != NULL)
                                return (m);
                }
        }

        vm_policy_iterator_finish(&vi);
        return (NULL);
}
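/*
 * For example, if the smallest block that can satisfy an order-2
 * request is of order 4, vm_phys_split_pages() returns the upper
 * order-3 and order-2 buddies to the free lists and the caller
 * receives the remaining order-2 prefix.
 */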
/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int freelist, int pool, int order)
{
        vm_page_t m;
        struct vm_domain_iterator vi;
        int domain;

        KASSERT(freelist < VM_NFREELIST,
            ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
            freelist));
        KASSERT(pool < VM_NFREEPOOL,
            ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

        vm_policy_iterator_init(&vi);

        while ((vm_domain_iterator_run(&vi, &domain)) == 0) {
                m = vm_phys_alloc_domain_pages(domain,
                    vm_freelist_to_flind[freelist], pool, order);
                if (m != NULL)
                        return (m);
        }

        vm_policy_iterator_finish(&vi);
        return (NULL);
}

static vm_page_t
vm_phys_alloc_domain_pages(int domain, int flind, int pool, int order)
{
        struct vm_freelist *fl;
        struct vm_freelist *alt;
        int oind, pind;
        vm_page_t m;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        fl = &vm_phys_free_queues[domain][flind][pool][0];
        for (oind = order; oind < VM_NFREEORDER; oind++) {
                m = TAILQ_FIRST(&fl[oind].pl);
                if (m != NULL) {
                        vm_freelist_rem(fl, m, oind);
                        vm_phys_split_pages(m, oind, fl, order);
                        return (m);
                }
        }

        /*
         * The given pool was empty.  Find the largest
         * contiguous, power-of-two-sized set of pages in any
         * pool.  Transfer these pages to the given pool, and
         * use them to satisfy the allocation.
         */
        for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                        alt = &vm_phys_free_queues[domain][flind][pind][0];
                        m = TAILQ_FIRST(&alt[oind].pl);
                        if (m != NULL) {
                                vm_freelist_rem(alt, m, oind);
                                vm_phys_set_pool(pool, m, oind);
                                vm_phys_split_pages(m, oind, fl, order);
                                return (m);
                        }
                }
        }
        return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
        struct vm_phys_seg *seg;
        int segind;

        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                if (pa >= seg->start && pa < seg->end)
                        return (&seg->first_page[atop(pa - seg->start)]);
        }
        return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
        struct vm_phys_fictitious_seg tmp, *seg;
        vm_page_t m;

        m = NULL;
        tmp.start = pa;
        tmp.end = 0;

        rw_rlock(&vm_phys_fictitious_reg_lock);
        seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
        rw_runlock(&vm_phys_fictitious_reg_lock);
        if (seg == NULL)
                return (NULL);

        m = &seg->first_page[atop(pa - seg->start)];
        KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

        return (m);
}

static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
        long i;

        bzero(range, page_count * sizeof(*range));
        for (i = 0; i < page_count; i++) {
                vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
                range[i].oflags &= ~VPO_UNMANAGED;
                range[i].busy_lock = VPB_UNBUSIED;
        }
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
        struct vm_phys_fictitious_seg *seg;
        vm_page_t fp;
        long page_count;
#ifdef VM_PHYSSEG_DENSE
        long pi, pe;
        long dpage_count;
#endif

        KASSERT(start < end,
            ("Start of segment isn't less than end (start: %jx end: %jx)",
            (uintmax_t)start, (uintmax_t)end));

        page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
        pi = atop(start);
        pe = atop(end);
        if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
                fp = &vm_page_array[pi - first_page];
                if ((pe - first_page) > vm_page_array_size) {
                        /*
                         * We have a segment that starts inside
                         * of vm_page_array, but ends outside of it.
                         *
                         * Use vm_page_array pages for those that are
                         * inside of the vm_page_array range, and
                         * allocate the remaining ones.
                         */
                        dpage_count = vm_page_array_size - (pi - first_page);
                        vm_phys_fictitious_init_range(fp, start, dpage_count,
                            memattr);
                        page_count -= dpage_count;
                        start += ptoa(dpage_count);
                        goto alloc;
                }
                /*
                 * We can allocate the full range from vm_page_array,
                 * so there's no need to register the range in the tree.
                 */
                vm_phys_fictitious_init_range(fp, start, page_count, memattr);
                return (0);
        } else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
                /*
                 * We have a segment that ends inside of vm_page_array,
                 * but starts outside of it.
                 */
                fp = &vm_page_array[0];
                dpage_count = pe - first_page;
                vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
                    memattr);
                end -= ptoa(dpage_count);
                page_count -= dpage_count;
                goto alloc;
        } else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
                /*
                 * Trying to register a fictitious range that expands before
                 * and after vm_page_array.
                 */
                return (EINVAL);
        } else {
alloc:
#endif
                fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
                    M_WAITOK);
#ifdef VM_PHYSSEG_DENSE
        }
#endif
        vm_phys_fictitious_init_range(fp, start, page_count, memattr);

        seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
        seg->start = start;
        seg->end = end;
        seg->first_page = fp;

        rw_wlock(&vm_phys_fictitious_reg_lock);
        RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
        rw_wunlock(&vm_phys_fictitious_reg_lock);

        return (0);
}
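/*
 * Illustrative usage (hypothetical driver values): a driver that maps
 * device memory with no vm_page array entries of its own, e.g. a
 * graphics aperture, would register it as
 *
 *      error = vm_phys_fictitious_reg_range(0xd0000000, 0xd0100000,
 *          VM_MEMATTR_WRITE_COMBINING);
 *
 * and undo it with vm_phys_fictitious_unreg_range() of the same range
 * on detach.
 */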
void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
        struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
        long pi, pe;
#endif

        KASSERT(start < end,
            ("Start of segment isn't less than end (start: %jx end: %jx)",
            (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
        pi = atop(start);
        pe = atop(end);
        if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
                if ((pe - first_page) <= vm_page_array_size) {
                        /*
                         * This segment was allocated using vm_page_array
                         * only, there's nothing to do since those pages
                         * were never added to the tree.
                         */
                        return;
                }
                /*
                 * We have a segment that starts inside
                 * of vm_page_array, but ends outside of it.
                 *
                 * Calculate how many pages were added to the
                 * tree and free them.
                 */
                start = ptoa(first_page + vm_page_array_size);
        } else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
                /*
                 * We have a segment that ends inside of vm_page_array,
                 * but starts outside of it.
                 */
                end = ptoa(first_page);
        } else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
                /* Since it's not possible to register such a range, panic. */
                panic(
                    "Unregistering not registered fictitious range [%#jx:%#jx]",
                    (uintmax_t)start, (uintmax_t)end);
        }
#endif
        tmp.start = start;
        tmp.end = 0;

        rw_wlock(&vm_phys_fictitious_reg_lock);
        seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
        if (seg->start != start || seg->end != end) {
                rw_wunlock(&vm_phys_fictitious_reg_lock);
                panic(
                    "Unregistering not registered fictitious range [%#jx:%#jx]",
                    (uintmax_t)start, (uintmax_t)end);
        }
        RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
        rw_wunlock(&vm_phys_fictitious_reg_lock);
        free(seg->first_page, M_FICT_PAGES);
        free(seg, M_FICT_PAGES);
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
        struct vm_phys_seg *seg;
        int segind;

        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                if (pa >= seg->start && pa < seg->end)
                        return (segind);
        }
        panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
            (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_paddr_t pa;
        vm_page_t m_buddy;

        KASSERT(m->order == VM_NFREEORDER,
            ("vm_phys_free_pages: page %p has unexpected order %d",
            m, m->order));
        KASSERT(m->pool < VM_NFREEPOOL,
            ("vm_phys_free_pages: page %p has unexpected pool %d",
            m, m->pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_free_pages: order %d is out of range", order));
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        seg = &vm_phys_segs[m->segind];
        if (order < VM_NFREEORDER - 1) {
                pa = VM_PAGE_TO_PHYS(m);
                do {
                        pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
                        if (pa < seg->start || pa >= seg->end)
                                break;
                        m_buddy = &seg->first_page[atop(pa - seg->start)];
                        if (m_buddy->order != order)
                                break;
                        fl = (*seg->free_queues)[m_buddy->pool];
                        vm_freelist_rem(fl, m_buddy, order);
                        if (m_buddy->pool != m->pool)
                                vm_phys_set_pool(m->pool, m_buddy, order);
                        order++;
                        pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
                        m = &seg->first_page[atop(pa - seg->start)];
                } while (order < VM_NFREEORDER - 1);
        }
        fl = (*seg->free_queues)[m->pool];
        vm_freelist_add(fl, m, order, 1);
}
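/*
 * The buddy candidate above is found by flipping one address bit: for
 * example, freeing an order-0 page at 0x2000 with 4KB pages computes
 * pa ^ (1 << PAGE_SHIFT) == 0x3000.  If that page is a free order-0
 * block in the same segment, the two coalesce into an order-1 block at
 * 0x2000 and the loop retries at the next order.
 */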
/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
        u_int n;
        int order;

        /*
         * Avoid unnecessary coalescing by freeing the pages in the largest
         * possible power-of-two-sized subsets.
         */
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        for (;; npages -= n) {
                /*
                 * Unsigned "min" is used here so that "order" is assigned
                 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
                 * or the low-order bits of its physical address are zero
                 * because the size of a physical address exceeds the size of
                 * a long.
                 */
                order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
                    VM_NFREEORDER - 1);
                n = 1 << order;
                if (npages < n)
                        break;
                vm_phys_free_pages(m, order);
                m += n;
        }
        /* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
        for (; npages > 0; npages -= n) {
                order = flsl(npages) - 1;
                n = 1 << order;
                vm_phys_free_pages(m, order);
                m += n;
        }
}
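/*
 * For example, vm_phys_free_contig(m, 13) with "m" on an order-3
 * boundary frees blocks of 8, 4, and 1 pages: the first loop above
 * frees the order-3 block and stops once the alignment would permit a
 * block larger than the remainder, and the second loop frees the
 * order-2 and order-0 leftovers.
 */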
/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise,
 * return FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}
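/*
 * Illustrative sketch only (not part of the original source): extracting
 * the page at 0x7000 from a free order-3 block with vm_phys_unfree_page()
 * above, assuming PAGE_SHIFT == 12.
 *
 * Search phase: mask successively more low-order bits of 0x7000 until a
 * page marked as the head of a free block is found:
 *	order 1: 0x7000 & ~0x1fff = 0x6000	(not a free-block head)
 *	order 2: 0x7000 & ~0x3fff = 0x4000	(not a free-block head)
 *	order 3: 0x7000 & ~0x7fff = 0x0000	(free, order 3 -> "m_set")
 *
 * Split phase: remove the order-3 block at 0x0000 and repeatedly return
 * the half that does not contain 0x7000:
 *	order 2: free [0x0000, 0x4000), keep [0x4000, 0x8000)
 *	order 1: free [0x4000, 0x6000), keep [0x6000, 0x8000)
 *	order 0: free 0x6000, keep 0x7000 == "m"
 */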
/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa_end, pa_start;
	vm_page_t m_run;
	struct vm_domain_iterator vi;
	struct vm_phys_seg *seg;
	int domain, segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if (low >= high)
		return (NULL);
	vm_policy_iterator_init(&vi);
restartdom:
	if (vm_domain_iterator_run(&vi, &domain) != 0) {
		vm_policy_iterator_finish(&vi);
		return (NULL);
	}
	m_run = NULL;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high || seg->domain != domain)
			continue;
		if (low >= seg->end)
			break;
		if (low <= seg->start)
			pa_start = seg->start;
		else
			pa_start = low;
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - pa_start < ptoa(npages))
			continue;
		m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
		    alignment, boundary);
		if (m_run != NULL)
			break;
	}
	if (m_run == NULL && !vm_domain_iterator_isdone(&vi))
		goto restartdom;
	vm_policy_iterator_finish(&vi);
	return (m_run);
}
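/*
 * Illustrative sketch only (not part of the original source): the
 * clipping performed by vm_phys_alloc_contig() above.  Suppose the
 * caller asks for npages = 1024 (4 MB with 4 KB pages) in the range
 * [low, high) = [0x100000, 0x1000000), and the current domain has a
 * segment covering [0x000000, 0x800000):
 *
 *	pa_start = max(low, seg->start) = 0x100000
 *	pa_end   = min(high, seg->end)  = 0x800000
 *
 * The clipped window spans 0x700000 bytes >= ptoa(1024) = 0x400000, so
 * vm_phys_alloc_seg_contig() is asked to search this segment.  Segments
 * are visited in descending address order, and only after every segment
 * in the current domain has failed does the domain iterator advance.
 */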
/*
 * Allocate a run of contiguous physical pages from the free list for the
 * specified segment.
 */
static vm_page_t
vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	vm_paddr_t pa, pa_end, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int oind, order, pind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	/* Search for a run satisfying the specified conditions. */
	size = npages << PAGE_SHIFT;
	for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
	    oind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = (*seg->free_queues)[pind];
			TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
				/*
				 * Is the size of this allocation request
				 * larger than the largest block size?
				 */
				if (order >= VM_NFREEORDER) {
					/*
					 * Determine if a sufficient number of
					 * subsequent blocks to satisfy the
					 * allocation request are free.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					pa_end = pa + size;
					for (;;) {
						pa += 1 << (PAGE_SHIFT +
						    VM_NFREEORDER - 1);
						if (pa >= pa_end ||
						    pa < seg->start ||
						    pa >= seg->end)
							break;
						m = &seg->first_page[atop(pa -
						    seg->start)];
						if (m->order != VM_NFREEORDER -
						    1)
							break;
					}
					/* If not, go to the next block. */
					if (pa < pa_end)
						continue;
				}

				/*
				 * Determine if the blocks are within the
				 * given range, satisfy the given alignment,
				 * and do not cross the given boundary.
				 */
				pa = VM_PAGE_TO_PHYS(m_ret);
				pa_end = pa + size;
				if (pa >= low && pa_end <= high &&
				    (pa & (alignment - 1)) == 0 &&
				    rounddown2(pa ^ (pa_end - 1),
				    boundary) == 0)
					goto done;
			}
		}
	}
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, m->order);
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif
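/*
 * Illustrative sketch only (not part of the original source): the
 * boundary test used in vm_phys_alloc_seg_contig() above.  The first
 * and last bytes of a candidate run lie within the same boundary-sized
 * window exactly when "pa ^ (pa_end - 1)" has no bits set at or above
 * the boundary bit, i.e., when rounding that XOR down to a multiple of
 * "boundary" yields zero.  With boundary = 0x10000 (64 KB) and an 8 KB
 * run:
 *
 *	pa = 0x203000: pa_end - 1 = 0x204fff,
 *	    0x203000 ^ 0x204fff = 0x007fff, rounddown2 -> 0	(accepted)
 *	pa = 0x20f000: pa_end - 1 = 0x210fff,
 *	    0x20f000 ^ 0x210fff = 0x01ffff, rounddown2 -> 0x10000
 *							(crosses 0x210000)
 */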