Lines matching "vm" and "map" in sys/vm/vm_kern.c
1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
8 * The Mach Operating System project at Carnegie-Mellon University.
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
55 * Pittsburgh PA 15213-3890
84 #include <vm/vm.h>
85 #include <vm/vm_param.h>
86 #include <vm/vm_domainset.h>
87 #include <vm/vm_kern.h>
88 #include <vm/pmap.h>
89 #include <vm/vm_map.h>
90 #include <vm/vm_object.h>
91 #include <vm/vm_page.h>
92 #include <vm/vm_pageout.h>
93 #include <vm/vm_pagequeue.h>
94 #include <vm/vm_phys.h>
95 #include <vm/vm_radix.h>
96 #include <vm/vm_extern.h>
97 #include <vm/uma.h>
129 /* On non-superpage architectures we want large import sizes. */
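The quoted comment sits inside the KVA_QUANTUM import-size definition; the surrounding preprocessor logic is not among the matching lines. The reconstruction below is a sketch of the usual pattern in vm_kern.c and may differ in detail on a given branch:

#if VM_NRESERVLEVEL > 0
#define	KVA_QUANTUM_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)
#else
/* On non-superpage architectures we want large import sizes. */
#define	KVA_QUANTUM_SHIFT	(8 + PAGE_SHIFT)
#endif
#define	KVA_QUANTUM		(1ul << KVA_QUANTUM_SHIFT)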
231 for (tries = wait ? 3 : 1;; tries--) { in kmem_alloc_contig_pages()
247 * Allocates a region from the kernel address map and physical pages
271 offset = addr - VM_MIN_KERNEL_ADDRESS; in kmem_alloc_attr_domain()
287 if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0) in kmem_alloc_attr_domain()
317 start_segind = -1; in kmem_alloc_attr_domainset()
325 if (start_segind == -1) in kmem_alloc_attr_domainset()
328 atop(round_page(size)), low, high) == -1) { in kmem_alloc_attr_domainset()
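A minimal usage sketch for the kmem_alloc_attr() KPI described above, assuming the current kmem(9) prototypes in which the allocator takes and returns void * (older branches use vm_offset_t); the names alloc_low_buf()/free_low_buf() are illustrative only:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

/* Zeroed, wired kernel memory backed by pages below 4 GiB. */
static void *
alloc_low_buf(vm_size_t size)
{
	return (kmem_alloc_attr(size, M_WAITOK | M_ZERO, 0, 0xffffffffUL,
	    VM_MEMATTR_DEFAULT));
}

/* The size passed to kmem_free() must match the original allocation. */
static void
free_low_buf(void *buf, vm_size_t size)
{
	kmem_free(buf, size);
}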
337 * Allocates a region from the kernel address map and physically
362 offset = addr - VM_MIN_KERNEL_ADDRESS; in kmem_alloc_contig_domain()
379 if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0) in kmem_alloc_contig_domain()
411 start_segind = -1; in kmem_alloc_contig_domainset()
419 if (start_segind == -1) in kmem_alloc_contig_domainset()
422 atop(round_page(size)), low, high) == -1) { in kmem_alloc_contig_domainset()
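A similar hedged sketch for kmem_alloc_contig(), which adds alignment and boundary constraints for physically contiguous allocations (prototype as documented in kmem(9); alloc_desc_ring() is an illustrative name):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * 64 KB of physically contiguous, wired, zeroed memory below 4 GiB,
 * aligned to 64 KB and not crossing a 1 MB boundary, e.g. for a DMA
 * descriptor ring on hardware without scatter/gather support.
 */
static void *
alloc_desc_ring(void)
{
	return (kmem_alloc_contig(65536, M_WAITOK | M_ZERO, 0, 0xffffffffUL,
	    65536, 1024 * 1024, VM_MEMATTR_DEFAULT));
}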
433 * Initializes a map to manage a subrange
438 * parent Map to take range from
439 * min, max Returned endpoints of map
444 kmem_subinit(vm_map_t map, vm_map_t parent, vm_offset_t *min, vm_offset_t *max, in kmem_subinit() argument
458 vm_map_init(map, vm_map_pmap(parent), *min, *max); in kmem_subinit()
459 if (vm_map_submap(parent, *min, *max, map) != KERN_SUCCESS) in kmem_subinit()
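A sketch of how a caller might carve a dedicated submap out of kernel_map with kmem_subinit(). Only the first four parameters appear in the matching lines; the trailing size and findspace arguments below are assumptions based on recent FreeBSD, and example_map_store and friends are hypothetical names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

static struct vm_map example_map_store;		/* hypothetical submap */
static vm_offset_t example_minva, example_maxva;

static void
example_map_init(void *arg __unused)
{
	/* 16 MB of pageable KVA, placed wherever the parent has room. */
	kmem_subinit(&example_map_store, kernel_map, &example_minva,
	    &example_maxva, 16 * 1024 * 1024, true);
}
SYSINIT(example_map, SI_SUB_KMEM, SI_ORDER_ANY, example_map_init, NULL);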
466 * Allocate wired-down pages in the kernel's address space.
541 offset = addr - VM_MIN_KERNEL_ADDRESS; in kmem_back_domain()
551 mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i)); in kmem_back_domain()
571 if (flags & M_ZERO && (m->flags & PG_ZERO) == 0) in kmem_back_domain()
573 KASSERT((m->oflags & VPO_UNMANAGED) != 0, in kmem_back_domain()
579 m->oflags |= VPO_KMEM_EXEC; in kmem_back_domain()
616 rv = kmem_back_domain(domain, object, addr, next - addr, flags); in kmem_back()
618 kmem_unback(object, start, addr - start); in kmem_back()
649 offset = addr - VM_MIN_KERNEL_ADDRESS; in _kmem_unback()
655 if (__predict_true((m->oflags & VPO_KMEM_EXEC) == 0)) in _kmem_unback()
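The fragments above are the internals of kmem_malloc()'s backing path: kmem_back_domain() allocates and maps the pages, tagging them VPO_KMEM_EXEC when executable memory was requested, and kmem_unback() reverses it. From a consumer's perspective this reduces to an allocate/free pair; a hedged sketch per kmem(9) and malloc(9), with illustrative names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Wired, zeroed, executable kernel memory (e.g. for a JIT buffer).  The
 * M_EXEC flag is what leads kmem_back_domain() to set VPO_KMEM_EXEC on
 * the backing pages, as seen in the fragment above.
 */
static void *
example_exec_alloc(vm_size_t size)
{
	return (kmem_malloc(size, M_WAITOK | M_ZERO | M_EXEC));
}

static void
example_exec_free(void *buf, vm_size_t size)
{
	kmem_free(buf, size);
}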
698 * Allocates pageable memory from a sub-map of the kernel. If the submap
704 kmap_alloc_wait(vm_map_t map, vm_size_t size) in kmap_alloc_wait() argument
714 * To make this work for more than one map, use the map's lock in kmap_alloc_wait()
717 vm_map_lock(map); in kmap_alloc_wait()
718 addr = vm_map_findspace(map, vm_map_min(map), size); in kmap_alloc_wait()
719 if (addr + size <= vm_map_max(map)) in kmap_alloc_wait()
722 if (vm_map_max(map) - vm_map_min(map) < size) { in kmap_alloc_wait()
723 vm_map_unlock(map); in kmap_alloc_wait()
727 vm_map_modflags(map, MAP_NEEDS_WAKEUP, 0); in kmap_alloc_wait()
728 vm_map_unlock_and_wait(map, 0); in kmap_alloc_wait()
730 vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_RW, VM_PROT_RW, in kmap_alloc_wait()
732 vm_map_unlock(map); in kmap_alloc_wait()
740 * waiting for memory in that map.
743 kmap_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size) in kmap_free_wakeup() argument
746 vm_map_lock(map); in kmap_free_wakeup()
747 (void) vm_map_delete(map, trunc_page(addr), round_page(addr + size)); in kmap_free_wakeup()
748 if ((map->flags & MAP_NEEDS_WAKEUP) != 0) { in kmap_free_wakeup()
749 vm_map_modflags(map, 0, MAP_NEEDS_WAKEUP); in kmap_free_wakeup()
750 vm_map_wakeup(map); in kmap_free_wakeup()
752 vm_map_unlock(map); in kmap_free_wakeup()
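A hedged sketch of how the kmap_alloc_wait()/kmap_free_wakeup() pair above is used against a pageable submap: the allocation sleeps until address space is available, and the free wakes any sleepers. example_submap is a placeholder for a submap created elsewhere (e.g. via kmem_subinit()):

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

static vm_offset_t
example_submap_get(vm_map_t example_submap, vm_size_t size)
{
	/* May sleep until another thread releases space in the submap. */
	return (kmap_alloc_wait(example_submap, size));
}

static void
example_submap_put(vm_map_t example_submap, vm_offset_t addr, vm_size_t size)
{
	/* Frees the range and wakes threads blocked in kmap_alloc_wait(). */
	kmap_free_wakeup(example_submap, addr, size);
}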
762 * Map a single physical page of zeros to a larger virtual range. in kmem_init_zero_region()
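The line above is from kmem_init_zero_region(): a single zeroed physical page backs a large read-only virtual range, so consumers can read zeros from a wide span while only one page of physical memory is consumed. A sketch of that technique using current page-allocation KPIs (vm_page_alloc_noobj() and friends); this is not the verbatim implementation:

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

static vm_offset_t
example_zero_region_init(void)
{
	vm_offset_t addr, off;
	vm_page_t m;

	/* Reserve the KVA and allocate one wired, zeroed page. */
	addr = kva_alloc(ZERO_REGION_SIZE);
	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO |
	    VM_ALLOC_WAITOK);
	/* Map that same page at every page-sized offset of the range. */
	for (off = 0; off < ZERO_REGION_SIZE; off += PAGE_SIZE)
		pmap_qenter(addr + off, &m, 1);
	/* The region is only ever read; drop write permission. */
	pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE,
	    VM_PROT_READ);
	return (addr);
}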
777 * Import KVA from the kernel map into the kernel arena.
804 * Import KVA from a parent arena into a per-domain arena. Imports must be
805 * KVA_QUANTUM-aligned and a multiple of KVA_QUANTUM in size.
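The two comments above describe the vmem import chain: the kernel arena imports KVA from kernel_map, and each per-domain arena imports KVA_QUANTUM-aligned chunks from its parent. A hedged sketch of that import pattern using the public vmem interfaces declared in sys/vmem.h; the arena, callback, and quantum value here are illustrative stand-ins, not the file's actual kva_import()/kva_import_domain():

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>

/* Import callback: pull address space from the parent arena. */
static int
example_kva_import(void *parent, vmem_size_t size, int flags,
    vmem_addr_t *addrp)
{
	return (vmem_alloc(parent, size, M_BESTFIT | flags, addrp));
}

static void
example_arena_init(void)
{
	vmem_t *example_arena;

	/* Empty arena; it grows only through the import callback. */
	example_arena = vmem_create("example kva arena", 0, 0, PAGE_SIZE, 0,
	    M_WAITOK);
	vmem_set_import(example_arena, example_kva_import, NULL, kernel_arena,
	    4 * 1024 * 1024 /* stand-in for KVA_QUANTUM */);
}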
821 * Create the kernel map; insert a mapping covering kernel text,
823 * new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
825 * Create the kernel vmem arena and its per-domain children.
861 * the per-domain arenas. in kmem_init()
876 * Initialize the per-domain arenas. These are used to color in kmem_init()
918 * the map. in kmem_init()
945 * Restore the default permissions to ensure that the direct map alias in kmem_bootstrap_free()
948 pmap_change_prot(start, end - start, VM_PROT_RW); in kmem_bootstrap_free()
956 vm_phys_free_pages(m, m->pool, 0); in kmem_bootstrap_free()
963 (void)vmem_add(kernel_arena, start, end - start, M_WAITOK); in kmem_bootstrap_free()
973 struct vmspace *vm; in pmap_active_cpus() local
978 td = cpuid_to_pcpu[c]->pc_curthread; in pmap_active_cpus()
979 p = td->td_proc; in pmap_active_cpus()
982 vm = vmspace_acquire_ref(p); in pmap_active_cpus()
983 if (vm == NULL) in pmap_active_cpus()
985 if (pmap == vmspace_pmap(vm)) in pmap_active_cpus()
987 vmspace_free(vm); in pmap_active_cpus()
993 * Allow userspace to directly trigger the VM drain routine for testing
1022 if (error != 0 || req->newptr == NULL) in debug_uma_reclaim()
1041 if (error != 0 || req->newptr == NULL) in debug_uma_reclaim_domain()
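The closing fragments are from the debug.uma_reclaim and debug.uma_reclaim_domain sysctl handlers, which let userspace trigger the VM drain paths for testing. Both follow the standard write-only trigger pattern: do nothing on reads or failed writes (req->newptr == NULL), otherwise act on the written value. A hedged sketch of that pattern with illustrative names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

static int
example_reclaim_sysctl(SYSCTL_HANDLER_ARGS)
{
	int error, req_type;

	req_type = 0;
	error = sysctl_handle_int(oidp, &req_type, 0, req);
	/* Nothing to do for reads or failed writes. */
	if (error != 0 || req->newptr == NULL)
		return (error);
	/*
	 * Act on the written value, e.g. UMA_RECLAIM_TRIM or _DRAIN
	 * (the real handlers validate the value first).
	 */
	uma_reclaim(req_type);
	return (0);
}
SYSCTL_PROC(_debug, OID_AUTO, example_reclaim,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    example_reclaim_sysctl, "I", "Hypothetical UMA reclaim trigger");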