/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map;
vm_map_t kmem_map;
vm_map_t exec_map;
vm_map_t pipe_map;

const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);

SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
    NULL, VM_MIN_KERNEL_ADDRESS, "Min kernel address");

SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#if defined(__arm__) || defined(__sparc64__)
    &vm_max_kernel_address, 0,
#else
    NULL, VM_MAX_KERNEL_ADDRESS,
#endif
    "Max kernel address");

/*
 *	kmem_alloc_nofault:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0, &addr, size, VMFS_ANY_SPACE,
	    VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
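
/*
 * Illustrative sketch (not part of the original code; the helper name is
 * hypothetical): a typical use of kmem_alloc_nofault() pairs the returned
 * range with pmap_qenter()/pmap_qremove(), since no mapping may be created
 * on demand.
 */
static __unused vm_offset_t
example_nofault_window(vm_page_t m)
{
	vm_offset_t va;

	va = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
	if (va == 0)
		return (0);
	/* Explicitly create the only mapping; a vm_fault() here would panic. */
	pmap_qenter(va, &m, 1);
	return (va);
}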

/*
 *	kmem_alloc_nofault_space:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory within the specified
 *	address space.  Any mapping from this range to physical memory
 *	must be explicitly created prior to its use, typically with
 *	pmap_qenter().  Any attempt to create a mapping on demand
 *	through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault_space(vm_map_t map, vm_size_t size, int find_space)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0, &addr, size, find_space,
	    VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
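
/*
 * Illustrative sketch (hypothetical helper): the find_space argument is one
 * of the VMFS_* placement strategies accepted by vm_map_find(), e.g.
 * VMFS_ANY_SPACE, or VMFS_ALIGNED_SPACE when an aligned start address is
 * preferred.
 */
static __unused vm_offset_t
example_aligned_nofault(vm_size_t size)
{

	return (kmem_alloc_nofault_space(kernel_map, size,
	    VMFS_ALIGNED_SPACE));
}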

/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	vm_offset_t offset;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, addr, addr + size,
	    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);

	return (addr);
}
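
/*
 * Illustrative sketch (hypothetical helper): a wired region obtained with
 * kmem_alloc() must later be released with kmem_free() using the same map
 * and size.  kmem_alloc() may block while wiring pages; see kmem_malloc()
 * below for the path used where blocking is not allowed.
 */
static __unused void
example_kmem_alloc_free(vm_size_t size)
{
	vm_offset_t va;

	va = kmem_alloc(kernel_map, size);
	if (va == 0)
		return;
	/* ... use the wired buffer at va ... */
	kmem_free(kernel_map, va, size);
}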

/*
 *	Allocates a region from the kernel address map and allocates physical
 *	pages within the specified physical address range to the kernel
 *	object.  Creates a wired mapping from this region to these pages, and
 *	returns the region's starting virtual address.  The allocated pages
 *	are not necessarily physically contiguous.  If M_ZERO is specified
 *	through the given flags, then the pages are zeroed before they are
 *	mapped.
 */
vm_offset_t
kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
	vm_object_t object = kernel_object;
	vm_offset_t addr;
	vm_ooffset_t end_offset, offset;
	vm_page_t m;
	int pflags, tries;

	size = round_page(size);
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(object);
	vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, 0);
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
	VM_OBJECT_WLOCK(object);
	end_offset = offset + size;
	for (; offset < end_offset; offset += PAGE_SIZE) {
		tries = 0;
retry:
		m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags, 1,
		    low, high, PAGE_SIZE, 0, memattr);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
				vm_map_unlock(map);
				vm_pageout_grow_cache(tries, low, high);
				vm_map_lock(map);
				VM_OBJECT_WLOCK(object);
				tries++;
				goto retry;
			}

			/*
			 * Since the pages that were allocated by any previous
			 * iterations of this loop are not busy, they can be
			 * freed by vm_object_page_remove(), which is called
			 * by vm_map_delete().
			 */
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_WUNLOCK(object);
	vm_map_unlock(map);
	vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
	    VM_MAP_WIRE_NOHOLES);
	return (addr);
}
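
/*
 * Illustrative sketch (hypothetical helper and constraints): allocating a
 * zeroed buffer whose backing pages all lie below 4GB.  The pages need not
 * be physically contiguous.  A machine-dependent attribute (e.g.
 * VM_MEMATTR_UNCACHEABLE, where the platform defines it) could be passed
 * instead of VM_MEMATTR_DEFAULT.
 */
static __unused vm_offset_t
example_alloc_below_4g(vm_size_t size)
{

	return (kmem_alloc_attr(kernel_map, size, M_WAITOK | M_ZERO,
	    0, 0xffffffff, VM_MEMATTR_DEFAULT));
}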

/*
 *	Allocates a region from the kernel address map and allocates
 *	physically contiguous pages within the specified physical address
 *	range to the kernel object.  Creates a wired mapping from this region
 *	to these pages, and returns the region's starting virtual address.
 *	If M_ZERO is specified through the given flags, then the pages are
 *	zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vm_object_t object = kernel_object;
	vm_offset_t addr;
	vm_ooffset_t offset;
	vm_page_t end_m, m;
	int pflags, tries;

	size = round_page(size);
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(object);
	vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, 0);
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
	VM_OBJECT_WLOCK(object);
	tries = 0;
retry:
	m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
	    atop(size), low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
			vm_map_unlock(map);
			vm_pageout_grow_cache(tries, low, high);
			vm_map_lock(map);
			VM_OBJECT_WLOCK(object);
			tries++;
			goto retry;
		}
		vm_map_delete(map, addr, addr + size);
		vm_map_unlock(map);
		return (0);
	}
	end_m = m + atop(size);
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_WUNLOCK(object);
	vm_map_unlock(map);
	vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
	    VM_MAP_WIRE_NOHOLES);
	return (addr);
}
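
/*
 * Illustrative sketch (hypothetical helper and constraints): a physically
 * contiguous, page-aligned buffer below 16MB that does not cross a 64KB
 * boundary, in the style an ISA DMA allocation might require.
 */
static __unused vm_offset_t
example_alloc_isa_dma(vm_size_t size)
{

	return (kmem_alloc_contig(kernel_map, size, M_WAITOK | M_ZERO,
	    0, 16 * 1024 * 1024, PAGE_SIZE, 64 * 1024, VM_MEMATTR_DEFAULT));
}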

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min be superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = vm_map_min(parent);
	ret = vm_map_find(parent, NULL, 0, min, size, superpage_align ?
	    VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_ACC_NO_CHARGE);
	if (ret != KERN_SUCCESS)
		panic("kmem_suballoc: bad status return of %d", ret);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
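
/*
 * Illustrative sketch (hypothetical helper and size): carving a submap out
 * of kernel_map at boot time, in the style used to create submaps such as
 * exec_map and pipe_map.  The submap then manages its own subrange of
 * kernel virtual address space.
 */
static __unused vm_map_t
example_create_submap(vm_size_t size)
{
	vm_offset_t minaddr, maxaddr;

	return (kmem_suballoc(kernel_map, &minaddr, &maxaddr, size, FALSE));
}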

/*
 *	kmem_malloc:
 *
 * 	Allocate wired-down memory in the kernel's address map for the higher
 * 	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * 	kmem_alloc() because we may need to allocate memory at interrupt
 * 	level where we cannot block (canwait == FALSE).
 *
 * 	This routine has its own private kernel submap (kmem_map) and object
 * 	(kmem_object).  This, combined with the fact that only malloc uses
 * 	this routine, ensures that we will never block in map or object waits.
 *
 * 	We don't worry about expanding the map (adding entries) since entries
 * 	for wired maps are statically allocated.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps,
 *	from which we never free.
 */
vm_offset_t
kmem_malloc(vm_map_t map, vm_size_t size, int flags)
{
	vm_offset_t addr;
	int i, rv;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if ((flags & M_NOWAIT) == 0) {
			for (i = 0; i < 8; i++) {
				EVENTHANDLER_INVOKE(vm_lowmem, 0);
				uma_reclaim();
				vm_map_lock(map);
				if (vm_map_findspace(map, vm_map_min(map),
				    size, &addr) == 0) {
					break;
				}
				vm_map_unlock(map);
				tsleep(&i, 0, "nokva", (hz / 4) * (i + 1));
			}
			if (i == 8) {
				panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
				    (long)size, (long)map->size);
			}
		} else {
			return (0);
		}
	}

	rv = kmem_back(map, addr, size, flags);
	vm_map_unlock(map);
	return (rv == KERN_SUCCESS ? addr : 0);
}
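
/*
 * Illustrative sketch (hypothetical helper): the higher-level malloc(9)/UMA
 * layer backs its slabs with calls of roughly this shape, always against
 * kmem_map and with the caller's M_WAITOK/M_NOWAIT disposition passed
 * through unchanged.
 */
static __unused void *
example_slab_page_alloc(vm_size_t bytes, int wait)
{

	return ((void *)kmem_malloc(kmem_map, bytes, wait));
}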

/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_page_t m;
	int pflags;
	boolean_t found;

	KASSERT(vm_map_locked(map), ("kmem_back: map %p is not locked", map));
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	/*
	 * Assert: vm_map_insert() will never be able to extend the
	 * previous entry so vm_map_lookup_entry() will find a new
	 * entry exactly corresponding to this address range and it
	 * will have wired_count == 0.
	 */
	found = vm_map_lookup_entry(map, addr, &entry);
	KASSERT(found && entry->start == addr && entry->end == addr + size &&
	    entry->wired_count == 0 && (entry->eflags & MAP_ENTRY_IN_TRANSITION)
	    == 0, ("kmem_back: entry not found or misaligned"));

	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;

	VM_OBJECT_WLOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_OBJECT_WUNLOCK(kmem_object);
				entry->eflags |= MAP_ENTRY_IN_TRANSITION;
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				KASSERT(
(entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_NEEDS_WAKEUP)) ==
				    MAP_ENTRY_IN_TRANSITION,
				    ("kmem_back: volatile entry"));
				entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
				VM_OBJECT_WLOCK(kmem_object);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete() before the pages have been freed
			 * or unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
						   OFF_TO_IDX(offset + i));
				vm_page_unwire(m, 0);
				vm_page_free(m);
			}
			VM_OBJECT_WUNLOCK(kmem_object);
			vm_map_delete(map, addr, addr + size);
			return (KERN_NO_SPACE);
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_malloc: page %p is managed", m));
	}
	VM_OBJECT_WUNLOCK(kmem_object);

	/*
	 * Mark map entry as non-pageable.  Repeat the assert.
	 */
	KASSERT(entry->start == addr && entry->end == addr + size &&
	    entry->wired_count == 0,
	    ("kmem_back: entry not found or misaligned after allocation"));
	entry->wired_count = 1;

	/*
	 * At this point, the kmem_object must be unlocked because
	 * vm_map_simplify_entry() calls vm_object_deallocate(), which
	 * locks the kmem_object.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * Loop thru pages, entering them in the pmap.
	 */
	VM_OBJECT_WLOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
		    TRUE);
		vm_page_wakeup(m);
	}
	VM_OBJECT_WUNLOCK(kmem_object);

	return (KERN_SUCCESS);
}

/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (!swap_reserve(size))
		return (0);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			swap_release(size);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_ACC_CHARGED);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}
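
/*
 * Illustrative sketch (hypothetical helper): the two routines above are
 * used as a pair on a submap such as exec_map; the allocation may sleep
 * until a matching kmem_free_wakeup() releases space in that submap.
 */
static __unused void
example_submap_scratch(vm_map_t submap, vm_size_t size)
{
	vm_offset_t va;

	va = kmem_alloc_wait(submap, size);
	if (va == 0)
		return;
	/* ... use the pageable buffer at va ... */
	kmem_free_wakeup(submap, va, size);
}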

static void
kmem_init_zero_region(void)
{
	vm_offset_t addr, i;
	vm_page_t m;
	int error;

	/*
	 * Map a single physical page of zeros to a larger virtual range.
	 * This requires less looping in places that want large amounts of
	 * zeros, while not using much more physical resources.
	 */
	addr = kmem_alloc_nofault(kernel_map, ZERO_REGION_SIZE);
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
		pmap_qenter(addr + i, &m, 1);
	error = vm_map_protect(kernel_map, addr, addr + ZERO_REGION_SIZE,
	    VM_PROT_READ, TRUE);
	KASSERT(error == 0, ("error=%d", error));

	zero_region = (const void *)addr;
}
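
/*
 * Illustrative sketch (hypothetical helper): consumers can copy zeros out
 * to user space directly from the read-only zero_region in
 * ZERO_REGION_SIZE chunks instead of maintaining a private zeroed buffer.
 */
static __unused int
example_copyout_zeros(void *udata, size_t len)
{
	size_t chunk;
	int error;

	error = 0;
	while (len > 0 && error == 0) {
		chunk = len < ZERO_REGION_SIZE ? len : ZERO_REGION_SIZE;
		error = copyout(zero_region, udata, chunk);
		udata = (char *)udata + chunk;
		len -= chunk;
	}
	return (error);
}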

/*
 * 	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
	    KERNBASE,
#else
	    VM_MIN_KERNEL_ADDRESS,
#endif
	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);

	kmem_init_zero_region();
}

#ifdef DIAGNOSTIC
/*
 * Allow userspace to directly trigger the VM drain routine for testing
 * purposes.
 */
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		EVENTHANDLER_INVOKE(vm_lowmem, 0);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    debug_vm_lowmem, "I", "set to trigger vm_lowmem event");
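
/*
 * From user space (illustrative; requires a kernel built with DIAGNOSTIC):
 *
 *	sysctl debug.vm_lowmem=1
 */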
#endif