/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map = 0;
vm_map_t kmem_map = 0;
vm_map_t exec_map = 0;
vm_map_t pipe_map;
vm_map_t buffer_map = 0;

const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);

SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
    NULL, VM_MIN_KERNEL_ADDRESS, "Min kernel address");

SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#ifdef __sparc64__
    &vm_max_kernel_address, 0,
#else
    NULL, VM_MAX_KERNEL_ADDRESS,
#endif
    "Max kernel address");

/*
 *	kmem_alloc_nofault:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0, &addr, size, VMFS_ANY_SPACE,
	    VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}
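
/*
 * Example (a sketch, not part of the original code): reserve a
 * multi-page VA window and back it explicitly with pmap_qenter().
 * The page array "pages" and count "npages" are hypothetical and
 * assumed to describe already-allocated, wired pages.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_nofault(kernel_map, npages * PAGE_SIZE);
 *	if (va == 0)
 *		return (ENOMEM);
 *	pmap_qenter(va, pages, npages);
 *	... use the mapping ...
 *	pmap_qremove(va, npages);
 *	kmem_free(kernel_map, va, npages * PAGE_SIZE);
 */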

/*
 *	kmem_alloc_nofault_space:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory within the specified
 *	address space.  Any mapping from this range to physical memory
 *	must be explicitly created prior to its use, typically with
 *	pmap_qenter().  Any attempt to create a mapping on demand
 *	through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault_space(vm_map_t map, vm_size_t size, int find_space)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0, &addr, size, find_space,
	    VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}

/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	vm_offset_t offset;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, addr, addr + size,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);

	return (addr);
}
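
/*
 * Example (sketch): a typical wired allocation and its release.
 * kmem_alloc() returns 0 on failure, so the caller must check; the
 * 16-page size is illustrative.
 *
 *	vm_offset_t buf;
 *
 *	buf = kmem_alloc(kernel_map, 16 * PAGE_SIZE);
 *	if (buf == 0)
 *		return (ENOMEM);
 *	... use the wired memory ...
 *	kmem_free(kernel_map, buf, 16 * PAGE_SIZE);
 */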

/*
 *	Allocates a region from the kernel address map and physical pages
 *	within the specified address range to the kernel object.  Creates a
 *	wired mapping from this region to these pages, and returns the
 *	region's starting virtual address.  The allocated pages are not
 *	necessarily physically contiguous.  If M_ZERO is specified through the
 *	given flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
	vm_object_t object = kernel_object;
	vm_offset_t addr;
	vm_ooffset_t end_offset, offset;
	vm_page_t m;
	int pflags, tries;

	size = round_page(size);
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(object);
	vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, 0);
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
	VM_OBJECT_LOCK(object);
	end_offset = offset + size;
	for (; offset < end_offset; offset += PAGE_SIZE) {
		tries = 0;
retry:
		m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags, 1,
		    low, high, PAGE_SIZE, 0, memattr);
		if (m == NULL) {
			VM_OBJECT_UNLOCK(object);
			if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
				vm_map_unlock(map);
				vm_pageout_grow_cache(tries, low, high);
				vm_map_lock(map);
				VM_OBJECT_LOCK(object);
				tries++;
				goto retry;
			}

			/*
			 * Since the pages that were allocated by any previous
			 * iterations of this loop are not busy, they can be
			 * freed by vm_object_page_remove(), which is called
			 * by vm_map_delete().
			 */
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_UNLOCK(object);
	vm_map_unlock(map);
	vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
	    VM_MAP_WIRE_NOHOLES);
	return (addr);
}
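
/*
 * Example (sketch): allocate wired pages that must reside below 4GB
 * and be mapped uncacheable, e.g. for a device that cannot address
 * 64-bit physical memory.  The bounds are illustrative, and the
 * memory attribute names are machine-dependent (VM_MEMATTR_UNCACHEABLE
 * as on x86 is assumed here).
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_attr(kernel_map, 8 * PAGE_SIZE, M_WAITOK | M_ZERO,
 *	    0, 0xffffffff, VM_MEMATTR_UNCACHEABLE);
 *	if (va == 0)
 *		return (ENOMEM);
 */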

/*
 *	Allocates a region from the kernel address map and physically
 *	contiguous pages within the specified address range to the kernel
 *	object.  Creates a wired mapping from this region to these pages, and
 *	returns the region's starting virtual address.  If M_ZERO is specified
 *	through the given flags, then the pages are zeroed before they are
 *	mapped.
 */
vm_offset_t
kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vm_object_t object = kernel_object;
	vm_offset_t addr;
	vm_ooffset_t offset;
	vm_page_t end_m, m;
	int pflags, tries;

	size = round_page(size);
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(object);
	vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, 0);
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
	VM_OBJECT_LOCK(object);
	tries = 0;
retry:
	m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
	    atop(size), low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_UNLOCK(object);
		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
			vm_map_unlock(map);
			vm_pageout_grow_cache(tries, low, high);
			vm_map_lock(map);
			VM_OBJECT_LOCK(object);
			tries++;
			goto retry;
		}
		vm_map_delete(map, addr, addr + size);
		vm_map_unlock(map);
		return (0);
	}
	end_m = m + atop(size);
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_UNLOCK(object);
	vm_map_unlock(map);
	vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
	    VM_MAP_WIRE_NOHOLES);
	return (addr);
}
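
/*
 * Example (sketch): a physically contiguous DMA buffer below 4GB,
 * 64KB-aligned and never crossing a 1MB boundary.  All parameters
 * are illustrative only.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_contig(kernel_map, 64 * 1024, M_WAITOK | M_ZERO,
 *	    0, 0xffffffff, 64 * 1024, 1024 * 1024, VM_MEMATTR_DEFAULT);
 *	if (va == 0)
 *		return (ENOMEM);
 */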

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min is superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = vm_map_min(parent);
	ret = vm_map_find(parent, NULL, 0, min, size, superpage_align ?
	    VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_ACC_NO_CHARGE);
	if (ret != KERN_SUCCESS)
		panic("kmem_suballoc: bad status return of %d", ret);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
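
/*
 * Example (sketch): carving a submap out of kernel_map, in the style
 * of the boot-time creation of exec_map and pipe_map.  The 32MB size
 * is arbitrary.
 *
 *	vm_offset_t minaddr, maxaddr;
 *	vm_map_t submap;
 *
 *	submap = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    32 * 1024 * 1024, FALSE);
 */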

/*
 *	kmem_malloc:
 *
 * 	Allocate wired-down memory in the kernel's address map for the higher
 * 	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * 	kmem_alloc() because we may need to allocate memory at interrupt
 * 	level where we cannot block (i.e., when M_NOWAIT is specified).
 *
 * 	This routine has its own private kernel submap (kmem_map) and object
 * 	(kmem_object).  This, combined with the fact that only malloc uses
 * 	this routine, ensures that we will never block in map or object waits.
 *
 * 	We don't worry about expanding the map (adding entries) since entries
 * 	for wired maps are statically allocated.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps,
 *	from which we never free.
 */
vm_offset_t
kmem_malloc(vm_map_t map, vm_size_t size, int flags)
{
	vm_offset_t addr;
	int i, rv;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if ((flags & M_NOWAIT) == 0) {
			for (i = 0; i < 8; i++) {
				EVENTHANDLER_INVOKE(vm_lowmem, 0);
				uma_reclaim();
				vm_map_lock(map);
				if (vm_map_findspace(map, vm_map_min(map),
				    size, &addr) == 0)
					break;
				vm_map_unlock(map);
				tsleep(&i, 0, "nokva", (hz / 4) * (i + 1));
			}
			if (i == 8) {
				panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
				    (long)size, (long)map->size);
			}
		} else {
			return (0);
		}
	}

	rv = kmem_back(map, addr, size, flags);
	vm_map_unlock(map);
	return (rv == KERN_SUCCESS ? addr : 0);
}

/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_page_t m;
	int pflags;
	boolean_t found;

	KASSERT(vm_map_locked(map), ("kmem_back: map %p is not locked", map));
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	/*
	 * Assert: vm_map_insert() will never be able to extend the
	 * previous entry, so vm_map_lookup_entry() will find a new
	 * entry exactly corresponding to this address range and it
	 * will have wired_count == 0.
	 */
	found = vm_map_lookup_entry(map, addr, &entry);
	KASSERT(found && entry->start == addr && entry->end == addr + size &&
	    entry->wired_count == 0 &&
	    (entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0,
	    ("kmem_back: entry not found or misaligned"));

	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;

	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return.  Don't
		 * need to lock page queues here as we know that the pages we
		 * got aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_OBJECT_UNLOCK(kmem_object);
				entry->eflags |= MAP_ENTRY_IN_TRANSITION;
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				KASSERT((entry->eflags &
				    (MAP_ENTRY_IN_TRANSITION |
				    MAP_ENTRY_NEEDS_WAKEUP)) ==
				    MAP_ENTRY_IN_TRANSITION,
				    ("kmem_back: volatile entry"));
				entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
				VM_OBJECT_LOCK(kmem_object);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete() before the pages have been freed
			 * or unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_unwire(m, 0);
				vm_page_free(m);
			}
			VM_OBJECT_UNLOCK(kmem_object);
			vm_map_delete(map, addr, addr + size);
			return (KERN_NO_SPACE);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_back: page %p is managed", m));
	}
	VM_OBJECT_UNLOCK(kmem_object);

	/*
	 * Mark map entry as non-pageable.  Repeat the assert.
	 */
	KASSERT(entry->start == addr && entry->end == addr + size &&
	    entry->wired_count == 0,
	    ("kmem_back: entry not found or misaligned after allocation"));
	entry->wired_count = 1;

	/*
	 * At this point, the kmem_object must be unlocked because
	 * vm_map_simplify_entry() calls vm_object_deallocate(), which
	 * locks the kmem_object.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * Loop thru pages, entering them in the pmap.
	 */
	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
		    TRUE);
		vm_page_wakeup(m);
	}
	VM_OBJECT_UNLOCK(kmem_object);

	return (KERN_SUCCESS);
}

/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (!swap_reserve(size))
		return (0);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			swap_release(size);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_ACC_CHARGED);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}
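
/*
 * Example (sketch): the two routines above pair up around a pageable
 * submap such as exec_map, in the style of execve()'s temporary
 * argument buffer.  The size is illustrative.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_wait(exec_map, ARG_MAX);
 *	if (va == 0)
 *		return (ENOMEM);
 *	... copy the arguments through the pageable mapping ...
 *	kmem_free_wakeup(exec_map, va, ARG_MAX);
 */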

static void
kmem_init_zero_region(void)
{
	vm_offset_t addr, i;
	vm_page_t m;
	int error;

	/*
	 * Map a single physical page of zeros to a larger virtual range.
	 * This requires less looping in places that want large amounts of
	 * zeros, while not using much more physical resources.
	 */
	addr = kmem_alloc_nofault(kernel_map, ZERO_REGION_SIZE);
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
		pmap_qenter(addr + i, &m, 1);
	error = vm_map_protect(kernel_map, addr, addr + ZERO_REGION_SIZE,
	    VM_PROT_READ, TRUE);
	KASSERT(error == 0, ("error=%d", error));

	zero_region = (const void *)addr;
}
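
/*
 * Example (sketch): consumers read zeros straight from the shared,
 * read-only region instead of zeroing a private buffer, e.g. to
 * satisfy /dev/zero-style reads.  The uio handling and the "len"
 * and "error" locals are hypothetical.
 *
 *	while (uio->uio_resid > 0 && error == 0) {
 *		len = MIN(uio->uio_resid, ZERO_REGION_SIZE);
 *		error = uiomove(__DECONST(void *, zero_region), len, uio);
 *	}
 */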

/*
 * 	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
	    KERNBASE,
#else
	    VM_MIN_KERNEL_ADDRESS,
#endif
	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);

	kmem_init_zero_region();
}

#ifdef DIAGNOSTIC
/*
 * Allow userspace to directly trigger the VM drain routine for testing
 * purposes.
 */
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		EVENTHANDLER_INVOKE(vm_lowmem, 0);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    debug_vm_lowmem, "I", "set to trigger vm_lowmem event");
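
/*
 * Example (sketch): on a DIAGNOSTIC kernel, the knob above can be
 * poked from userland to force a vm_lowmem pass:
 *
 *	# sysctl debug.vm_lowmem=1
 */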
#endif