xref: /freebsd/sys/vm/vm_kern.c (revision 669f9224ec5398fbc825dd031415126af032cf42)
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map;
vm_map_t exec_map;
vm_map_t pipe_map;

const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);

/* NB: Used by kernel debuggers. */
const u_long vm_maxuser_address = VM_MAXUSER_ADDRESS;

SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
    SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS, "Min kernel address");

SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#if defined(__arm__) || defined(__sparc64__)
    &vm_max_kernel_address, 0,
#else
    SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
#endif
    "Max kernel address");

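/*
 * Example (informational): both OIDs above are read-only and can be
 * inspected from userland:
 *
 *	$ sysctl vm.min_kernel_address vm.max_kernel_address
 */
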
/*
 *	kva_alloc:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kva_alloc(vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
		return (0);

	return (addr);
}

/*
 *	kva_free:
 *
 *	Release a region of kernel virtual address space allocated
 *	with kva_alloc.  This routine does not itself free or unmap
 *	any physical pages still mapped within the region; the caller
 *	is expected to remove such mappings, e.g., with pmap_qremove(),
 *	beforehand.
 *
 *	This routine may not block.
 */
void
kva_free(vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	vmem_free(kernel_arena, addr, size);
}

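/*
 * Example (hypothetical usage sketch, not part of the original file):
 * a typical kva_alloc() consumer wires its own pages into the range
 * with pmap_qenter() and removes them with pmap_qremove() before
 * freeing the range.  How the wired page "m" is obtained is elided.
 *
 *	vm_offset_t va;
 *
 *	va = kva_alloc(PAGE_SIZE);
 *	if (va == 0)
 *		return (ENOMEM);
 *	pmap_qenter(va, &m, 1);		(map one wired page at va)
 *	...				(use the mapping)
 *	pmap_qremove(va, 1);		(invalidate the mapping)
 *	kva_free(va, PAGE_SIZE);	(release the address range)
 */
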
/*
 *	kmem_alloc_attr:
 *
 *	Allocates a region of the kernel address map and backs it with
 *	physical pages drawn from the specified address range [low, high],
 *	which are inserted into the kernel object.  Creates a wired mapping
 *	from the region to those pages, and returns the region's starting
 *	virtual address.  The allocated pages are not necessarily physically
 *	contiguous.  If M_ZERO is specified through the given flags, then
 *	the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr, i;
	vm_ooffset_t offset;
	vm_page_t m;
	int pflags, tries;

	size = round_page(size);
	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		tries = 0;
retry:
		m = vm_page_alloc_contig(object, OFF_TO_IDX(offset + i),
		    pflags, 1, low, high, PAGE_SIZE, 0, memattr);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
				vm_pageout_grow_cache(tries, low, high);
				VM_OBJECT_WLOCK(object);
				tries++;
				goto retry;
			}
			kmem_unback(object, addr, i);
			vmem_free(vmem, addr, size);
			return (0);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}

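/*
 * Example (hypothetical sketch, not from this file): allocate a wired,
 * zeroed 64KB buffer whose pages all lie below 4GB, e.g., for a device
 * limited to 32-bit DMA addresses; the pages need not be contiguous:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_attr(kernel_arena, 64 * 1024, M_WAITOK | M_ZERO,
 *	    0, 0xffffffff, VM_MEMATTR_DEFAULT);
 */
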
/*
 *	kmem_alloc_contig:
 *
 *	Allocates a region of the kernel address map and backs it with
 *	physically contiguous pages drawn from the specified address range
 *	[low, high], which are inserted into the kernel object.  Creates a
 *	wired mapping from the region to those pages, and returns the
 *	region's starting virtual address.  If M_ZERO is specified through
 *	the given flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr, tmp;
	vm_ooffset_t offset;
	vm_page_t end_m, m;
	int pflags, tries;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	VM_OBJECT_WLOCK(object);
	tries = 0;
retry:
	m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
	    atop(size), low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
			vm_pageout_grow_cache(tries, low, high);
			VM_OBJECT_WLOCK(object);
			tries++;
			goto retry;
		}
		vmem_free(vmem, addr, size);
		return (0);
	}
	end_m = m + atop(size);
	tmp = addr;
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
		tmp += PAGE_SIZE;
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}

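/*
 * Example (hypothetical sketch, not from this file): allocate 1MB of
 * physically contiguous, wired memory below 4GB, aligned to a 1MB
 * boundary, with no crossing restriction and default cache attributes:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_contig(kernel_arena, 1024 * 1024, M_WAITOK,
 *	    0, 0xffffffff, 1024 * 1024, 0, VM_MEMATTR_DEFAULT);
 */
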
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min be superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = vm_map_min(parent);
	ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_ACC_NO_CHARGE);
	if (ret != KERN_SUCCESS)
		panic("kmem_suballoc: bad status return of %d", ret);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}

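/*
 * Example (hypothetical sketch, not from this file): carve a submap out
 * of kernel_map in the style of the exec_map and pipe_map setup done at
 * boot; the size here is made up:
 *
 *	vm_offset_t minaddr, maxaddr;
 *	vm_map_t submap;
 *
 *	submap = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    16 * 1024 * 1024, FALSE);
 */
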
/*
 *	kmem_malloc:
 *
 *	Allocate wired-down pages in the kernel's address space.
 */
vm_offset_t
kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
{
	vm_offset_t addr;
	int rv;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);

	rv = kmem_back((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size, flags);
	if (rv != KERN_SUCCESS) {
		vmem_free(vmem, addr, size);
		return (0);
	}
	return (addr);
}

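/*
 * Example (hypothetical sketch, not from this file): allocate and later
 * release a wired two-page scratch buffer; kmem_free() (below) must be
 * passed the same arena and size:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kernel_arena, 2 * PAGE_SIZE, M_WAITOK | M_ZERO);
 *	...
 *	kmem_free(kernel_arena, va, 2 * PAGE_SIZE);
 */
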
/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_page_t m;
	int pflags;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_back: only supports kernel objects."));

	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;

	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return.  Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if ((flags & M_NOWAIT) == 0) {
				VM_WAIT;
				VM_OBJECT_WLOCK(object);
				goto retry;
			}
			kmem_unback(object, addr, i);
			return (KERN_NO_SPACE);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_back: page %p is managed", m));
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);

	return (KERN_SUCCESS);
}

/*
 *	kmem_unback:
 *
 *	Unmap and free the physical pages underlying the specified virtual
 *	address range.
 *
 *	A physical page must exist within the specified object at each index
 *	that is being unmapped.
 */
void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	vm_page_t m;
	vm_offset_t i, offset;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_unback: only supports kernel objects."));

	pmap_remove(kernel_pmap, addr, addr + size);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(object, OFF_TO_IDX(offset + i));
		vm_page_unwire(m, PQ_NONE);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(object);
}

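/*
 * Example (hypothetical sketch, not from this file): kmem_malloc() above
 * is essentially vmem_alloc() followed by kmem_back(); a caller holding
 * its own kernel_arena range could back and unback it the same way:
 *
 *	if (kmem_back(kernel_object, addr, size, M_WAITOK) != KERN_SUCCESS)
 *		vmem_free(kernel_arena, addr, size);
 *	...
 *	kmem_unback(kernel_object, addr, size);
 */
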
/*
 *	kmem_free:
 *
 *	Free memory allocated with kmem_malloc.  The size must match the
 *	original allocation.
 */
void
kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	kmem_unback((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size);
	vmem_free(vmem, addr, size);
}

/*
 *	kmap_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmap_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (!swap_reserve(size))
		return (0);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			swap_release(size);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_ACC_CHARGED);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmap_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmap_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}

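/*
 * Example (hypothetical sketch, not from this file): transient, pageable
 * space from a submap such as exec_map, blocking until address space is
 * available and waking any waiters on release:
 *
 *	vm_offset_t va;
 *
 *	va = kmap_alloc_wait(exec_map, PATH_MAX);
 *	...
 *	kmap_free_wakeup(exec_map, va, PATH_MAX);
 */
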
void
kmem_init_zero_region(void)
{
	vm_offset_t addr, i;
	vm_page_t m;

	/*
	 * Map a single physical page of zeros to a larger virtual range.
	 * This requires less looping in places that want large amounts of
	 * zeros, while not using much more physical resources.
	 */
	addr = kva_alloc(ZERO_REGION_SIZE);
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
		pmap_qenter(addr + i, &m, 1);
	pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);

	zero_region = (const void *)addr;
}

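/*
 * Example (hypothetical sketch, not from this file): a consumer such as
 * a /dev/zero read handler can copy out of zero_region in
 * ZERO_REGION_SIZE chunks instead of zeroing a private buffer:
 *
 *	len = uio->uio_resid;
 *	if (len > ZERO_REGION_SIZE)
 *		len = ZERO_REGION_SIZE;
 *	error = uiomove(__DECONST(void *, zero_region), len, uio);
 */
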
/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
	    KERNBASE,
#else
	    VM_MIN_KERNEL_ADDRESS,
#endif
	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}

#ifdef DIAGNOSTIC
/*
 * Allow userspace to directly trigger the VM drain routine for testing
 * purposes.
 */
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		EVENTHANDLER_INVOKE(vm_lowmem, 0);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    debug_vm_lowmem, "I", "set to trigger vm_lowmem event");
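
/*
 * Example: on a kernel built with DIAGNOSTIC, the handler above can be
 * exercised from userland with:
 *
 *	# sysctl debug.vm_lowmem=1
 */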
#endif