xref: /freebsd/sys/vm/vm_kern.c (revision ea825d02749f382c3f7e17f28247f20a48733eab)
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_radix.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map;
vm_map_t exec_map;
vm_map_t pipe_map;

const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);

/* NB: Used by kernel debuggers. */
const u_long vm_maxuser_address = VM_MAXUSER_ADDRESS;

u_int exec_map_entry_size;
u_int exec_map_entries;

SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
    SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS, "Min kernel address");

SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#if defined(__arm__) || defined(__sparc64__)
    &vm_max_kernel_address, 0,
#else
    SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
#endif
    "Max kernel address");

/*
 *	kva_alloc:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kva_alloc(vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
		return (0);

	return (addr);
}

/*
 *	kva_free:
 *
 *	Release a region of kernel virtual address space that was allocated
 *	with kva_alloc().  This routine frees only the virtual address
 *	range; it does not free or unmap any physical pages that may still
 *	be mapped there.  The caller is responsible for removing such
 *	mappings, typically with pmap_qremove(), before freeing the range.
 *
 *	This routine may not block.
 */
void
kva_free(vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	vmem_free(kernel_arena, addr, size);
}
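
/*
 * A minimal usage sketch for kva_alloc() and kva_free() (hypothetical
 * helper, not part of this file's interface): allocate a page of KVA,
 * map one wired physical page into it with pmap_qenter(), and tear the
 * mapping down again before the address range is released.
 */
static void __unused
kva_alloc_example(void)
{
	vm_offset_t addr;
	vm_page_t m;

	addr = kva_alloc(PAGE_SIZE);	/* VA only; nothing is mapped yet. */
	if (addr == 0)
		return;
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
	    VM_ALLOC_WIRED);
	if (m == NULL) {
		kva_free(addr, PAGE_SIZE);
		return;
	}
	pmap_qenter(addr, &m, 1);	/* Create the mapping explicitly. */

	/* ... use the page through 'addr' ... */

	pmap_qremove(addr, 1);		/* Unmap before freeing the KVA. */
	vm_page_unwire(m, PQ_NONE);
	vm_page_free(m);
	kva_free(addr, PAGE_SIZE);
}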

/*
 *	kmem_alloc_attr:
 *
 *	Allocates a region of the kernel address space from the given arena,
 *	backs it with physical pages drawn from the specified physical
 *	address range, and binds those pages to the arena's VM object
 *	(kmem_object or kernel_object).  Creates a wired mapping from the
 *	region to the pages, and returns the region's starting virtual
 *	address.  The allocated pages are not necessarily physically
 *	contiguous.  If M_ZERO is specified through the given flags, then
 *	the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr, i, offset;
	vm_page_t m;
	int pflags, tries;

	size = round_page(size);
	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		tries = 0;
retry:
		m = vm_page_alloc_contig(object, atop(offset + i),
		    pflags, 1, low, high, PAGE_SIZE, 0, memattr);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
				if (!vm_page_reclaim_contig(pflags, 1,
				    low, high, PAGE_SIZE, 0) &&
				    (flags & M_WAITOK) != 0)
					VM_WAIT;
				VM_OBJECT_WLOCK(object);
				tries++;
				goto retry;
			}
			kmem_unback(object, addr, i);
			vmem_free(vmem, addr, size);
			return (0);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
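
/*
 * A hypothetical caller of kmem_alloc_attr(): allocate a zeroed, wired
 * buffer whose pages all lie below 4 GB, e.g. for a device that can only
 * address 32 bits.  The helper name and the constraints shown are
 * illustrative only.
 */
static vm_offset_t __unused
kmem_alloc_attr_example(vm_size_t size)
{

	return (kmem_alloc_attr(kernel_arena, size, M_WAITOK | M_ZERO,
	    0, 0xffffffff, VM_MEMATTR_DEFAULT));
}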

/*
 *	kmem_alloc_contig:
 *
 *	Allocates a region of the kernel address space from the given arena,
 *	backs it with physically contiguous pages drawn from the specified
 *	physical address range, and binds those pages to the arena's VM
 *	object (kmem_object or kernel_object).  Creates a wired mapping from
 *	the region to the pages, and returns the region's starting virtual
 *	address.  If M_ZERO is specified through the given flags, then the
 *	pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr, offset, tmp;
	vm_page_t end_m, m;
	u_long npages;
	int pflags, tries;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	npages = atop(size);
	VM_OBJECT_WLOCK(object);
	tries = 0;
retry:
	m = vm_page_alloc_contig(object, atop(offset), pflags,
	    npages, low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
			if (!vm_page_reclaim_contig(pflags, npages, low, high,
			    alignment, boundary) && (flags & M_WAITOK) != 0)
				VM_WAIT;
			VM_OBJECT_WLOCK(object);
			tries++;
			goto retry;
		}
		vmem_free(vmem, addr, size);
		return (0);
	}
	end_m = m + npages;
	tmp = addr;
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
		tmp += PAGE_SIZE;
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
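
/*
 * A hypothetical caller of kmem_alloc_contig(): allocate a physically
 * contiguous, page-aligned staging buffer that does not cross a 64 KB
 * boundary, with no upper bound on the physical address.  The constraints
 * shown are illustrative, not requirements of the interface.
 */
static vm_offset_t __unused
kmem_alloc_contig_example(vm_size_t size)
{

	return (kmem_alloc_contig(kernel_arena, size, M_WAITOK | M_ZERO,
	    0, ~(vm_paddr_t)0, PAGE_SIZE, 64 * 1024, VM_MEMATTR_DEFAULT));
}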

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min be superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = vm_map_min(parent);
	ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_ACC_NO_CHARGE);
	if (ret != KERN_SUCCESS)
		panic("kmem_suballoc: bad status return of %d", ret);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
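
/*
 * A sketch of how a submap is typically created with kmem_suballoc()
 * (hypothetical helper, patterned on the boot-time setup of submaps such
 * as exec_map and pipe_map; the size is a placeholder): the returned map
 * then serves allocations such as kmap_alloc_wait() without contending
 * on kernel_map.
 */
static vm_map_t __unused
kmem_suballoc_example(void)
{
	vm_offset_t minaddr, maxaddr;

	return (kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * PAGE_SIZE, FALSE));
}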

/*
 *	kmem_malloc:
 *
 *	Allocate wired-down pages in the kernel's address space.
 */
vm_offset_t
kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
{
	vm_offset_t addr;
	int rv;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);

	rv = kmem_back((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size, flags);
	if (rv != KERN_SUCCESS) {
		vmem_free(vmem, addr, size);
		return (0);
	}
	return (addr);
}

/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_page_t m, mpred;
	int pflags;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_back: only supports kernel objects."));

	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;

	i = 0;
retry:
	VM_OBJECT_WLOCK(object);
	mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
	for (; i < size; i += PAGE_SIZE, mpred = m) {
		m = vm_page_alloc_after(object, atop(offset + i), pflags,
		    mpred);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if ((flags & M_NOWAIT) == 0) {
				VM_WAIT;
				goto retry;
			}
			kmem_unback(object, addr, i);
			return (KERN_NO_SPACE);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_back: page %p is managed", m));
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);

	return (KERN_SUCCESS);
}

/*
 *	kmem_unback:
 *
 *	Unmap and free the physical pages underlying the specified virtual
 *	address range.
 *
 *	A physical page must exist within the specified object at each index
 *	that is being unmapped.
 */
void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	vm_page_t m, next;
	vm_offset_t end, offset;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_unback: only supports kernel objects."));

	pmap_remove(kernel_pmap, addr, addr + size);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	end = offset + size;
	VM_OBJECT_WLOCK(object);
	for (m = vm_page_lookup(object, atop(offset)); offset < end;
	    offset += PAGE_SIZE, m = next) {
		next = vm_page_next(m);
		vm_page_unwire(m, PQ_NONE);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(object);
}
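
/*
 * A sketch of the kmem_back()/kmem_unback() pairing (hypothetical helper;
 * kmem_malloc() above is the canonical user): reserve kernel virtual
 * address space first, then attach physical pages to it, releasing both
 * again if the backing step fails.
 */
static vm_offset_t __unused
kmem_back_example(vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
		return (0);
	if (kmem_back(kernel_object, addr, size, M_NOWAIT | M_ZERO) !=
	    KERN_SUCCESS) {
		vmem_free(kernel_arena, addr, size);
		return (0);
	}
	/* ... later: kmem_unback(kernel_object, addr, size), vmem_free(). */
	return (addr);
}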

/*
 *	kmem_free:
 *
 *	Free memory allocated with kmem_malloc.  The size must match the
 *	original allocation.
 */
void
kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	kmem_unback((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size);
	vmem_free(vmem, addr, size);
}
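
/*
 * A minimal round trip through kmem_malloc()/kmem_free() (illustrative
 * only): the same vmem arena and the same size must be passed to both
 * calls, since kmem_free() uses them to locate the backing pages.
 */
static void __unused
kmem_malloc_example(void)
{
	vm_offset_t addr;

	addr = kmem_malloc(kernel_arena, 4 * PAGE_SIZE, M_WAITOK | M_ZERO);
	if (addr == 0)
		return;
	/* ... use the wired, zeroed allocation ... */
	kmem_free(kernel_arena, addr, 4 * PAGE_SIZE);
}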

/*
 *	kmap_alloc_wait:
 *
 *	Allocates pageable memory from a submap of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmap_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (!swap_reserve(size))
		return (0);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			swap_release(size);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_ACC_CHARGED);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmap_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmap_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}
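
/*
 * A sketch of the kmap_alloc_wait()/kmap_free_wakeup() pattern
 * (hypothetical caller, loosely patterned on the exec argument buffers
 * carved out of exec_map): allocate pageable buffer space from a submap,
 * sleeping until space is available, and wake any waiters when the space
 * is returned.
 */
static void __unused
kmap_alloc_example(void)
{
	vm_offset_t buf;

	buf = kmap_alloc_wait(exec_map, 8 * PAGE_SIZE);
	if (buf == 0)
		return;		/* Request larger than the whole submap. */
	/* ... fill the pageable buffer ... */
	kmap_free_wakeup(exec_map, buf, 8 * PAGE_SIZE);
}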

void
kmem_init_zero_region(void)
{
	vm_offset_t addr, i;
	vm_page_t m;

	/*
	 * Map a single physical page of zeros to a larger virtual range.
	 * This requires less looping in places that want large amounts of
	 * zeros, while not using much more physical resources.
	 */
	addr = kva_alloc(ZERO_REGION_SIZE);
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
		pmap_qenter(addr + i, &m, 1);
	pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);

	zero_region = (const void *)addr;
}
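
/*
 * A sketch of a zero_region consumer (hypothetical helper): because the
 * region is ZERO_REGION_SIZE bytes of mapped, read-only zeros, callers
 * can copy from it in large chunks instead of zeroing word by word, much
 * as /dev/zero-style reads do with uiomove().
 */
static void __unused
zero_region_copy_example(void *dst, size_t len)
{
	size_t chunk;

	while (len > 0) {
		chunk = len < ZERO_REGION_SIZE ? len : ZERO_REGION_SIZE;
		memcpy(dst, zero_region, chunk);
		dst = (char *)dst + chunk;
		len -= chunk;
	}
}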

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
	    KERNBASE,
#else
	    VM_MIN_KERNEL_ADDRESS,
#endif
	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}

#ifdef DIAGNOSTIC
/*
 * Allow userspace to directly trigger the VM drain routine for testing
 * purposes.
 */
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if ((i & ~(VM_LOW_KMEM | VM_LOW_PAGES)) != 0)
		return (EINVAL);
	if (i != 0)
		EVENTHANDLER_INVOKE(vm_lowmem, i);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    debug_vm_lowmem, "I", "set to trigger vm_lowmem event with given flags");
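
/*
 * Usage sketch: on a kernel built with DIAGNOSTIC, writing a flag mask to
 * this sysctl fires the vm_lowmem event handlers as if the system were
 * under memory pressure, e.g.:
 *
 *	# sysctl debug.vm_lowmem=<mask>
 *
 * where <mask> is some combination of the VM_LOW_KMEM and VM_LOW_PAGES
 * bits.  Any other bits are rejected with EINVAL, and a value of zero is
 * accepted without invoking the handlers.
 */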
#endif
559