/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map;
vm_map_t exec_map;
vm_map_t pipe_map;

const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);

SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
    SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS, "Min kernel address");

SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#if defined(__arm__) || defined(__sparc64__)
    &vm_max_kernel_address, 0,
#else
    SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
#endif
    "Max kernel address");

/*
 *	kva_alloc:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kva_alloc(vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
		return (0);

	return (addr);
}

/*
 *	kva_free:
 *
 *	Release a region of kernel virtual address space allocated
 *	with kva_alloc.  This routine frees only the address range;
 *	it does not free or unmap any physical pages, so the caller
 *	must remove any remaining mappings beforehand.
 *
 *	This routine may not block on kernel maps.
 */
void
kva_free(vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	vmem_free(kernel_arena, addr, size);
}
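
/*
 * Example (an editorial sketch, not code from this file): a typical
 * consumer pairs kva_alloc() with pmap_qenter() to map pages it already
 * owns, and undoes both on release.  The page array "pages" and count
 * "npages" are hypothetical.
 *
 *	vm_offset_t va;
 *
 *	va = kva_alloc(ptoa(npages));
 *	if (va == 0)
 *		return (ENOMEM);
 *	pmap_qenter(va, pages, npages);
 *	...
 *	pmap_qremove(va, npages);
 *	kva_free(va, ptoa(npages));
 */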

/*
 *	Allocates a region from the kernel address map and backs it with
 *	physical pages allocated from within the specified physical address
 *	range.  The pages are allocated to the kernel object (or to the kmem
 *	object, when allocating from the kmem arena).  Creates a wired
 *	mapping from this region to these pages, and returns the region's
 *	starting virtual address.  The allocated pages are not necessarily
 *	physically contiguous.  If M_ZERO is specified through the given
 *	flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr;
	vm_ooffset_t offset;
	vm_page_t m;
	int pflags, tries;
	int i;

	size = round_page(size);
	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		tries = 0;
retry:
		m = vm_page_alloc_contig(object, OFF_TO_IDX(offset + i),
		    pflags, 1, low, high, PAGE_SIZE, 0, memattr);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
				vm_pageout_grow_cache(tries, low, high);
				VM_OBJECT_WLOCK(object);
				tries++;
				goto retry;
			}
			/*
			 * Unmap and free the pages already allocated.
			 * kmem_unback() reacquires the object lock, which
			 * the page frees require; the lock was dropped
			 * above and must not stay dropped here.
			 */
			kmem_unback(object, addr, i);
			vmem_free(vmem, addr, size);
			return (0);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
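
/*
 * Example (a hedged sketch; the size and bounds are illustrative): a
 * caller needing a buffer whose pages all reside below 4GB, e.g. for a
 * 32-bit DMA engine, might do:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_attr(kernel_arena, 64 * 1024, M_WAITOK | M_ZERO,
 *	    0, 0xffffffff, VM_MEMATTR_DEFAULT);
 *	if (va == 0)
 *		return (ENOMEM);
 *	...
 *	kmem_free(kernel_arena, va, 64 * 1024);
 */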

/*
 *	Allocates a region from the kernel address map and backs it with
 *	physically contiguous pages allocated from within the specified
 *	physical address range.  The pages are allocated to the kernel
 *	object (or to the kmem object, when allocating from the kmem
 *	arena).  Creates a wired mapping from this region to these pages,
 *	and returns the region's starting virtual address.  If M_ZERO is
 *	specified through the given flags, then the pages are zeroed
 *	before they are mapped.
 */
vm_offset_t
kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr, tmp;
	vm_ooffset_t offset;
	vm_page_t end_m, m;
	int pflags, tries;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	VM_OBJECT_WLOCK(object);
	tries = 0;
retry:
	m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
	    atop(size), low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
			vm_pageout_grow_cache(tries, low, high);
			VM_OBJECT_WLOCK(object);
			tries++;
			goto retry;
		}
		vmem_free(vmem, addr, size);
		return (0);
	}
	end_m = m + atop(size);
	tmp = addr;
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
		tmp += PAGE_SIZE;
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
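
/*
 * Example (a hedged sketch): a driver needing a physically contiguous,
 * 64KB-aligned descriptor ring below 4GB might do the following, where
 * "ring_size" is hypothetical and a boundary of 0 imposes no crossing
 * restriction:
 *
 *	vm_offset_t ring;
 *
 *	ring = kmem_alloc_contig(kernel_arena, ring_size,
 *	    M_WAITOK | M_ZERO, 0, 0xffffffff, 65536, 0, VM_MEMATTR_DEFAULT);
 *	if (ring == 0)
 *		return (ENOMEM);
 *	...
 *	kmem_free(kernel_arena, ring, ring_size);
 */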

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min is superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = vm_map_min(parent);
	ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_ACC_NO_CHARGE);
	if (ret != KERN_SUCCESS)
		panic("kmem_suballoc: bad status return of %d", ret);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
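
/*
 * Example (a sketch modeled on typical startup code; the submap and size
 * are illustrative): this is how a submap such as pipe_map is carved out
 * of kernel_map during boot:
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    maxpipekva, FALSE);
 */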

/*
 *	kmem_malloc:
 *
 *	Allocate wired-down pages in the kernel's address space.
 */
vm_offset_t
kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
{
	vm_offset_t addr;
	int rv;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);

	rv = kmem_back((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size, flags);
	if (rv != KERN_SUCCESS) {
		vmem_free(vmem, addr, size);
		return (0);
	}
	return (addr);
}

/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_page_t m;
	int pflags;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_back: only supports kernel objects."));

	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;

	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return.  Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_OBJECT_WUNLOCK(object);
				VM_WAIT;
				VM_OBJECT_WLOCK(object);
				goto retry;
			}
			/*
			 * Unmap and free the pages.
			 */
			if (i != 0)
				pmap_remove(kernel_pmap, addr, addr + i);
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(object,
				    OFF_TO_IDX(offset + i));
				vm_page_unwire(m, PQ_INACTIVE);
				vm_page_free(m);
			}
			VM_OBJECT_WUNLOCK(object);
			return (KERN_NO_SPACE);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_back: page %p is managed", m));
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);

	return (KERN_SUCCESS);
}
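
/*
 *	kmem_unback:
 *
 *	Unmap and free the physical pages underlying the specified virtual
 *	address range.
 *
 *	A physical page must exist within the specified object at each index
 *	that is being unmapped.
 */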
void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	vm_page_t m;
	vm_offset_t offset;
	int i;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_unback: only supports kernel objects."));

	pmap_remove(kernel_pmap, addr, addr + size);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(object, OFF_TO_IDX(offset + i));
		vm_page_unwire(m, PQ_INACTIVE);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(object);
}

/*
 *	kmem_free:
 *
 *	Free memory allocated with kmem_malloc.  The size must match the
 *	original allocation.
 */
void
kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	kmem_unback((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size);
	vmem_free(vmem, addr, size);
}
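
/*
 * Example (a hedged sketch; "bufsize" is hypothetical): wired memory
 * from kmem_malloc() must be released through kmem_free() with a
 * matching size, per the comment above:
 *
 *	vm_offset_t buf;
 *
 *	buf = kmem_malloc(kernel_arena, bufsize, M_WAITOK | M_ZERO);
 *	if (buf == 0)
 *		return (ENOMEM);
 *	...
 *	kmem_free(kernel_arena, buf, bufsize);
 */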

/*
 *	kmap_alloc_wait:
 *
 *	Allocates pageable memory from a submap of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmap_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (!swap_reserve(size))
		return (0);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			swap_release(size);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_ACC_CHARGED);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmap_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmap_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}
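
/*
 * Example (a hedged sketch modeled on the exec argument path): a caller
 * allocates pageable space from a submap such as exec_map and later
 * returns it, waking any threads sleeping for space:
 *
 *	vm_offset_t kva;
 *
 *	kva = kmap_alloc_wait(exec_map, PATH_MAX + ARG_MAX);
 *	if (kva == 0)
 *		return (ENOMEM);
 *	...
 *	kmap_free_wakeup(exec_map, kva, PATH_MAX + ARG_MAX);
 */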

void
kmem_init_zero_region(void)
{
	vm_offset_t addr, i;
	vm_page_t m;

	/*
	 * Map a single physical page of zeros to a larger virtual range.
	 * This requires less looping in places that want large amounts of
	 * zeros, while not using much more physical resources.
	 */
	addr = kva_alloc(ZERO_REGION_SIZE);
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
		pmap_qenter(addr + i, &m, 1);
	pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);

	zero_region = (const void *)addr;
}
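
/*
 * Example (a hedged sketch): consumers read zeros straight out of the
 * shared read-only region instead of zeroing a private buffer;
 * /dev/zero-style code can copy out up to ZERO_REGION_SIZE bytes per
 * iteration:
 *
 *	error = uiomove(__DECONST(void *, zero_region),
 *	    MIN(uio->uio_resid, ZERO_REGION_SIZE), uio);
 */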

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
	    KERNBASE,
#else
	    VM_MIN_KERNEL_ADDRESS,
#endif
	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}

#ifdef DIAGNOSTIC
/*
 * Allow userspace to directly trigger the VM drain routine for testing
 * purposes.
 */
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		EVENTHANDLER_INVOKE(vm_lowmem, 0);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    debug_vm_lowmem, "I", "set to trigger vm_lowmem event");
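
/*
 * Example (usage from userland, DIAGNOSTIC kernels only): writing any
 * nonzero value fires the vm_lowmem event handlers:
 *
 *	# sysctl debug.vm_lowmem=1
 */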
#endif