xref: /freebsd/sys/vm/vm_kern.c (revision 6e8394b8baa7d5d9153ab90de6824bcd19b3b4e1)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_kern.c,v 1.54 1999/03/16 07:39:07 alc Exp $
 */

/*
 *	Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t u_map=0;
vm_map_t buffer_map=0;
vm_map_t mb_map=0;
int mb_map_full=0;
vm_map_t io_map=0;
vm_map_t phys_map=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */

vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
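
/*
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 * the routine hands back page-aligned kernel VA with no pages behind it
 * yet; pages are faulted in on first touch.  Failure is reported as
 * address 0, and the range is released with kmem_free.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_pageable(kernel_map, 4 * PAGE_SIZE);
 *	if (va == 0)
 *		return (ENOMEM);	(map had no room for the request)
 *	... touch pages, which fault in on demand ...
 *	kmem_free(kernel_map, va, 4 * PAGE_SIZE);
 */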

/*
 *	kmem_alloc_nofault:
 *
 *	Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */

vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	register vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_pageable.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
	 *    for it.
	 * 3) vm_map_pageable calls vm_fault; there is no page, but there is
	 *    a pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent a
	 * race with page-out.  vm_map_pageable will wire the pages.
	 */

	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
				VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */

	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	return (addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	register vm_offset_t addr;
	vm_size_t size;
{
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
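
/*
 * A paired usage sketch (hypothetical caller, not taken from this file):
 * kmem_alloc returns wired, zero-filled kernel memory, and each successful
 * kmem_alloc should eventually be balanced by a kmem_free against the
 * same map with the same address and size.
 *
 *	vm_offset_t buf;
 *	vm_size_t len = 2 * PAGE_SIZE;
 *
 *	buf = kmem_alloc(kernel_map, len);
 *	if (buf == 0)
 *		panic("cannot wire buffer");
 *	... use the zero-filled, wired pages at buf ...
 *	kmem_free(kernel_map, buf, len);
 */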

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	register vm_map_t parent;
	vm_offset_t *min, *max;
	register vm_size_t size;
{
	register int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
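
/*
 * Illustrative sketch (hypothetical size, not taken from this file):
 * boot-time code carves fixed-size submaps out of kernel_map in roughly
 * this style, after which allocations against the submap stay within
 * [minaddr, maxaddr) and never contend with the rest of kernel_map.
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    16 * PAGE_SIZE);
 */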

/*
 *	kmem_malloc:
 *
 * 	Allocate wired-down memory in the kernel's address map for the higher
 * 	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * 	kmem_alloc() because we may need to allocate memory at interrupt
 * 	level where we cannot block (canwait == FALSE).
 *
 * 	This routine has its own private kernel submap (kmem_map) and object
 * 	(kmem_object).  This, combined with the fact that only malloc uses
 * 	this routine, ensures that we will never block in map or object waits.
 *
 * 	Note that this still only works in a uni-processor environment and
 * 	when called at splhigh().
 *
 * 	We don't worry about expanding the map (adding entries) since entries
 * 	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	register vm_map_t map;
	register vm_size_t size;
	int flags;
{
	register vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;

	if (map != kmem_map && map != mb_map)
		panic("kmem_malloc: map != {kmem,mb}_map");

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map == mb_map) {
			mb_map_full = TRUE;
			printf("Out of mbuf clusters - adjust NMBCLUSTERS or increase maxusers!\n");
			return (0);
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
				(long)size, (long)map->size);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	for (i = 0; i < size; i += PAGE_SIZE) {
		/*
		 * Note: if M_NOWAIT specified alone, allocate from
		 * interrupt-safe queues only (just the free list).  If
		 * M_ASLEEP or M_USE_RESERVE is also specified, we can also
		 * allocate from the cache.  Neither of the latter two
		 * flags may be specified from an interrupt since interrupts
		 * are not allowed to mess with the cache queue.
		 */
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
		    ((flags & (M_NOWAIT|M_ASLEEP|M_USE_RESERVE)) == M_NOWAIT) ?
			VM_ALLOC_INTERRUPT :
			VM_ALLOC_SYSTEM);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				goto retry;
			}
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			if (flags & M_ASLEEP) {
				VM_AWAIT;
			}
			return (0);
		}
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	vm_map_simplify_entry(map, entry);

	/*
	 * Loop thru pages, entering them in the pmap. (We cannot add them to
	 * the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		vm_page_wakeup(m);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m),
			VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
	vm_map_unlock(map);

	return (addr);
}
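
/*
 * A hedged sketch of how the malloc layer might drive this routine
 * (hypothetical fragment, not the actual kern_malloc.c code): an M_NOWAIT
 * caller can run at interrupt level and must tolerate a 0 return, while a
 * sleeping caller lets kmem_malloc wait in VM_WAIT until pages appear.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kmem_map, round_page(len), M_NOWAIT);
 *	if (va == 0) {
 *		... back off; we may be in interrupt context ...
 *	}
 */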

/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */

vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, PVM, "kmaw", 0);
	}
	vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	wakeup(map);
	vm_map_unlock(map);
}
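
/*
 * These two routines pair up; a hypothetical sketch (not taken from this
 * file) of a consumer of space in a small submap.  A caller sleeping in
 * kmem_alloc_wait is awakened by the wakeup(map) in kmem_free_wakeup once
 * a peer releases its range.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_wait(exec_map, len);	(may tsleep until space)
 *	if (va == 0)
 *		return (ENOMEM);		(request can never fit)
 *	... use [va, va + len) ...
 *	kmem_free_wakeup(exec_map, va, len);
 */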

/*
 * 	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */

void
kmem_init(start, end)
	vm_offset_t start, end;
{
	register vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
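
/*
 * For context, a hedged sketch of the boot-time call (the exact caller and
 * symbols vary by platform; `virtual_avail' and `virtual_end' here are
 * assumptions, not taken from this file): the range below start is recorded
 * as already consumed by the kernel image and early bootstrap allocations,
 * and everything from start up to end becomes allocatable.
 *
 *	kmem_init(virtual_avail, virtual_end);
 */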