/* xref: /freebsd/sys/vm/vm_kern.c (revision ee41f1b1cf5e3d4f586cb85b46123b416275862c) */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t buffer_map=0;
vm_map_t mb_map=0;
int mb_map_full=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */

vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
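
#if 0
/*
 * Illustrative sketch, not compiled into this file: a subsystem that
 * wants a pageable kernel buffer might pair kmem_alloc_pageable() with
 * kmem_free() (defined below).  The function name and buffer size here
 * are hypothetical.
 */
static int
example_pageable_buffer()
{
	vm_offset_t buf;

	buf = kmem_alloc_pageable(kernel_map, 16 * PAGE_SIZE);
	if (buf == 0)
		return (ENOMEM);	/* no kernel VA space available */
	/* ... pages are faulted in on first touch and stay pageable ... */
	kmem_free(kernel_map, buf, 16 * PAGE_SIZE);
	return (0);
}
#endif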

/*
 *	kmem_alloc_nofault:
 *
 *	Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */

vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
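
#if 0
/*
 * Illustrative sketch, not compiled: a MAP_NOFAULT entry reserves kernel
 * VA whose pages the caller installs and manages explicitly, so the
 * range is never demand-paged behind its back.  The wrapper below is
 * hypothetical.
 */
static vm_offset_t
example_nofault_range()
{
	/* Reserve two pages of VA; no object backs this entry yet. */
	return (kmem_alloc_nofault(kernel_map, 2 * PAGE_SIZE));
}
#endif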

/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	mtx_assert(&Giant, MA_OWNED);
	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_pageable.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_alloc'ing a new
	 *    page for it.
	 * 3) vm_map_pageable calls vm_fault; there is no page, but there
	 *    is a pager, so we call pager_data_request.  But the kmsg
	 *    zone is empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data
	 *    back from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to
	 * prevent a race with page-out.  vm_map_pageable will wire the
	 * pages.
	 */

	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
				VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */

	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	return (addr);
}
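
#if 0
/*
 * Illustrative sketch, not compiled: kmem_alloc() hands back wired,
 * zero-filled pages and asserts that Giant is held.  A hypothetical
 * boot-time caller might look like this; the matching release is shown
 * after kmem_free() below.
 */
static vm_offset_t
example_wired_table()
{
	vm_offset_t va;

	va = kmem_alloc(kernel_map, 4 * PAGE_SIZE);	/* Giant held */
	if (va == 0)
		panic("example_wired_table: kernel map exhausted");
	return (va);
}
#endif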

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	mtx_assert(&Giant, MA_OWNED);
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
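
#if 0
/*
 * Illustrative sketch, not compiled: the release matching the
 * example_wired_table() sketch above.  kmem_free() itself truncates
 * addr and rounds addr + size to page boundaries.
 */
static void
example_wired_table_free(va)
	vm_offset_t va;
{
	kmem_free(kernel_map, va, 4 * PAGE_SIZE);	/* Giant held */
}
#endif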

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
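
#if 0
/*
 * Illustrative sketch, not compiled: the global submaps declared at the
 * top of this file are carved out of kernel_map at startup in roughly
 * this fashion; the size shown is hypothetical.
 */
static void
example_create_submap()
{
	vm_offset_t minaddr, maxaddr;

	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * PAGE_SIZE);
}
#endif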

/*
 *	kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (canwait == FALSE).
 *
 *	This routine has its own private kernel submap (kmem_map) and object
 *	(kmem_object).  This, combined with the fact that only malloc uses
 *	this routine, ensures that we will never block in map or object waits.
 *
 *	Note that this still only works in a uni-processor environment and
 *	when called at splhigh().
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	vm_map_t map;
	vm_size_t size;
	int flags;
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;

	if (map != kmem_map && map != mb_map)
		panic("kmem_malloc: map != {kmem,mb}_map");

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map == mb_map) {
			mb_map_full = TRUE;
			printf("Out of mbuf clusters - adjust NMBCLUSTERS or increase maxusers!\n");
			return (0);
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
				(long)size, (long)map->size);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	for (i = 0; i < size; i += PAGE_SIZE) {
		/*
		 * Note: if M_NOWAIT specified alone, allocate from
		 * interrupt-safe queues only (just the free list).  If
		 * M_ASLEEP or M_USE_RESERVE is also specified, we can also
		 * allocate from the cache.  Neither of the latter two
		 * flags may be specified from an interrupt since interrupts
		 * are not allowed to mess with the cache queue.
		 */
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
		    ((flags & (M_NOWAIT|M_ASLEEP|M_USE_RESERVE)) == M_NOWAIT) ?
			VM_ALLOC_INTERRUPT :
			VM_ALLOC_SYSTEM);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				goto retry;
			}
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			if (flags & M_ASLEEP) {
				VM_AWAIT;
			}
			return (0);
		}
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	vm_map_simplify_entry(map, entry);

	/*
	 * Loop thru pages, entering them in the pmap. (We cannot add them to
	 * the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		vm_page_wakeup(m);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
	vm_map_unlock(map);

	return (addr);
}
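
#if 0
/*
 * Illustrative sketch, not compiled: kmem_malloc() backs the kernel
 * malloc layer (kern/kern_malloc.c).  A non-blocking request against
 * kmem_map might look like this; the wrapper is hypothetical.
 */
static vm_offset_t
example_interrupt_alloc()
{
	vm_offset_t va;

	/* M_NOWAIT: fail (return 0) rather than sleep in VM_WAIT. */
	va = kmem_malloc(kmem_map, PAGE_SIZE, M_NOWAIT);
	return (va);
}
#endif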

/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */

vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, PVM, "kmaw", 0);
	}
	vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	wakeup(map);
	vm_map_unlock(map);
}
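
#if 0
/*
 * Illustrative sketch, not compiled: kmem_alloc_wait() and
 * kmem_free_wakeup() cooperate through the submap's own lock and a
 * tsleep/wakeup channel on the map pointer.  The use of exec_map and
 * the size below are hypothetical.
 */
static void
example_wait_pair()
{
	vm_offset_t va;

	va = kmem_alloc_wait(exec_map, 8 * PAGE_SIZE);	/* may sleep */
	if (va == 0)
		return;		/* request exceeds the entire submap */
	/* ... use the pageable range ... */
	kmem_free_wakeup(exec_map, va, 8 * PAGE_SIZE);	/* wakes sleepers */
}
#endif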

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */

void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
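
/*
 * For reference: kmem_init() is invoked exactly once during VM startup
 * (see vm_mem_init() in vm/vm_init.c), approximately as
 *
 *	kmem_init(virtual_avail, virtual_end);
 *
 * after which kernel virtual address management goes through the maps
 * created above.
 */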
494