xref: /freebsd/sys/vm/vm_kern.c (revision 380a989b3223d455375b4fae70fd0b9bdd43bafb)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_kern.c,v 1.49 1998/08/24 08:39:37 dfr Exp $
 */

/*
 *	Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map = 0;	/* the kernel's whole virtual address space */
vm_map_t kmem_map = 0;		/* submap backing kmem_malloc()/malloc(9) */
vm_map_t exec_map = 0;		/* submap for exec argument strings */
vm_map_t clean_map = 0;
vm_map_t u_map = 0;
vm_map_t buffer_map = 0;	/* submap providing buffer cache KVA */
vm_map_t mb_map = 0;		/* submap for mbufs and mbuf clusters */
int mb_map_full = 0;		/* set once mb_map is exhausted */
vm_map_t io_map = 0;		/* submap for device I/O mappings */
vm_map_t phys_map = 0;		/* submap used by physio */

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */

vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
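
/*
 * Usage sketch (hypothetical caller, not part of this file): pageable
 * kernel VA is appropriate when the backing pages may be paged in and
 * out on demand.  "nbytes" below is an assumed request size.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_pageable(kernel_map, nbytes);
 *	if (va == 0)
 *		return (ENOMEM);
 */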

/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	register vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_pageable.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_alloc'ing a new page
	 *    for it.
	 * 3) vm_map_pageable calls vm_fault; there is no page, but there is
	 *    a pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent a
	 * race with page-out.  vm_map_pageable will wire the pages.
	 */

	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
				VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		vm_page_flag_clear(mem, (PG_BUSY | PG_ZERO));
		mem->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */

	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	return (addr);
}
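
/*
 * Usage sketch (hypothetical caller, not part of this file): wired,
 * zero-filled memory for a kernel table, given back with kmem_free()
 * below.  "ntables" is an assumed tuning variable.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc(kernel_map, ntables * PAGE_SIZE);
 *	if (va == 0)
 *		panic("no space for tables");
 */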

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and release the physical pages
 *	associated with that region.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	register vm_offset_t addr;
	vm_size_t size;
{
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
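
/*
 * Usage sketch (hypothetical, continuing the kmem_alloc() example
 * above).  Note that addr is truncated and addr + size rounded, so the
 * entire containing page range is removed:
 *
 *	kmem_free(kernel_map, va, ntables * PAGE_SIZE);
 */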

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	register vm_map_t parent;
	vm_offset_t *min, *max;
	register vm_size_t size;
{
	register int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
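
/*
 * Usage sketch (hypothetical, in the style of the boot-time code that
 * carves out the submaps declared at the top of this file):
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    16 * PAGE_SIZE);
 */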

/*
 * Allocate wired-down memory in the kernel's address map for the higher
 * level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * kmem_alloc() because we may need to allocate memory at interrupt
 * level where we cannot block (canwait == FALSE).
 *
 * This routine has its own private kernel submap (kmem_map) and object
 * (kmem_object).  This, combined with the fact that only malloc uses
 * this routine, ensures that we will never block in map or object waits.
 *
 * Note that this still only works in a uni-processor environment and
 * when called at splhigh().
 *
 * We don't worry about expanding the map (adding entries) since entries
 * for wired maps are statically allocated.
 */
vm_offset_t
kmem_malloc(map, size, waitflag)
	register vm_map_t map;
	register vm_size_t size;
	boolean_t waitflag;
{
	register vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;

	if (map != kmem_map && map != mb_map)
		panic("kmem_malloc: map != {kmem,mb}_map");

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map == mb_map) {
			mb_map_full = TRUE;
			printf("Out of mbuf clusters - adjust NMBCLUSTERS or increase maxusers!\n");
			return (0);
		}
		if (waitflag == M_WAITOK)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
				(long)size, (long)map->size);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
			(waitflag == M_NOWAIT) ? VM_ALLOC_INTERRUPT : VM_ALLOC_SYSTEM);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if (waitflag == M_WAITOK) {
				VM_WAIT;
				goto retry;
			}
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
					OFF_TO_IDX(offset + i));
				vm_page_free(m);
			}
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count++;

	vm_map_simplify_entry(map, entry);

	/*
	 * Loop through pages, entering them in the pmap. (We cannot add
	 * them to the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		vm_page_wakeup(m);
		pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m),
			VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
	vm_map_unlock(map);

	return (addr);
}
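
/*
 * Usage sketch (hypothetical; in the real system this routine is
 * reached through malloc(9) in kern/kern_malloc.c).  At interrupt time
 * only M_NOWAIT is legal, and failure must be tolerated.  "allocsize"
 * is an assumed request size:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kmem_map, round_page(allocsize), M_NOWAIT);
 *	if (va == 0)
 *		return (0);
 */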

/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, PVM, "kmaw", 0);
	}
	vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}
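
/*
 * Usage sketch (hypothetical; exec argument space is the classic
 * consumer).  The call may sleep in "kmaw" until a peer frees space
 * with kmem_free_wakeup() below:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_wait(exec_map, ARG_MAX);
 */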

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	wakeup(map);
	vm_map_unlock(map);
}
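
/*
 * Usage sketch (hypothetical, the counterpart of the kmem_alloc_wait()
 * example above); freeing also wakes any sleepers in the map:
 *
 *	kmem_free_wakeup(exec_map, va, ARG_MAX);
 */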

/*
 * Create the kernel map; insert a mapping covering kernel text, data, bss,
 * and all space allocated thus far (`bootstrap' data).  The new map will thus
 * map the range between VM_MIN_KERNEL_ADDRESS and `start' as allocated, and
 * the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	register vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
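
/*
 * Usage sketch (hypothetical; a sketch of the startup-time call, in the
 * style of vm_mem_init()).  "virtual_avail" and "virtual_end" are the
 * bounds the pmap bootstrap code leaves in place:
 *
 *	kmem_init(virtual_avail, virtual_end);
 *
 * Afterwards the range below virtual_avail is recorded as allocated and
 * the rest of the kernel map is free for the allocators above.
 */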
448