/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_kern.c,v 1.6 1994/08/07 14:53:26 davidg Exp $
 */

/*
 *	Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>

vm_map_t	buffer_map;		/* buffer cache virtual addresses */
vm_map_t	kernel_map;		/* the kernel's entire address space */
vm_map_t	kmem_map;		/* submap backing kernel malloc() */
vm_map_t	mb_map;			/* submap for mbuf allocation */
vm_map_t	io_map;			/* submap for device I/O mappings */
vm_map_t	clean_map;		/* submap for pageout/cleaning */
vm_map_t	pager_map;		/* submap for transient pager I/O */
vm_map_t	phys_map;		/* submap for physio transfers */

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.  The
 *	map is expected to be kernel_map (see the disabled sanity
 *	check below).
 */

vm_offset_t kmem_alloc_pageable(map, size)
	vm_map_t		map;
	register vm_size_t	size;
{
	vm_offset_t		addr;
	register int		result;

#if	0
	if (map != kernel_map)
		panic("kmem_alloc_pageable: not called with kernel_map");
#endif

	size = round_page(size);

	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
				&addr, size, TRUE);
	if (result != KERN_SUCCESS) {
		return(0);
	}

	return(addr);
}
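
/*
 * Usage sketch (illustrative; not part of the original sources): a
 * caller that needs only kernel virtual addresses, with the backing
 * pages faulted in on first touch, might do something like
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_pageable(kernel_map, 16 * PAGE_SIZE);
 *	if (va == 0)
 *		panic("out of kernel virtual space");
 *
 * A return value of 0 means vm_map_find() could not find room.
 */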

/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t kmem_alloc(map, size)
	register vm_map_t	map;
	register vm_size_t	size;
{
	vm_offset_t		addr;
	register vm_offset_t	offset;
	vm_offset_t		i;

	size = round_page(size);

	/*
	 *	Use the kernel object for wired-down kernel pages.
	 *	Assume that no region of the kernel object is
	 *	referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the
	 * final virtual address for the new memory, and thus will tell
	 * us the offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, 0, size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size);
	vm_map_unlock(map);

	/*
	 *	Guarantee that there are pages already in this object
	 *	before calling vm_map_pageable.  This is to prevent the
	 *	following scenario:
	 *
	 *		1) Threads have swapped out, so that there is a
	 *		   pager for the kernel_object.
	 *		2) The kmsg zone is empty, and so we are kmem_allocing
	 *		   a new page for it.
	 *		3) vm_map_pageable calls vm_fault; there is no page,
	 *		   but there is a pager, so we call
	 *		   pager_data_request.  But the kmsg zone is empty,
	 *		   so we must kmem_alloc.
	 *		4) goto 1
	 *		5) Even if the kmsg zone is not empty: when we get
	 *		   the data back from the pager, it will be (very
	 *		   stale) non-zero data.  kmem_alloc is defined to
	 *		   return zero-filled memory.
	 *
	 *	We're intentionally not activating the pages we allocate
	 *	to prevent a race with page-out.  vm_map_pageable will wire
	 *	the pages.
	 */

	vm_object_lock(kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t	mem;

		while ((mem = vm_page_alloc(kernel_object, offset + i)) == NULL) {
			vm_object_unlock(kernel_object);
			VM_WAIT;
			vm_object_lock(kernel_object);
		}
		vm_page_zero_fill(mem);
		mem->flags &= ~PG_BUSY;
	}
	vm_object_unlock(kernel_object);

	/*
	 *	And finally, mark the data as non-pageable.
	 */

	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	/*
	 *	Try to coalesce the map
	 */

	vm_map_simplify(map, addr);

	return(addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 */
void kmem_free(map, addr, size)
	vm_map_t		map;
	register vm_offset_t	addr;
	vm_size_t		size;
{
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
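
/*
 * Usage sketch (illustrative; identifiers other than kmem_alloc and
 * kmem_free are hypothetical): wired allocations from kmem_alloc are
 * normally returned through kmem_free on the same map, e.g.
 *
 *	vm_offset_t buf;
 *
 *	buf = kmem_alloc(kernel_map, BUFSIZE);
 *	...
 *	kmem_free(kernel_map, buf, BUFSIZE);
 *
 * The size need not be page-aligned; both routines round it for you.
 */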

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	pageable	Can the region be paged
 */
vm_map_t kmem_suballoc(parent, min, max, size, pageable)
	register vm_map_t	parent;
	vm_offset_t		*min, *max;
	register vm_size_t	size;
	boolean_t		pageable;
{
	register int	ret;
	vm_map_t	result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
				min, size, TRUE);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max, pageable);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return(result);
}
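
/*
 * Usage sketch (illustrative; the size constant is hypothetical):
 * machine-dependent startup code typically carves the submaps declared
 * at the top of this file out of kernel_map with calls of this form:
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	kmem_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    VM_KMEM_SIZE, FALSE);
 *
 * The resulting map can then be handed to kmem_malloc() and friends.
 */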

/*
 * Allocate wired-down memory in the kernel's address map for the higher
 * level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * kmem_alloc() because we may need to allocate memory at interrupt
 * level where we cannot block (canwait == FALSE).
 *
 * This routine has its own private kernel submap (kmem_map) and object
 * (kmem_object).  This, combined with the fact that only malloc uses
 * this routine, ensures that we will never block in map or object waits.
 *
 * Note that this still only works in a uni-processor environment and
 * when called at splhigh().
 *
 * We don't worry about expanding the map (adding entries) since entries
 * for wired maps are statically allocated.
 */
vm_offset_t
kmem_malloc(map, size, canwait)
	register vm_map_t	map;
	register vm_size_t	size;
	boolean_t		canwait;
{
	register vm_offset_t	offset, i;
	vm_map_entry_t		entry;
	vm_offset_t		addr;
	vm_page_t		m;

	if (map != kmem_map && map != mb_map)
		panic("kmem_malloc: map != {kmem,mb}_map");

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the
	 * final virtual address for the new memory, and thus will tell
	 * us the offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, 0, size, &addr)) {
		vm_map_unlock(map);
#if 0
		if (canwait)		/* XXX  should wait */
			panic("kmem_malloc: %s too small",
			    map == kmem_map ? "kmem_map" : "mb_map");
#endif
		if (canwait)
			panic("kmem_malloc: map too small");
		return (0);
	}
	offset = addr - vm_map_min(kmem_map);
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size);

	/*
	 * If we can wait, just mark the range as wired
	 * (will fault pages as necessary).
	 */
	if (canwait) {
		vm_map_unlock(map);
		(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size,
				       FALSE);
		vm_map_simplify(map, addr);
		return(addr);
	}

	/*
	 * If we cannot wait then we must allocate all memory up front,
	 * pulling it off the active queue to prevent pageout.
	 */
	vm_object_lock(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_alloc(kmem_object, offset + i);

		/*
		 * Ran out of space, free everything up and return.
		 * Don't need to lock page queues here as we know
		 * that the pages we got aren't on any queues.
		 */
		if (m == NULL) {
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object, offset + i);
				vm_page_free(m);
			}
			vm_object_unlock(kmem_object);
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return(0);
		}
#if 0
		vm_page_zero_fill(m);
#endif
		m->flags &= ~PG_BUSY;
	}
	vm_object_unlock(kmem_object);

	/*
	 * Mark map entry as non-pageable.
	 * Assert: vm_map_insert() will never be able to extend the previous
	 * entry so there will be a new entry exactly corresponding to this
	 * address range and it will have wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count++;

	/*
	 * Loop through the pages, entering each one in the pmap.
	 * (We cannot add them to the wired count without
	 * wrapping the vm_page_queue_lock in splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_object_lock(kmem_object);
		m = vm_page_lookup(kmem_object, offset + i);
		vm_object_unlock(kmem_object);
		pmap_kenter(addr + i, VM_PAGE_TO_PHYS(m));
	}
	vm_map_unlock(map);

	vm_map_simplify(map, addr);
	return(addr);
}
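
/*
 * Usage sketch (illustrative): the kernel malloc() in kern/kern_malloc.c
 * is effectively the only caller, grabbing page-rounded chunks with
 * something like
 *
 *	va = kmem_malloc(kmem_map, (vm_size_t)ctob(npg), canwait);
 *
 * and passing canwait == FALSE when called from interrupt level, in
 * which case a 0 return must be handled rather than blocking.
 */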

/*
 *	kmem_alloc_wait
 *
 *	Allocates pageable memory from a submap of the kernel.  If the
 *	submap has no room, the caller sleeps, waiting for space to be
 *	freed back to the map.
 */
vm_offset_t kmem_alloc_wait(map, size)
	vm_map_t	map;
	vm_size_t	size;
{
	vm_offset_t	addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map,
		 * use the map's lock to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, 0, size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		assert_wait((int)map, TRUE);
		vm_map_unlock(map);
		thread_block("kmaw");
	}
	vm_map_insert(map, NULL, (vm_offset_t)0, addr, addr + size);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup
 *
 *	Returns memory to a submap of the kernel, and wakes up any threads
 *	waiting for memory in that map.
 */
void	kmem_free_wakeup(map, addr, size)
	vm_map_t	map;
	vm_offset_t	addr;
	vm_size_t	size;
{
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	thread_wakeup((int)map);
	vm_map_unlock(map);
}
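
/*
 * Usage sketch (illustrative; pager_map and the size are just examples):
 * these two routines form a blocking pair on a space-limited submap,
 * e.g. for transient pager mappings:
 *
 *	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
 *	...
 *	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
 *
 * The freeing side must use kmem_free_wakeup(), not kmem_free(), or
 * sleepers in kmem_alloc_wait() would never be woken.
 */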

/*
 * Create the kernel map; insert a mapping covering kernel text, data, bss,
 * and all space allocated thus far (`bootstrap' data).  The new map will thus
 * map the range between VM_MIN_KERNEL_ADDRESS and `start' as allocated, and
 * the range between `start' and `end' as free.
 */
void kmem_init(start, end)
	vm_offset_t start, end;
{
	register vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end, FALSE);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_offset_t)0,
	    VM_MIN_KERNEL_ADDRESS, start);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
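
/*
 * Usage sketch (illustrative; virtual_avail and virtual_end are the
 * values pmap bootstrap code typically computes): VM startup calls this
 * exactly once, before any other kmem_* routine, e.g.
 *
 *	kmem_init(virtual_avail, virtual_end);
 *
 * after which kernel_map is live and submaps can be carved from it
 * with kmem_suballoc().
 */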