/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_page.c,v 1.80 1997/09/01 03:17:23 bde Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

static void	vm_page_queue_init __P((void));
static vm_page_t vm_page_select_free __P((vm_object_t object,
			vm_pindex_t pindex, int prefqueue));

/*
 *	Each page of user-allocatable memory is associated with a
 *	page structure.
 */

static struct pglist *vm_page_buckets;	/* Array of buckets */
static int vm_page_bucket_count;	/* How big is array? */
static int vm_page_hash_mask;		/* Mask for hash function */

struct pglist vm_page_queue_free[PQ_L2_SIZE] = {0};
struct pglist vm_page_queue_zero[PQ_L2_SIZE] = {0};
struct pglist vm_page_queue_active = {0};
struct pglist vm_page_queue_inactive = {0};
struct pglist vm_page_queue_cache[PQ_L2_SIZE] = {0};

int no_queue = 0;

struct vpgqueues vm_page_queues[PQ_COUNT] = {0};
int pqcnt[PQ_COUNT] = {0};

static void
vm_page_queue_init(void) {
	int i;

	vm_page_queues[PQ_NONE].pl = NULL;
	vm_page_queues[PQ_NONE].cnt = &no_queue;
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_FREE+i].pl = &vm_page_queue_free[i];
		vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
	}
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_ZERO+i].pl = &vm_page_queue_zero[i];
		vm_page_queues[PQ_ZERO+i].cnt = &cnt.v_free_count;
	}
	vm_page_queues[PQ_INACTIVE].pl = &vm_page_queue_inactive;
	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;

	vm_page_queues[PQ_ACTIVE].pl = &vm_page_queue_active;
	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_CACHE+i].pl = &vm_page_queue_cache[i];
		vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;
	}
	for (i = 0; i < PQ_COUNT; i++) {
		if (vm_page_queues[i].pl) {
			TAILQ_INIT(vm_page_queues[i].pl);
		} else if (i != 0) {
			panic("vm_page_queue_init: queue %d is null", i);
		}
		vm_page_queues[i].lcnt = &pqcnt[i];
	}
}
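
/*
 * Illustrative note (not in the original source): the free, zero and
 * cache queues are really arrays of PQ_L2_SIZE per-color queues, and a
 * page's queue index is always "base queue + color".  A sketch of how a
 * hypothetical caller would walk the free queue for one color:
 *
 *	int color = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
 *	struct pglist *pl = vm_page_queues[PQ_FREE + color].pl;
 *	vm_page_t p;
 *
 *	for (p = TAILQ_FIRST(pl); p != NULL; p = TAILQ_NEXT(p, pageq))
 *		... each p here has (p->queue - p->pc) == PQ_FREE ...
 */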

vm_page_t vm_page_array = 0;
int vm_page_array_size = 0;
long first_page = 0;
static long last_page;
static vm_size_t page_mask;
static int page_shift;
int vm_page_zero_count = 0;

/*
 * map of contiguous valid DEV_BSIZE chunks in a page
 * (this list is valid for page sizes up to 16*DEV_BSIZE)
 */
static u_short vm_page_dev_bsize_chunks[] = {
	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};

static inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex));
static int vm_page_freechk_and_unqueue __P((vm_page_t m));
static void vm_page_free_wakeup __P((void));

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void
vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0;; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}
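
/*
 * Worked example (added for clarity, not in the original source): with
 * the usual cnt.v_page_size of 4096, the code above yields
 *
 *	page_mask  = 4096 - 1 = 0xfff
 *	page_shift = 12			(1 << 12 == 4096)
 *
 * and the power-of-two check works because a power of two shares no set
 * bits with its predecessor: (0xfff & 0x1000) == 0, whereas a bogus size
 * such as 0x1800 fails with (0x17ff & 0x1800) != 0.
 */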

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t starta;
	vm_offset_t enda;
	register vm_offset_t vaddr;
{
	register vm_offset_t mapped;
	register vm_page_t m;
	register struct pglist *bucket;
	vm_size_t npages, page_range;
	register vm_offset_t new_start;
	int i;
	vm_offset_t pa;
	int nblocks;
	vm_offset_t first_managed_page;

	/* the biggest memory array is the second group of pages */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	start = phys_avail[biggestone];

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */

	vm_page_queue_init();

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than the number of physical pages in
	 * the system.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct pglist *) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
	new_start = round_page(new_start);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;
	bzero((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = 0; i < vm_page_bucket_count; i++) {
		TAILQ_INIT(bucket);
		bucket++;
	}

	/*
	 * Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
	bzero((caddr_t) mapped, (vaddr - mapped));
	start = round_page(new_start);

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */

	first_page = phys_avail[0] / PAGE_SIZE;
	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;

	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (start - phys_avail[biggestone])) / PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */

	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */

	new_start = round_page(start + page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;

	first_managed_page = start / PAGE_SIZE;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->phys_addr = pa;
			m->flags = 0;
			m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
			m->queue = PQ_FREE + m->pc;
			TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
			++(*vm_page_queues[m->queue].lcnt);
			pa += PAGE_SIZE;
		}
	}

	return (mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This function depends on vm_page_bucket_count being a power of 2.
 */
static inline int
vm_page_hash(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	return ((((unsigned) object) >> 5) + (pindex >> 1)) & vm_page_hash_mask;
}
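
/*
 * Worked example (added for clarity, not in the original source): the
 * hash mixes the object pointer with the page index, then relies on
 * vm_page_hash_mask being (bucket count - 1) to reduce the sum to a
 * bucket index.  For a hypothetical object at 0x1000, pindex 6, and
 * 1024 buckets (mask 0x3ff):
 *
 *	(0x1000 >> 5) + (6 >> 1) = 0x80 + 3 = 0x83
 *	0x83 & 0x3ff             = bucket 131
 *
 * The >> 5 discards low pointer bits that are the same for all objects;
 * the masking step is why the bucket count must be a power of 2.
 */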

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset hash
 *	table and the object's list of pages.
 *
 *	The object and page must be locked, and the caller must be at splhigh.
 */

void
vm_page_insert(m, object, pindex)
	register vm_page_t m;
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register struct pglist *bucket;

	if (m->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */

	m->object = object;
	m->pindex = pindex;

	/*
	 * Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	TAILQ_INSERT_TAIL(bucket, m, hashq);

	/*
	 * Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, m, listq);
	m->flags |= PG_TABLED;
	m->object->page_hint = m;

	/*
	 * And show that the object has one more resident page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset hash
 *	table and the object's page list.
 *
 *	The object and page must be locked, and the caller must be at splhigh.
 */

void
vm_page_remove(m)
	register vm_page_t m;
{
	register struct pglist *bucket;

	if (!(m->flags & PG_TABLED))
		return;

	if (m->object->page_hint == m)
		m->object->page_hint = NULL;

	/*
	 * Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
	TAILQ_REMOVE(bucket, m, hashq);

	/*
	 * Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&m->object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */

	m->object->resident_page_count--;

	m->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  The only side effect is to
 *	refresh the object's page_hint cache on a successful lookup.
 */

vm_page_t
vm_page_lookup(object, pindex)
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register vm_page_t m;
	register struct pglist *bucket;
	int s;

	/*
	 * Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];

	s = splvm();
	for (m = TAILQ_FIRST(bucket); m != NULL; m = TAILQ_NEXT(m, hashq)) {
		if ((m->object == object) && (m->pindex == pindex)) {
			splx(s);
			m->object->page_hint = m;
			return (m);
		}
	}
	splx(s);
	return (NULL);
}
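
/*
 * Illustrative only (not in the original source): a typical caller that
 * already holds the object locked asks whether the page is resident and
 * falls back to allocation when it is not -- a sketch, with object and
 * pindex assumed to come from the caller:
 *
 *	vm_page_t m;
 *
 *	m = vm_page_lookup(object, pindex);
 *	if (m == NULL)
 *		m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
 */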

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(m, new_object, new_pindex)
	register vm_page_t m;
	register vm_object_t new_object;
	vm_pindex_t new_pindex;
{
	int s;

	s = splvm();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	splx(s);
}

/*
 * vm_page_unqueue without any wakeup
 */
void
vm_page_unqueue_nowakeup(m)
	vm_page_t m;
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(pq->pl, m, pageq);
		--(*pq->cnt);
		--(*pq->lcnt);
	}
}

/*
 * vm_page_unqueue must be called at splhigh().
 */
void
vm_page_unqueue(m)
	vm_page_t m;
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(pq->pl, m, pageq);
		--(*pq->cnt);
		--(*pq->lcnt);
		if ((queue - m->pc) == PQ_CACHE) {
			if ((cnt.v_cache_count + cnt.v_free_count) <
				(cnt.v_free_reserved + cnt.v_cache_min))
				pagedaemon_wakeup();
		}
	}
}

/*
 * Find a page on the specified queue with color optimization.
 */
vm_page_t
vm_page_list_find(basequeue, index)
	int basequeue, index;
{
#if PQ_L2_SIZE > 1
	int i, j;
	vm_page_t m;
	int hindex;

	for (j = 0; j < PQ_L1_SIZE; j++) {
		for (i = (PQ_L2_SIZE/2) - (PQ_L1_SIZE - 1);
			i >= 0;
			i -= PQ_L1_SIZE) {
			hindex = (index + (i+j)) & PQ_L2_MASK;
			m = TAILQ_FIRST(vm_page_queues[basequeue + hindex].pl);
			if (m)
				return m;

			hindex = (index - (i+j)) & PQ_L2_MASK;
			m = TAILQ_FIRST(vm_page_queues[basequeue + hindex].pl);
			if (m)
				return m;
		}
	}
	return NULL;
#else
	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
#endif
}

/*
 * Find a page on the specified queue, preferring the color implied
 * by the object/pindex pair.
 */
vm_page_t
vm_page_select(object, pindex, basequeue)
	vm_object_t object;
	vm_pindex_t pindex;
	int basequeue;
{
#if PQ_L2_SIZE > 1
	int index;

	index = (pindex + object->pg_color) & PQ_L2_MASK;
	return vm_page_list_find(basequeue, index);
#else
	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
#endif
}
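
/*
 * Illustrative note (not in the original source): page coloring keys
 * the preferred queue off the sum of the object's color and the page
 * index, so consecutive pages of an object land in different cache
 * bins.  A small worked example, assuming PQ_L2_MASK == 15 and a
 * hypothetical object whose pg_color is 3:
 *
 *	pindex 0 -> (0 + 3) & 15 = color 3
 *	pindex 1 -> (1 + 3) & 15 = color 4
 *	pindex 2 -> (2 + 3) & 15 = color 5
 *
 * vm_page_list_find() then searches outward from that color when the
 * exact queue is empty.
 */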

/*
 * Find a free or zero page, with specified preference.
 */
static vm_page_t
vm_page_select_free(object, pindex, prefqueue)
	vm_object_t object;
	vm_pindex_t pindex;
	int prefqueue;
{
#if PQ_L2_SIZE > 1
	int i, j;
	int index, hindex;
#endif
	vm_page_t m;
	int oqueuediff;

	if (prefqueue == PQ_ZERO)
		oqueuediff = PQ_FREE - PQ_ZERO;
	else
		oqueuediff = PQ_ZERO - PQ_FREE;

	if (object->page_hint) {
		if (object->page_hint->pindex == (pindex - 1)) {
			vm_offset_t last_phys;
			if ((object->page_hint->flags & PG_FICTITIOUS) == 0) {
				if ((object->page_hint < &vm_page_array[cnt.v_page_count-1]) &&
					(object->page_hint >= &vm_page_array[0])) {
					int queue;
					last_phys = VM_PAGE_TO_PHYS(object->page_hint);
					m = PHYS_TO_VM_PAGE(last_phys + PAGE_SIZE);
					queue = m->queue - m->pc;
					if (queue == PQ_FREE || queue == PQ_ZERO) {
						return m;
					}
				}
			}
		}
	}

#if PQ_L2_SIZE > 1
	index = pindex + object->pg_color;
	for (j = 0; j < PQ_L1_SIZE; j++) {
		for (i = (PQ_L2_SIZE/2) - (PQ_L1_SIZE - 1);
			(i + j) >= 0;
			i -= PQ_L1_SIZE) {

			hindex = prefqueue + ((index + (i+j)) & PQ_L2_MASK);
			if ((m = TAILQ_FIRST(vm_page_queues[hindex].pl)) != NULL)
				return m;
			if ((m = TAILQ_FIRST(vm_page_queues[hindex + oqueuediff].pl)) != NULL)
				return m;

			hindex = prefqueue + ((index - (i+j)) & PQ_L2_MASK);
			if ((m = TAILQ_FIRST(vm_page_queues[hindex].pl)) != NULL)
				return m;
			if ((m = TAILQ_FIRST(vm_page_queues[hindex + oqueuediff].pl)) != NULL)
				return m;
		}
	}
#else
	if ((m = TAILQ_FIRST(vm_page_queues[prefqueue].pl)) != NULL)
		return m;
	else
		return TAILQ_FIRST(vm_page_queues[prefqueue + oqueuediff].pl);
#endif

	return NULL;
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *	VM_ALLOC_ZERO		zero page
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(object, pindex, page_req)
	vm_object_t object;
	vm_pindex_t pindex;
	int page_req;
{
	register vm_page_t m;
	struct vpgqueues *pq;
	int queue, qtype;
	int s;

#ifdef DIAGNOSTIC
	m = vm_page_lookup(object, pindex);
	if (m)
		panic("vm_page_alloc: page already allocated");
#endif

	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	s = splvm();

	switch (page_req) {

	case VM_ALLOC_NORMAL:
		if (cnt.v_free_count >= cnt.v_free_reserved) {
			m = vm_page_select_free(object, pindex, PQ_FREE);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(NORMAL): missing page on free queue\n");
#endif
		} else {
			m = vm_page_select(object, pindex, PQ_CACHE);
			if (m == NULL) {
				splx(s);
#if defined(DIAGNOSTIC)
				if (cnt.v_cache_count > 0)
					printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_ZERO:
		if (cnt.v_free_count >= cnt.v_free_reserved) {
			m = vm_page_select_free(object, pindex, PQ_ZERO);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(ZERO): missing page on free queue\n");
#endif
		} else {
			m = vm_page_select(object, pindex, PQ_CACHE);
			if (m == NULL) {
				splx(s);
#if defined(DIAGNOSTIC)
				if (cnt.v_cache_count > 0)
					printf("vm_page_alloc(ZERO): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_SYSTEM:
		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
		    ((cnt.v_cache_count == 0) &&
		    (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
			m = vm_page_select_free(object, pindex, PQ_FREE);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(SYSTEM): missing page on free queue\n");
#endif
		} else {
			m = vm_page_select(object, pindex, PQ_CACHE);
			if (m == NULL) {
				splx(s);
#if defined(DIAGNOSTIC)
				if (cnt.v_cache_count > 0)
					printf("vm_page_alloc(SYSTEM): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_INTERRUPT:
		if (cnt.v_free_count > 0) {
			m = vm_page_select_free(object, pindex, PQ_FREE);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(INTERRUPT): missing page on free queue\n");
#endif
		} else {
			splx(s);
			pagedaemon_wakeup();
			return (NULL);
		}
		break;

	default:
		panic("vm_page_alloc: invalid allocation class");
	}

	queue = m->queue;
	qtype = queue - m->pc;
	if (qtype == PQ_ZERO)
		--vm_page_zero_count;
	pq = &vm_page_queues[queue];
	TAILQ_REMOVE(pq->pl, m, pageq);
	--(*pq->cnt);
	--(*pq->lcnt);
	if (qtype == PQ_ZERO) {
		m->flags = PG_ZERO|PG_BUSY;
	} else if (qtype == PQ_CACHE) {
		vm_page_remove(m);
		m->flags = PG_BUSY;
	} else {
		m->flags = PG_BUSY;
	}
	m->wire_count = 0;
	m->hold_count = 0;
	m->act_count = 0;
	m->busy = 0;
	m->valid = 0;
	m->dirty = 0;
	m->queue = PQ_NONE;

	/* XXX before splx until vm_page_insert is safe */
	vm_page_insert(m, object, pindex);

	splx(s);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (((cnt.v_free_count + cnt.v_cache_count) <
		(cnt.v_free_reserved + cnt.v_cache_min)) ||
			(cnt.v_free_count < cnt.v_pageout_free_min))
		pagedaemon_wakeup();

	return (m);
}
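
/*
 * Illustrative only (not in the original source): a NULL return here
 * means "out of pages right now", not a hard failure, so the usual
 * caller pattern for a pageable allocation is to sleep in vm_wait()
 * and retry -- a sketch, with object and pindex the caller's:
 *
 *	vm_page_t m;
 *
 *	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL)
 *		vm_wait();
 *
 * VM_ALLOC_INTERRUPT callers cannot sleep and must instead cope with
 * a NULL return themselves.
 */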

void
vm_wait()
{
	int s;

	s = splvm();
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		tsleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed++;
			wakeup(&vm_pages_needed);
		}
		tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
	}
	splx(s);
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */
void
vm_page_activate(m)
	register vm_page_t m;
{
	int s;

	s = splvm();
	if (m->queue == PQ_ACTIVE)
		panic("vm_page_activate: already active");

	if ((m->queue - m->pc) == PQ_CACHE)
		cnt.v_reactivated++;

	vm_page_unqueue(m);

	if (m->wire_count == 0) {
		m->queue = PQ_ACTIVE;
		++(*vm_page_queues[PQ_ACTIVE].lcnt);
		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
		cnt.v_active_count++;
	}
	splx(s);
}

/*
 * helper routine for vm_page_free and vm_page_free_zero
 */
static int
vm_page_freechk_and_unqueue(m)
	vm_page_t m;
{
	if (m->busy ||
		(m->flags & PG_BUSY) ||
		((m->queue - m->pc) == PQ_FREE) ||
		(m->hold_count != 0)) {
		printf("vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
			(u_long) m->pindex, m->busy,
			(m->flags & PG_BUSY) ? 1 : 0, m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}

	vm_page_remove(m);
	vm_page_unqueue_nowakeup(m);
	if ((m->flags & PG_FICTITIOUS) != 0) {
		return 0;
	}
	if (m->wire_count != 0) {
		if (m->wire_count > 1) {
			panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
				m->wire_count, (u_long) m->pindex);
		}
		m->wire_count = 0;
		cnt.v_wire_count--;
	}

	return 1;
}

/*
 * helper routine for vm_page_free and vm_page_free_zero
 */
static __inline void
vm_page_free_wakeup()
{
	/*
	 * if pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	/*
	 * wakeup processes that are waiting on memory if we hit a
	 * high water mark. And wakeup scheduler process if we have
	 * lots of memory. this process will swapin processes.
	 */
	if (vm_pages_needed &&
		((cnt.v_free_count + cnt.v_cache_count) >= cnt.v_free_min)) {
		wakeup(&cnt.v_free_count);
		vm_pages_needed = 0;
	}
}

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free(m)
	register vm_page_t m;
{
	int s;
	struct vpgqueues *pq;

	s = splvm();

	cnt.v_tfree++;

	if (!vm_page_freechk_and_unqueue(m)) {
		splx(s);
		return;
	}

	m->queue = PQ_FREE + m->pc;
	pq = &vm_page_queues[m->queue];
	++(*pq->lcnt);
	++(*pq->cnt);
	/*
	 * If the pageout process is grabbing the page, it is likely
	 * that the page is NOT in the cache.  It is more likely that
	 * the page will be partially in the cache if it is being
	 * explicitly freed.
	 */
	if (curproc == pageproc) {
		TAILQ_INSERT_TAIL(pq->pl, m, pageq);
	} else {
		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
	}
	vm_page_free_wakeup();
	splx(s);
}

void
vm_page_free_zero(m)
	register vm_page_t m;
{
	int s;
	struct vpgqueues *pq;

	s = splvm();

	cnt.v_tfree++;

	if (!vm_page_freechk_and_unqueue(m)) {
		splx(s);
		return;
	}

	m->queue = PQ_ZERO + m->pc;
	pq = &vm_page_queues[m->queue];
	++(*pq->lcnt);
	++(*pq->cnt);

	TAILQ_INSERT_HEAD(pq->pl, m, pageq);
	++vm_page_zero_count;
	vm_page_free_wakeup();
	splx(s);
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void
vm_page_wire(m)
	register vm_page_t m;
{
	int s;

	if (m->wire_count == 0) {
		s = splvm();
		vm_page_unqueue(m);
		splx(s);
		cnt.v_wire_count++;
	}
	++(*vm_page_queues[PQ_NONE].lcnt);
	m->wire_count++;
	m->flags |= PG_MAPPED;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void
vm_page_unwire(m)
	register vm_page_t m;
{
	int s;

	s = splvm();

	if (m->wire_count > 0)
		m->wire_count--;

	if (m->wire_count == 0) {
		cnt.v_wire_count--;
		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->queue = PQ_ACTIVE;
		++(*vm_page_queues[PQ_ACTIVE].lcnt);
		cnt.v_active_count++;
	}
	splx(s);
}
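
/*
 * Illustrative only (not in the original source): wiring is counted,
 * so a subsystem that must keep a page resident across an operation
 * brackets it like this (with the page queues locked as required):
 *
 *	vm_page_wire(m);
 *	... the page cannot be paged out while the wiring is held ...
 *	vm_page_unwire(m);
 *
 * On the last unwire the page is put back on the active queue and
 * becomes pageable again.
 */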

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t m;
{
	int s;

	/*
	 * Only move active pages -- ignore locked or already inactive ones.
	 *
	 * XXX: sometimes we get pages which aren't wired down or on any queue -
	 * we need to put them on the inactive queue also, otherwise we lose
	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	s = splvm();
	if (m->wire_count == 0 && m->hold_count == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;
		vm_page_unqueue(m);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->queue = PQ_INACTIVE;
		++(*vm_page_queues[PQ_INACTIVE].lcnt);
		cnt.v_inactive_count++;
	}
	splx(s);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 */
void
vm_page_cache(m)
	register vm_page_t m;
{
	int s;

	if ((m->flags & PG_BUSY) || m->busy || m->wire_count) {
		printf("vm_page_cache: attempting to cache busy page\n");
		return;
	}
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	vm_page_protect(m, VM_PROT_NONE);
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %lu",
			(u_long) m->pindex);
	}
	s = splvm();
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
	++(*vm_page_queues[m->queue].lcnt);
	TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
	cnt.v_cache_count++;
	vm_page_free_wakeup();
	splx(s);
}

/*
 * mapping function for valid bits or for dirty bits in
 * a page
 */
inline int
vm_page_bits(int base, int size)
{
	u_short chunk;

	if ((base == 0) && (size >= PAGE_SIZE))
		return VM_PAGE_BITS_ALL;
	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
	base = (base % PAGE_SIZE) / DEV_BSIZE;
	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
	return (chunk << base) & VM_PAGE_BITS_ALL;
}
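
/*
 * Worked example (added for clarity, not in the original source): with
 * PAGE_SIZE 4096 and DEV_BSIZE 512 a page holds 8 chunks, so
 * VM_PAGE_BITS_ALL is 0xff.  For base = 512 and size = 1024:
 *
 *	size   -> rounded up to 1024; 1024 / 512 = 2 chunks
 *	chunk  -> vm_page_dev_bsize_chunks[2] = 0x3
 *	base   -> (512 % 4096) / 512 = chunk index 1
 *	result -> (0x3 << 1) & 0xff = 0x06	(chunks 1 and 2)
 */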

/*
 * set a page valid and clean
 */
void
vm_page_set_validclean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int pagebits = vm_page_bits(base, size);

	m->valid |= pagebits;
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE)
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}
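
/*
 * Illustrative only (not in the original source): after a pager has
 * filled an entire page from backing store, it would typically mark
 * the page fully valid and clean in one call:
 *
 *	vm_page_set_validclean(m, 0, PAGE_SIZE);
 *
 * which also clears the pmap modify bit, since base/size cover the
 * whole page.
 */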

/*
 * set a page (partially) invalid
 */
void
vm_page_set_invalid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	bits = vm_page_bits(base, size);
	m->valid &= ~bits;
	if (m->valid == 0)
		m->dirty &= ~bits;
}

/*
 * is (partial) page valid?
 */
int
vm_page_is_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}

void
vm_page_test_dirty(m)
	vm_page_t m;
{
	if ((m->dirty != VM_PAGE_BITS_ALL) &&
	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		m->dirty = VM_PAGE_BITS_ALL;
	}
}

/*
 * This interface is for merging with malloc() someday.
 * Even if we never implement compaction so that contiguous allocation
 * works after initialization time, malloc()'s data structures are good
 * for statistics and for allocations of less than a page.
 */
void *
contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
	unsigned long size;	/* should be size_t here and for malloc() */
	int type;
	int flags;
	unsigned long low;
	unsigned long high;
	unsigned long alignment;
	unsigned long boundary;
	vm_map_t map;
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	int pass;
	vm_page_t pga = vm_page_array;

	size = round_page(size);
	if (size == 0)
		panic("contigmalloc1: size must not be 0");
	if ((alignment & (alignment - 1)) != 0)
		panic("contigmalloc1: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("contigmalloc1: boundary must be a power of 2");

	start = 0;
	for (pass = 0; pass <= 1; pass++) {
		s = splvm();
again:
		/*
		 * Find first page in array that is free, within range, aligned, and
		 * such that the boundary won't be crossed.
		 */
		for (i = start; i < cnt.v_page_count; i++) {
			int pqtype;
			phys = VM_PAGE_TO_PHYS(&pga[i]);
			pqtype = pga[i].queue - pga[i].pc;
			if (((pqtype == PQ_ZERO) || (pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
			    (phys >= low) && (phys < high) &&
			    ((phys & (alignment - 1)) == 0) &&
			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
				break;
		}

		/*
		 * If the above failed or we will exceed the upper bound, fail.
		 */
		if ((i == cnt.v_page_count) ||
			((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
			vm_page_t m, next;

again1:
			for (m = TAILQ_FIRST(&vm_page_queue_inactive);
				m != NULL;
				m = next) {

				if (m->queue != PQ_INACTIVE) {
					break;
				}

				next = TAILQ_NEXT(m, pageq);
				if (m->flags & PG_BUSY) {
					m->flags |= PG_WANTED;
					tsleep(m, PVM, "vpctw0", 0);
					goto again1;
				}
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vm_object_page_clean(m->object, 0, 0, TRUE, TRUE);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
								m->object->type == OBJT_DEFAULT) {
						vm_page_protect(m, VM_PROT_NONE);
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) &&
					(m->busy == 0) &&
					(m->hold_count == 0))
					vm_page_cache(m);
			}

			for (m = TAILQ_FIRST(&vm_page_queue_active);
				m != NULL;
				m = next) {

				if (m->queue != PQ_ACTIVE) {
					break;
				}

				next = TAILQ_NEXT(m, pageq);
				if (m->flags & PG_BUSY) {
					m->flags |= PG_WANTED;
					tsleep(m, PVM, "vpctw1", 0);
					goto again1;
				}
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vm_object_page_clean(m->object, 0, 0, TRUE, TRUE);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
								m->object->type == OBJT_DEFAULT) {
						vm_page_protect(m, VM_PROT_NONE);
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) &&
					(m->busy == 0) &&
					(m->hold_count == 0))
					vm_page_cache(m);
			}

			splx(s);
			continue;
		}
		start = i;

		/*
		 * Check that the successive pages are contiguous and free.
		 */
		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
			int pqtype;
			pqtype = pga[i].queue - pga[i].pc;
			if ((VM_PAGE_TO_PHYS(&pga[i]) !=
			    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
			    ((pqtype != PQ_ZERO) && (pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
				start++;
				goto again;
			}
		}

		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			int pqtype;
			vm_page_t m = &pga[i];

			pqtype = m->queue - m->pc;
			if (pqtype == PQ_CACHE)
				vm_page_free(m);

			TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
			--(*vm_page_queues[m->queue].lcnt);
			cnt.v_free_count--;
			m->valid = VM_PAGE_BITS_ALL;
			m->flags = 0;
			m->dirty = 0;
			m->wire_count = 0;
			m->busy = 0;
			m->queue = PQ_NONE;
			m->object = NULL;
			vm_page_wire(m);
		}

		/*
		 * We've found a contiguous chunk that meets our requirements.
		 * Allocate kernel VM for it, assign the physical pages to
		 * that VM, and return the kernel VM pointer.
		 */
		tmp_addr = addr = kmem_alloc_pageable(map, size);
		if (addr == 0) {
			/*
			 * XXX We almost never run out of kernel virtual
			 * space, so we don't make the allocated memory
			 * above available.
			 */
			splx(s);
			return (NULL);
		}

		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			vm_page_t m = &pga[i];
			vm_page_insert(m, kernel_object,
				OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
			pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
			tmp_addr += PAGE_SIZE;
		}

		splx(s);
		return ((void *)addr);
	}
	return NULL;
}

void *
contigmalloc(size, type, flags, low, high, alignment, boundary)
	unsigned long size;	/* should be size_t here and for malloc() */
	int type;
	int flags;
	unsigned long low;
	unsigned long high;
	unsigned long alignment;
	unsigned long boundary;
{
	return contigmalloc1(size, type, flags, low, high, alignment, boundary,
			     kernel_map);
}
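
/*
 * Illustrative only (not in the original source): a driver needing a
 * physically contiguous DMA buffer below 16MB that must not cross a
 * 64K boundary (the classic ISA constraints) might call, as a sketch:
 *
 *	void *buf;
 *
 *	buf = contigmalloc(8192, M_DEVBUF, M_NOWAIT,
 *	    0ul, 0x1000000ul, PAGE_SIZE, 0x10000ul);
 *	if (buf == NULL)
 *		... no suitable contiguous range was found ...
 *
 * A boundary of 0 (as vm_page_alloc_contig passes below) means "no
 * boundary restriction".
 */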

vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
	vm_offset_t size;
	vm_offset_t low;
	vm_offset_t high;
	vm_offset_t alignment;
{
	return ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high,
					  alignment, 0ul, kernel_map));
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int i;

	db_printf("PQ_FREE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", *vm_page_queues[PQ_FREE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_CACHE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", *vm_page_queues[PQ_CACHE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ZERO:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", *vm_page_queues[PQ_ZERO + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
		*vm_page_queues[PQ_ACTIVE].lcnt,
		*vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */