xref: /freebsd/sys/vm/vm_page.c (revision 48991a368427cadb9cdac39581d1676c29619c52)
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
37  *	$Id: vm_page.c,v 1.37 1995/10/23 05:35:46 dyson Exp $
38  */
39 
40 /*
41  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42  * All rights reserved.
43  *
44  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45  *
46  * Permission to use, copy, modify and distribute this software and
47  * its documentation is hereby granted, provided that both the copyright
48  * notice and this permission notice appear in all copies of the
49  * software, derivative works or modified versions, and any portions
50  * thereof, and that both notices appear in supporting documentation.
51  *
52  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55  *
56  * Carnegie Mellon requests users of this software to return to
57  *
58  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59  *  School of Computer Science
60  *  Carnegie Mellon University
61  *  Pittsburgh PA 15213-3890
62  *
63  * any improvements or extensions that they make and grant Carnegie the
64  * rights to redistribute these changes.
65  */
66 
67 /*
68  *	Resident memory management module.
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/proc.h>
74 
75 #include <vm/vm.h>
76 #include <vm/vm_kern.h>
77 #include <vm/vm_page.h>
78 #include <vm/vm_map.h>
79 #include <vm/vm_pageout.h>
80 
81 /*
82  *	Associated with each page of user-allocatable memory is a
83  *	page structure.
84  */
85 
86 struct pglist *vm_page_buckets;	/* Array of buckets */
87 int vm_page_bucket_count;	/* How big is array? */
88 static int vm_page_hash_mask;		/* Mask for hash function */
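/*
 * Pages are also hashed by (object, offset) into vm_page_buckets.  The
 * bucket count is the power of two chosen in vm_page_startup(), and
 * vm_page_hash_mask is that count minus one, so vm_page_hash() can reduce
 * a hash value to a bucket index with a single mask operation.
 */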
89 
90 struct pglist vm_page_queue_free;
91 struct pglist vm_page_queue_zero;
92 struct pglist vm_page_queue_active;
93 struct pglist vm_page_queue_inactive;
94 struct pglist vm_page_queue_cache;
95 
96 /* has physical page allocation been initialized? */
97 boolean_t vm_page_startup_initialized;
98 
99 vm_page_t vm_page_array;
100 int vm_page_array_size;
101 long first_page;
102 long last_page;
103 vm_offset_t first_phys_addr;
104 vm_offset_t last_phys_addr;
105 vm_size_t page_mask;
106 int page_shift;
107 
108 /*
109  * map of contiguous valid DEV_BSIZE chunks in a page
110  * (this list is valid for page sizes up to 16*DEV_BSIZE)
111  */
112 static u_short vm_page_dev_bsize_chunks[] = {
113 	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
114 	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
115 };
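/*
 * Entry n of the table above is a mask with the low n bits set (e.g.
 * entry 4 is 0xf).  vm_page_bits() below indexes it by the number of
 * DEV_BSIZE chunks a byte range covers and then shifts the mask to the
 * chunk where the range starts.
 */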
116 
117 static void vm_page_unqueue __P((vm_page_t ));
118 
119 /*
120  *	vm_set_page_size:
121  *
122  *	Sets the page size, perhaps based upon the memory
123  *	size.  Must be called before any use of page-size
124  *	dependent functions.
125  *
126  *	Sets page_shift and page_mask from cnt.v_page_size.
127  */
128 void
129 vm_set_page_size()
130 {
131 
132 	if (cnt.v_page_size == 0)
133 		cnt.v_page_size = DEFAULT_PAGE_SIZE;
134 	page_mask = cnt.v_page_size - 1;
135 	if ((page_mask & cnt.v_page_size) != 0)
136 		panic("vm_set_page_size: page size not a power of two");
137 	for (page_shift = 0;; page_shift++)
138 		if ((1 << page_shift) == cnt.v_page_size)
139 			break;
140 }
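/*
 * For example, with the common 4096-byte page size this leaves
 * page_mask = 0xfff and page_shift = 12.
 */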
141 
142 /*
143  *	vm_page_startup:
144  *
145  *	Initializes the resident memory module.
146  *
147  *	Allocates memory for the page cells, and
148  *	for the object/offset-to-page hash table headers.
149  *	Each page cell is initialized and placed on the free list.
150  */
151 
152 vm_offset_t
153 vm_page_startup(starta, enda, vaddr)
154 	register vm_offset_t starta;
155 	vm_offset_t enda;
156 	register vm_offset_t vaddr;
157 {
158 	register vm_offset_t mapped;
159 	register vm_page_t m;
160 	register struct pglist *bucket;
161 	vm_size_t npages, page_range;
162 	register vm_offset_t new_start;
163 	int i;
164 	vm_offset_t pa;
165 	int nblocks;
166 	vm_offset_t first_managed_page;
167 
168 	/* the bootstrap allocations below are carved from the biggest chunk of physical memory */
169 	vm_offset_t start;
170 	vm_offset_t biggestone, biggestsize;
171 
172 	vm_offset_t total;
173 
174 	total = 0;
175 	biggestsize = 0;
176 	biggestone = 0;
177 	nblocks = 0;
178 	vaddr = round_page(vaddr);
179 
180 	for (i = 0; phys_avail[i + 1]; i += 2) {
181 		phys_avail[i] = round_page(phys_avail[i]);
182 		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
183 	}
184 
185 	for (i = 0; phys_avail[i + 1]; i += 2) {
186 		int size = phys_avail[i + 1] - phys_avail[i];
187 
188 		if (size > biggestsize) {
189 			biggestone = i;
190 			biggestsize = size;
191 		}
192 		++nblocks;
193 		total += size;
194 	}
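	/*
	 * phys_avail[] is a zero-terminated list of (start, end) physical
	 * address pairs describing usable memory.  The bootstrap structures
	 * allocated below (hash buckets, kernel map entries and the vm_page
	 * array) are carved out of the front of the largest chunk.
	 */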
195 
196 	start = phys_avail[biggestone];
197 
198 	/*
199 	 * Initialize the queue headers for the free, zero, active, inactive
200 	 * and cache page queues.
201 	 */
202 
203 	TAILQ_INIT(&vm_page_queue_free);
204 	TAILQ_INIT(&vm_page_queue_zero);
205 	TAILQ_INIT(&vm_page_queue_active);
206 	TAILQ_INIT(&vm_page_queue_inactive);
207 	TAILQ_INIT(&vm_page_queue_cache);
208 
209 	/*
210 	 * Allocate (and initialize) the hash table buckets.
211 	 *
212 	 * The number of buckets MUST BE a power of 2, and the actual value is
213 	 * the next power of 2 greater than the number of physical pages in
214 	 * the system.
215 	 *
216 	 * Note: This computation can be tweaked if desired.
217 	 */
218 	vm_page_buckets = (struct pglist *) vaddr;
219 	bucket = vm_page_buckets;
220 	if (vm_page_bucket_count == 0) {
221 		vm_page_bucket_count = 1;
222 		while (vm_page_bucket_count < atop(total))
223 			vm_page_bucket_count <<= 1;
224 	}
225 	vm_page_hash_mask = vm_page_bucket_count - 1;
226 
227 	/*
228 	 * Map and zero the memory for the hash table buckets.
229 	 */
230 
231 	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
232 	new_start = round_page(new_start);
233 	mapped = vaddr;
234 	vaddr = pmap_map(mapped, start, new_start,
235 	    VM_PROT_READ | VM_PROT_WRITE);
236 	start = new_start;
237 	bzero((caddr_t) mapped, vaddr - mapped);
238 	mapped = vaddr;
239 
240 	for (i = 0; i < vm_page_bucket_count; i++) {
241 		TAILQ_INIT(bucket);
242 		bucket++;
243 	}
244 
245 	/*
246 	 * round (or truncate) the addresses to our page size.
247 	 */
248 
249 	/*
250 	 * Pre-allocate maps and map entries that cannot be dynamically
251 	 * allocated via malloc().  The maps include the kernel_map and
252 	 * kmem_map which must be initialized before malloc() will work
253 	 * (obviously).  Also could include pager maps which would be
254 	 * allocated before kmeminit.
255 	 *
256 	 * Allow some kernel map entries... this should be plenty since people
257 	 * shouldn't be cluttering up the kernel map (they should use their
258 	 * own maps).
259 	 */
260 
261 	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
262 	    MAX_KMAPENT * sizeof(struct vm_map_entry);
263 	kentry_data_size = round_page(kentry_data_size);
264 	kentry_data = (vm_offset_t) vaddr;
265 	vaddr += kentry_data_size;
266 
267 	/*
268 	 * Map and zero the memory set aside for the static maps and map entries.
269 	 */
270 
271 	new_start = start + (vaddr - mapped);
272 	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
273 	bzero((caddr_t) mapped, (vaddr - mapped));
274 	start = round_page(new_start);
275 
276 	/*
277 	 * Compute the number of pages of memory that will be available for
278 	 * use (taking into account the overhead of a page structure per
279 	 * page).
280 	 */
281 
282 	first_page = phys_avail[0] / PAGE_SIZE;
283 	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
284 
285 	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
286 	npages = (total - (page_range * sizeof(struct vm_page)) -
287 	    (start - phys_avail[biggestone])) / PAGE_SIZE;
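	/*
	 * page_range spans every page frame between the first and last
	 * usable physical addresses (holes included), since the vm_page
	 * array must cover that entire range.  npages is what remains for
	 * general use: the usable memory less the vm_page array itself and
	 * less whatever has already been carved out of the biggest chunk.
	 */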
288 
289 	/*
290 	 * Initialize the mem entry structures now, and put them in the free
291 	 * queue.
292 	 */
293 
294 	vm_page_array = (vm_page_t) vaddr;
295 	mapped = vaddr;
296 
297 
298 	/*
299 	 * Map the memory for the vm_page array.
300 	 */
301 
302 	new_start = round_page(start + page_range * sizeof(struct vm_page));
303 	mapped = pmap_map(mapped, start, new_start,
304 	    VM_PROT_READ | VM_PROT_WRITE);
305 	start = new_start;
306 
307 	first_managed_page = start / PAGE_SIZE;
308 
309 	/*
310 	 * Clear all of the page structures
311 	 */
312 	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
313 	vm_page_array_size = page_range;
314 
315 	cnt.v_page_count = 0;
316 	cnt.v_free_count = 0;
317 	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
318 		if (i == biggestone)
319 			pa = ptoa(first_managed_page);
320 		else
321 			pa = phys_avail[i];
322 		while (pa < phys_avail[i + 1] && npages-- > 0) {
323 			++cnt.v_page_count;
324 			++cnt.v_free_count;
325 			m = PHYS_TO_VM_PAGE(pa);
326 			m->flags = PG_FREE;
327 			m->phys_addr = pa;
328 			TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
329 			pa += PAGE_SIZE;
330 		}
331 	}
332 
333 	return (mapped);
334 }
335 
336 /*
337  *	vm_page_hash:
338  *
339  *	Distributes the object/offset key pair among hash buckets.
340  *
341  *	NOTE:  This function depends on vm_page_bucket_count being a power of 2.
342  */
343 inline int
344 vm_page_hash(vm_object_t object, vm_offset_t offset)
345 {
346 	return ((unsigned) object + (offset >> PAGE_SHIFT)) & vm_page_hash_mask;
347 }
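/*
 * For example, with 1024 buckets vm_page_hash_mask is 0x3ff, and the mask
 * above is equivalent to taking (object + (offset >> PAGE_SHIFT)) modulo
 * the bucket count.
 */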
348 
349 /*
350  *	vm_page_insert:		[ internal use only ]
351  *
352  *	Inserts the given mem entry into the object/offset-page
353  *	table and the object page list.
354  *
355  *	The object and page must be locked, and the caller must be at splhigh.
356  */
357 
358 inline void
359 vm_page_insert(mem, object, offset)
360 	register vm_page_t mem;
361 	register vm_object_t object;
362 	register vm_offset_t offset;
363 {
364 	register struct pglist *bucket;
365 
366 	if (mem->flags & PG_TABLED)
367 		panic("vm_page_insert: already inserted");
368 
369 	/*
370 	 * Record the object/offset pair in this page
371 	 */
372 
373 	mem->object = object;
374 	mem->offset = offset;
375 
376 	/*
377 	 * Insert it into the object_object/offset hash table
378 	 */
379 
380 	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
381 	TAILQ_INSERT_TAIL(bucket, mem, hashq);
382 
383 	/*
384 	 * Now link into the object's list of backed pages.
385 	 */
386 
387 	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
388 	mem->flags |= PG_TABLED;
389 
390 	/*
391 	 * And show that the object has one more resident page.
392 	 */
393 
394 	object->resident_page_count++;
395 }
396 
397 /*
398  *	vm_page_remove:		[ internal use only ]
399  *				NOTE: used by device pager as well -wfj
400  *
401  *	Removes the given mem entry from the object/offset-page
402  *	table and the object page list.
403  *
404  *	The object and page must be locked, and at splhigh.
405  */
406 
407 inline void
408 vm_page_remove(mem)
409 	register vm_page_t mem;
410 {
411 	register struct pglist *bucket;
412 
413 	if (!(mem->flags & PG_TABLED))
414 		return;
415 
416 	/*
417 	 * Remove from the object_object/offset hash table
418 	 */
419 
420 	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
421 	TAILQ_REMOVE(bucket, mem, hashq);
422 
423 	/*
424 	 * Now remove from the object's list of backed pages.
425 	 */
426 
427 	TAILQ_REMOVE(&mem->object->memq, mem, listq);
428 
429 	/*
430 	 * And show that the object has one fewer resident page.
431 	 */
432 
433 	mem->object->resident_page_count--;
434 
435 	mem->flags &= ~PG_TABLED;
436 }
437 
438 /*
439  *	vm_page_lookup:
440  *
441  *	Returns the page associated with the object/offset
442  *	pair specified; if none is found, NULL is returned.
443  *
444  *	The object must be locked.  No side effects.
445  */
446 
447 vm_page_t
448 vm_page_lookup(object, offset)
449 	register vm_object_t object;
450 	register vm_offset_t offset;
451 {
452 	register vm_page_t mem;
453 	register struct pglist *bucket;
454 	int s;
455 
456 	/*
457 	 * Search the hash table for this object/offset pair
458 	 */
459 
460 	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
461 
462 	s = splhigh();
463 	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
464 		if ((mem->object == object) && (mem->offset == offset)) {
465 			splx(s);
466 			return (mem);
467 		}
468 	}
469 
470 	splx(s);
471 	return (NULL);
472 }
473 
474 /*
475  *	vm_page_rename:
476  *
477  *	Move the given memory entry from its
478  *	current object to the specified target object/offset.
479  *
480  *	The object must be locked.
481  */
482 void
483 vm_page_rename(mem, new_object, new_offset)
484 	register vm_page_t mem;
485 	register vm_object_t new_object;
486 	vm_offset_t new_offset;
487 {
488 	int s;
489 
490 	if (mem->object == new_object)
491 		return;
492 
493 	s = splhigh();
494 	vm_page_remove(mem);
495 	vm_page_insert(mem, new_object, new_offset);
496 	splx(s);
497 }
498 
499 /*
500  * vm_page_unqueue removes the page from whatever paging queue it is on;
501  * it must be called at splhigh.
501  */
502 static inline void
503 vm_page_unqueue(vm_page_t mem)
504 {
505 	int origflags;
506 
507 	origflags = mem->flags;
508 
509 	if ((origflags & (PG_ACTIVE|PG_INACTIVE|PG_CACHE)) == 0)
510 		return;
511 
512 	if (origflags & PG_ACTIVE) {
513 		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
514 		cnt.v_active_count--;
515 		mem->flags &= ~PG_ACTIVE;
516 	} else if (origflags & PG_INACTIVE) {
517 		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
518 		cnt.v_inactive_count--;
519 		mem->flags &= ~PG_INACTIVE;
520 	} else if (origflags & PG_CACHE) {
521 		TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
522 		cnt.v_cache_count--;
523 		mem->flags &= ~PG_CACHE;
524 		if (cnt.v_cache_count + cnt.v_free_count < cnt.v_free_reserved)
525 			pagedaemon_wakeup();
526 	}
527 	return;
528 }
529 
530 /*
531  *	vm_page_alloc:
532  *
533  *	Allocate and return a memory cell associated
534  *	with this VM object/offset pair.
535  *
536  *	page_req classes:
537  *	VM_ALLOC_NORMAL		normal process request
538  *	VM_ALLOC_SYSTEM		system *really* needs a page
539  *	VM_ALLOC_INTERRUPT	interrupt time request
540  *	optionally or'ed with:
541  *	VM_ALLOC_ZERO		prefer a pre-zeroed page
542  *
543  *	Object must be locked.
544  */
545 vm_page_t
546 vm_page_alloc(object, offset, page_req)
547 	vm_object_t object;
548 	vm_offset_t offset;
549 	int page_req;
550 {
551 	register vm_page_t mem;
552 	int s;
553 
554 #ifdef DIAGNOSTIC
555 	if (offset != trunc_page(offset))
556 		panic("vm_page_alloc: offset not page aligned");
557 
558 #if 0
559 	mem = vm_page_lookup(object, offset);
560 	if (mem)
561 		panic("vm_page_alloc: page already allocated");
562 #endif
563 #endif
564 
565 	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
566 		page_req = VM_ALLOC_SYSTEM;
567 	}
568 
569 	s = splhigh();
570 
571 	switch ((page_req & ~(VM_ALLOC_ZERO))) {
572 	case VM_ALLOC_NORMAL:
573 		if (cnt.v_free_count >= cnt.v_free_reserved) {
574 			if (page_req & VM_ALLOC_ZERO) {
575 				mem = vm_page_queue_zero.tqh_first;
576 				if (mem) {
577 					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
578 					mem->flags = PG_BUSY|PG_ZERO;
579 				} else {
580 					mem = vm_page_queue_free.tqh_first;
581 					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
582 					mem->flags = PG_BUSY;
583 				}
584 			} else {
585 				mem = vm_page_queue_free.tqh_first;
586 				if (mem) {
587 					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
588 					mem->flags = PG_BUSY;
589 				} else {
590 					mem = vm_page_queue_zero.tqh_first;
591 					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
592 					mem->flags = PG_BUSY|PG_ZERO;
593 				}
594 			}
595 			cnt.v_free_count--;
596 		} else {
597 			mem = vm_page_queue_cache.tqh_first;
598 			if (mem != NULL) {
599 				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
600 				vm_page_remove(mem);
601 				mem->flags = PG_BUSY;
602 				cnt.v_cache_count--;
603 			} else {
604 				splx(s);
605 				pagedaemon_wakeup();
606 				return (NULL);
607 			}
608 		}
609 		break;
610 
611 	case VM_ALLOC_SYSTEM:
612 		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
613 		    ((cnt.v_cache_count == 0) &&
614 		    (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
615 			if (page_req & VM_ALLOC_ZERO) {
616 				mem = vm_page_queue_zero.tqh_first;
617 				if (mem) {
618 					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
619 					mem->flags = PG_BUSY|PG_ZERO;
620 				} else {
621 					mem = vm_page_queue_free.tqh_first;
622 					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
623 					mem->flags = PG_BUSY;
624 				}
625 			} else {
626 				mem = vm_page_queue_free.tqh_first;
627 				if (mem) {
628 					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
629 					mem->flags = PG_BUSY;
630 				} else {
631 					mem = vm_page_queue_zero.tqh_first;
632 					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
633 					mem->flags = PG_BUSY|PG_ZERO;
634 				}
635 			}
636 			cnt.v_free_count--;
637 		} else {
638 			mem = vm_page_queue_cache.tqh_first;
639 			if (mem != NULL) {
640 				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
641 				vm_page_remove(mem);
642 				mem->flags = PG_BUSY;
643 				cnt.v_cache_count--;
644 			} else {
645 				splx(s);
646 				pagedaemon_wakeup();
647 				return (NULL);
648 			}
649 		}
650 		break;
651 
652 	case VM_ALLOC_INTERRUPT:
653 		if (cnt.v_free_count > 0) {
654 			mem = vm_page_queue_free.tqh_first;
655 			if (mem) {
656 				TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
657 				mem->flags = PG_BUSY;
658 			} else {
659 				mem = vm_page_queue_zero.tqh_first;
660 				TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
661 				mem->flags = PG_BUSY|PG_ZERO;
662 			}
663 			cnt.v_free_count--;
664 		} else {
665 			splx(s);
666 			pagedaemon_wakeup();
667 			return NULL;
668 		}
669 		break;
670 
671 	default:
672 		panic("vm_page_alloc: invalid allocation class");
673 	}
674 
675 	mem->wire_count = 0;
676 	mem->hold_count = 0;
677 	mem->act_count = 0;
678 	mem->busy = 0;
679 	mem->valid = 0;
680 	mem->dirty = 0;
681 	mem->bmapped = 0;
682 
683 	/* XXX before splx until vm_page_insert is safe */
684 	vm_page_insert(mem, object, offset);
685 
686 	splx(s);
687 
688 	/*
689 	 * Don't wakeup too often - wakeup the pageout daemon when
690 	 * we would be nearly out of memory.
691 	 */
692 	if (((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) ||
693 	    (cnt.v_free_count < cnt.v_pageout_free_min))
694 		pagedaemon_wakeup();
695 
696 	return (mem);
697 }
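/*
 * Illustrative sketch (not code from this file): a caller that can sleep
 * and would like a pre-zeroed page might do something like
 *
 *	m = vm_page_alloc(object, offset, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *	if (m == NULL)
 *		VM_WAIT;	(assumes the VM_WAIT macro from vm_pageout.h)
 *
 * and retry, since a NULL return only means no page is available right now.
 */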
698 
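/*
 *	vm_page_alloc_contig:
 *
 *	Allocate "size" bytes of physically contiguous, wired memory whose
 *	physical addresses lie in [low, high) and whose start address is a
 *	multiple of "alignment" (a power of 2).  The pages are entered into
 *	kernel_object, mapped into the kernel, and the kernel virtual
 *	address of the region is returned; NULL is returned if no suitable
 *	run of free pages is found.
 */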
699 vm_offset_t
700 vm_page_alloc_contig(size, low, high, alignment)
701 	vm_offset_t size;
702 	vm_offset_t low;
703 	vm_offset_t high;
704 	vm_offset_t alignment;
705 {
706 	int i, s, start;
707 	vm_offset_t addr, phys, tmp_addr;
708 	vm_page_t pga = vm_page_array;
709 
710 	if ((alignment & (alignment - 1)) != 0)
711 		panic("vm_page_alloc_contig: alignment must be a power of 2");
712 
713 	start = 0;
714 	s = splhigh();
715 again:
716 	/*
717 	 * Find first page in array that is free, within range, and aligned.
718 	 */
719 	for (i = start; i < cnt.v_page_count; i++) {
720 		phys = VM_PAGE_TO_PHYS(&pga[i]);
721 		if (((pga[i].flags & PG_FREE) == PG_FREE) &&
722 		    (phys >= low) && (phys < high) &&
723 		    ((phys & (alignment - 1)) == 0))
724 			break;
725 	}
726 
727 	/*
728 	 * If the above failed or we will exceed the upper bound, fail.
729 	 */
730 	if ((i == cnt.v_page_count) || ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
731 		splx(s);
732 		return (NULL);
733 	}
734 	start = i;
735 
736 	/*
737 	 * Check successive pages for contiguous and free.
738 	 */
739 	for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
740 		if ((VM_PAGE_TO_PHYS(&pga[i]) !=
741 			(VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
742 		    ((pga[i].flags & PG_FREE) != PG_FREE)) {
743 			start++;
744 			goto again;
745 		}
746 	}
747 
748 	/*
749 	 * We've found a contiguous chunk that meets our requirements.
750 	 * Allocate kernel VM, take the physical pages off the free list,
751 	 * assign them to it and return the kernel VM address.
752 	 */
753 	tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);
754 
755 	for (i = start; i < (start + size / PAGE_SIZE); i++) {
756 		vm_page_t m = &pga[i];
757 
758 		TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
759 		cnt.v_free_count--;
760 		m->valid = VM_PAGE_BITS_ALL;
761 		m->flags = 0;
762 		m->dirty = 0;
763 		m->wire_count = 0;
764 		m->act_count = 0;
765 		m->bmapped = 0;
766 		m->busy = 0;
767 		vm_page_insert(m, kernel_object, tmp_addr - VM_MIN_KERNEL_ADDRESS);
768 		vm_page_wire(m);
769 		pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
770 		tmp_addr += PAGE_SIZE;
771 	}
772 
773 	splx(s);
774 	return (addr);
775 }
776 
777 /*
778  *	vm_page_free:
779  *
780  *	Returns the given page to the free list,
781  *	disassociating it with any VM object.
782  *	disassociating it from any VM object.
783  *	Object and page must be locked prior to entry.
784  */
785 void
786 vm_page_free(mem)
787 	register vm_page_t mem;
788 {
789 	int s;
790 	int flags;
791 
792 	s = splhigh();
793 	vm_page_remove(mem);
794 	vm_page_unqueue(mem);
795 
796 	flags = mem->flags;
797 	if (mem->bmapped || mem->busy || flags & (PG_BUSY|PG_FREE)) {
798 		if (flags & PG_FREE)
799 			panic("vm_page_free: freeing free page");
800 		printf("vm_page_free: offset(%ld), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
801 		    mem->offset, mem->bmapped, mem->busy, (flags & PG_BUSY) ? 1 : 0);
802 		panic("vm_page_free: freeing busy page");
803 	}
804 
805 	if ((flags & PG_WANTED) != 0)
806 		wakeup(mem);
807 	if ((flags & PG_FICTITIOUS) == 0) {
808 		if (mem->wire_count) {
809 			if (mem->wire_count > 1) {
810 				printf("vm_page_free: wire count > 1 (%d)", mem->wire_count);
811 				panic("vm_page_free: invalid wire count");
812 			}
813 			cnt.v_wire_count--;
814 			mem->wire_count = 0;
815 		}
816 		mem->flags |= PG_FREE;
817 		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
818 		splx(s);
819 		/*
820 		 * if pageout daemon needs pages, then tell it that there are
821 		 * some free.
822 		 */
823 		if (vm_pageout_pages_needed) {
824 			wakeup(&vm_pageout_pages_needed);
825 			vm_pageout_pages_needed = 0;
826 		}
827 
828 		cnt.v_free_count++;
829 		/*
830 		 * Wake up processes that are waiting on memory if we hit a
831 		 * high water mark, and wake up the scheduler process if we
832 		 * have lots of memory; that process will swap processes back in.
833 		 */
834 		if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
835 			wakeup(&cnt.v_free_count);
836 			wakeup(&proc0);
837 		}
838 	} else {
839 		splx(s);
840 	}
841 	cnt.v_tfree++;
842 }
843 
844 
845 /*
846  *	vm_page_wire:
847  *
848  *	Mark this page as wired down by yet
849  *	another map, removing it from paging queues
850  *	as necessary.
851  *
852  *	The page queues must be locked.
853  */
854 void
855 vm_page_wire(mem)
856 	register vm_page_t mem;
857 {
858 	int s;
859 
860 	if (mem->wire_count == 0) {
861 		s = splhigh();
862 		vm_page_unqueue(mem);
863 		splx(s);
864 		cnt.v_wire_count++;
865 	}
866 	mem->flags |= PG_WRITEABLE|PG_MAPPED;
867 	mem->wire_count++;
868 }
869 
870 /*
871  *	vm_page_unwire:
872  *
873  *	Release one wiring of this page, potentially
874  *	enabling it to be paged again.
875  *
876  *	The page queues must be locked.
877  */
878 void
879 vm_page_unwire(mem)
880 	register vm_page_t mem;
881 {
882 	int s;
883 
884 	s = splhigh();
885 
886 	if (mem->wire_count)
887 		mem->wire_count--;
888 	if (mem->wire_count == 0) {
889 		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
890 		cnt.v_active_count++;
891 		mem->flags |= PG_ACTIVE;
892 		cnt.v_wire_count--;
893 	}
894 	splx(s);
895 }
896 
897 /*
898  *	vm_page_activate:
899  *
900  *	Put the specified page on the active list (if appropriate).
901  *
902  *	The page queues must be locked.
903  */
904 void
905 vm_page_activate(m)
906 	register vm_page_t m;
907 {
908 	int s;
909 
910 	s = splhigh();
911 	if (m->flags & PG_ACTIVE)
912 		panic("vm_page_activate: already active");
913 
914 	if (m->flags & PG_CACHE)
915 		cnt.v_reactivated++;
916 
917 	vm_page_unqueue(m);
918 
919 	if (m->wire_count == 0) {
920 		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
921 		m->flags |= PG_ACTIVE;
922 		if (m->act_count < 5)
923 			m->act_count = 5;
924 		else if (m->act_count < ACT_MAX)
925 			m->act_count += 1;
926 		cnt.v_active_count++;
927 	}
928 	splx(s);
929 }
930 
931 /*
932  *	vm_page_deactivate:
933  *
934  *	Returns the given page to the inactive list,
935  *	indicating that no physical maps have access
936  *	to this page.  [Used by the physical mapping system.]
937  *
938  *	The page queues must be locked.
939  */
940 void
941 vm_page_deactivate(m)
942 	register vm_page_t m;
943 {
944 	int spl;
945 
946 	/*
947 	 * Only move active pages -- ignore locked or already inactive ones.
948 	 *
949 	 * XXX: sometimes we get pages which aren't wired down or on any queue -
950 	 * we need to put them on the inactive queue also, otherwise we lose
951 	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
952 	 */
953 
954 	spl = splhigh();
955 	if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
956 	    m->hold_count == 0) {
957 		if (m->flags & PG_CACHE)
958 			cnt.v_reactivated++;
959 		vm_page_unqueue(m);
960 		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
961 		m->flags |= PG_INACTIVE;
962 		cnt.v_inactive_count++;
963 		m->act_count = 0;
964 	}
965 	splx(spl);
966 }
967 
968 /*
969  * vm_page_cache
970  *
971  * Put the specified page onto the page cache queue (if appropriate).
972  */
973 void
974 vm_page_cache(m)
975 	register vm_page_t m;
976 {
977 	int s;
978 
979 	if ((m->flags & (PG_CACHE | PG_BUSY)) || m->busy || m->wire_count ||
980 	    m->bmapped)
981 		return;
982 
983 	s = splhigh();
984 	vm_page_unqueue(m);
985 	vm_page_protect(m, VM_PROT_NONE);
986 
987 	TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
988 	m->flags |= PG_CACHE;
989 	cnt.v_cache_count++;
990 	if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
991 		wakeup(&cnt.v_free_count);
992 		wakeup(&proc0);
993 	}
994 	if (vm_pageout_pages_needed) {
995 		wakeup(&vm_pageout_pages_needed);
996 		vm_pageout_pages_needed = 0;
997 	}
998 
999 	splx(s);
1000 }
1001 
1002 /*
1003  *	vm_page_zero_fill:
1004  *
1005  *	Zero-fill the specified page.
1006  *	Written as a standard pagein routine, to
1007  *	be used by the zero-fill object.
1008  */
1009 boolean_t
1010 vm_page_zero_fill(m)
1011 	vm_page_t m;
1012 {
1013 	pmap_zero_page(VM_PAGE_TO_PHYS(m));
1014 	return (TRUE);
1015 }
1016 
1017 /*
1018  *	vm_page_copy:
1019  *
1020  *	Copy one page to another
1021  */
1022 void
1023 vm_page_copy(src_m, dest_m)
1024 	vm_page_t src_m;
1025 	vm_page_t dest_m;
1026 {
1027 	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
1028 	dest_m->valid = VM_PAGE_BITS_ALL;
1029 }
1030 
1031 
1032 /*
1033  * mapping function for valid bits or for dirty bits in
1034  * a page
1035  */
1036 inline int
1037 vm_page_bits(int base, int size)
1038 {
1039 	u_short chunk;
1040 
1041 	if ((base == 0) && (size >= PAGE_SIZE))
1042 		return VM_PAGE_BITS_ALL;
1043 	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1044 	base = (base % PAGE_SIZE) / DEV_BSIZE;
1045 	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
1046 	return (chunk << base) & VM_PAGE_BITS_ALL;
1047 }
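/*
 * For example, with DEV_BSIZE 512 and a 4096-byte page, vm_page_bits(1024,
 * 1024) covers two chunks starting at chunk 2, so it returns
 * 0x3 << 2 == 0xc: bits 2 and 3 of the per-chunk valid/dirty masks.
 */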
1048 
1049 /*
1050  * set a page valid and clean
1051  */
1052 void
1053 vm_page_set_validclean(m, base, size)
1054 	vm_page_t m;
1055 	int base;
1056 	int size;
1057 {
1058 	int pagebits = vm_page_bits(base, size);
1059 	m->valid |= pagebits;
1060 	m->dirty &= ~pagebits;
1061 	if (base == 0 && size == PAGE_SIZE)
1062 		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1063 }
1064 
1065 /*
1066  * set a page (partially) invalid
1067  */
1068 void
1069 vm_page_set_invalid(m, base, size)
1070 	vm_page_t m;
1071 	int base;
1072 	int size;
1073 {
1074 	int bits;
1075 
1076 	m->valid &= ~(bits = vm_page_bits(base, size));
1077 	if (m->valid == 0)
1078 		m->dirty &= ~bits;
1079 }
1080 
1081 /*
1082  * is (partial) page valid?
1083  */
1084 int
1085 vm_page_is_valid(m, base, size)
1086 	vm_page_t m;
1087 	int base;
1088 	int size;
1089 {
1090 	int bits = vm_page_bits(base, size);
1091 
1092 	if (m->valid && ((m->valid & bits) == bits))
1093 		return 1;
1094 	else
1095 		return 0;
1096 }
1097 
1098 
1099 
1100 void
1101 vm_page_test_dirty(m)
1102 	vm_page_t m;
1103 {
1104 	if ((m->dirty != VM_PAGE_BITS_ALL) &&
1105 	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
1106 		m->dirty = VM_PAGE_BITS_ALL;
1107 	}
1108 }
1109 
1110 #ifdef DDB
1111 void
1112 print_page_info(void)
1113 {
1114 	printf("cnt.v_free_count: %d\n", cnt.v_free_count);
1115 	printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
1116 	printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
1117 	printf("cnt.v_active_count: %d\n", cnt.v_active_count);
1118 	printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
1119 	printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
1120 	printf("cnt.v_free_min: %d\n", cnt.v_free_min);
1121 	printf("cnt.v_free_target: %d\n", cnt.v_free_target);
1122 	printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
1123 	printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
1124 }
1125 #endif
1126