/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_page.c,v 1.14 1995/01/10 07:32:48 davidg Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct pglist *vm_page_buckets;	/* Array of buckets */
int vm_page_bucket_count = 0;	/* How big is array? */
int vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t bucket_lock;	/* lock for all buckets XXX */

struct pglist vm_page_queue_free;
struct pglist vm_page_queue_active;
struct pglist vm_page_queue_inactive;
struct pglist vm_page_queue_cache;
simple_lock_data_t vm_page_queue_lock;
simple_lock_data_t vm_page_queue_free_lock;

/* has physical page allocation been initialized? */
boolean_t vm_page_startup_initialized;

vm_page_t vm_page_array;
int vm_page_array_size;
long first_page;
long last_page;
vm_offset_t first_phys_addr;
vm_offset_t last_phys_addr;
vm_size_t page_mask;
int page_shift;

/*
 * map of contiguous valid DEV_BSIZE chunks in a page
 * (this list is valid for page sizes up to 16*DEV_BSIZE)
 */
static u_short vm_page_dev_bsize_chunks[] = {
	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};
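
/*
 * Example (illustrative only, not from the original source): entry n of
 * the table above is simply (1 << n) - 1, a mask with the n low bits set.
 * Assuming 512-byte DEV_BSIZE chunks, a run of 4 valid chunks starting at
 * the beginning of a page maps to vm_page_dev_bsize_chunks[4] == 0xf,
 * which vm_page_bits() below then shifts into position.
 */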

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void
vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0;; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}
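
/*
 * For instance (not in the original source): with the usual 4096-byte
 * page, this leaves page_mask == 0xfff and page_shift == 12, so
 * atop()/ptoa() style conversions reduce to shifts by page_shift.
 */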

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t starta;
	vm_offset_t enda;
	register vm_offset_t vaddr;
{
	register vm_offset_t mapped;
	register vm_page_t m;
	register struct pglist *bucket;
	vm_size_t npages, page_range;
	register vm_offset_t new_start;
	int i;
	vm_offset_t pa;
	int nblocks;
	vm_offset_t first_managed_page;

	extern vm_offset_t kentry_data;
	extern vm_size_t kentry_data_size;
	extern vm_offset_t phys_avail[];

	/* the biggest memory array is the second group of pages */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	start = phys_avail[biggestone];

	/*
	 * Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 * Initialize the queue headers for the free, active, inactive
	 * and cache queues.
	 */

	TAILQ_INIT(&vm_page_queue_free);
	TAILQ_INIT(&vm_page_queue_active);
	TAILQ_INIT(&vm_page_queue_inactive);
	TAILQ_INIT(&vm_page_queue_cache);

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the smallest power of 2 greater than or equal to the number of
	 * physical pages in the system.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct pglist *) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_hash_mask = vm_page_bucket_count - 1;
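
	/*
	 * Example (illustrative, not part of the original code): on a
	 * machine with 16MB of managed memory and 4K pages, atop(total)
	 * is 4096, so vm_page_bucket_count settles at 4096 and
	 * vm_page_hash_mask becomes 0xfff.
	 */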

	/*
	 * Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
	new_start = round_page(new_start);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;
	bzero((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = 0; i < vm_page_bucket_count; i++) {
		TAILQ_INIT(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 * round (or truncate) the addresses to our page size.
	 */

	/*
	 * Pre-allocate maps and map entries that cannot be dynamically
	 * allocated via malloc().  The maps include the kernel_map and
	 * kmem_map which must be initialized before malloc() will work
	 * (obviously).  Also could include pager maps which would be
	 * allocated before kmeminit.
	 *
	 * Allow some kernel map entries... this should be plenty since people
	 * shouldn't be cluttering up the kernel map (they should use their
	 * own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
	    MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 * Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
	bzero((caddr_t) mapped, (vaddr - mapped));
	start = round_page(new_start);

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */

	first_page = phys_avail[0] / PAGE_SIZE;

	/* for VM_PAGE_CHECK() */
	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;

	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (start - phys_avail[biggestone])) / PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */

	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */

	new_start = round_page(start + page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;

	first_managed_page = start / PAGE_SIZE;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->flags = PG_FREE;
			vm_page_set_clean(m, 0, PAGE_SIZE);
			m->object = 0;
			m->phys_addr = pa;
			m->hold_count = 0;
			TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
			pa += PAGE_SIZE;
		}
	}

	/*
	 * Initialize vm_pages_needed lock here - don't wait for pageout
	 * daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	return (mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This function depends on vm_page_bucket_count being a power of 2.
 */
inline int
vm_page_hash(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	return ((unsigned) object + offset / NBPG) & vm_page_hash_mask;
}
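
/*
 * Worked example (illustrative only): assuming vm_page_hash_mask == 0xfff
 * and NBPG == 4096, an object at kernel address 0xf0123400 with offset
 * 0x6000 hashes to ((0xf0123400 + 6) & 0xfff) == 0x406, so every lookup
 * for that object/offset pair probes bucket 0x406.
 */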

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset-page
 *	table and object list.
 *
 *	The object and page must be locked.
 */

void
vm_page_insert(mem, object, offset)
	register vm_page_t mem;
	register vm_object_t object;
	register vm_offset_t offset;
{
	register struct pglist *bucket;
	int s;

	VM_PAGE_CHECK(mem);

	if (mem->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 * Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	s = splhigh();
	simple_lock(&bucket_lock);
	TAILQ_INSERT_TAIL(bucket, mem, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(s);

	/*
	 * Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
	mem->flags |= PG_TABLED;

	/*
	 * And show that the object has one more resident page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */

void
vm_page_remove(mem)
	register vm_page_t mem;
{
	register struct pglist *bucket;
	int s;

	VM_PAGE_CHECK(mem);

	if (!(mem->flags & PG_TABLED))
		return;

	/*
	 * Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	s = splhigh();
	simple_lock(&bucket_lock);
	TAILQ_REMOVE(bucket, mem, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(s);

	/*
	 * Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&mem->object->memq, mem, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */

	mem->object->resident_page_count--;

	mem->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t
vm_page_lookup(object, offset)
	register vm_object_t object;
	register vm_offset_t offset;
{
	register vm_page_t mem;
	register struct pglist *bucket;
	int s;

	/*
	 * Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	s = splhigh();
	simple_lock(&bucket_lock);
	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(s);
			return (mem);
		}
	}

	simple_unlock(&bucket_lock);
	splx(s);
	return (NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(mem, new_object, new_offset)
	register vm_page_t mem;
	register vm_object_t new_object;
	vm_offset_t new_offset;
{
	int s;

	if (mem->object == new_object)
		return;

	/* keep page from moving out from under pageout daemon */
	vm_page_lock_queues();
	s = splhigh();
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	splx(s);
	vm_page_unlock_queues();
}

int
vm_page_unqueue(vm_page_t mem)
{
	int s, origflags;

	s = splhigh();
	origflags = mem->flags;
	if (mem->flags & PG_ACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count--;
		mem->flags &= ~PG_ACTIVE;
	} else if (mem->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
		cnt.v_inactive_count--;
		mem->flags &= ~PG_INACTIVE;
	} else if (mem->flags & PG_CACHE) {
		TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
		cnt.v_cache_count--;
		mem->flags &= ~PG_CACHE;
		if (cnt.v_cache_count + cnt.v_free_count < cnt.v_free_reserved)
			wakeup((caddr_t) &vm_pages_needed);
	}
	splx(s);
	return origflags;
}
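
/*
 * The old queue flags are returned so a caller can take a page off the
 * paging queues and later restore it, e.g. (hypothetical usage, not from
 * the original source):
 *
 *	flags = vm_page_unqueue(m);
 *	... operate on the page ...
 *	vm_page_requeue(m, flags);
 */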

void
vm_page_requeue(vm_page_t mem, int flags)
{
	int s;

	if (mem->wire_count)
		return;
	s = splhigh();
	if (flags & PG_CACHE) {
		TAILQ_INSERT_TAIL(&vm_page_queue_cache, mem, pageq);
		mem->flags |= PG_CACHE;
		cnt.v_cache_count++;
	} else if (flags & PG_ACTIVE) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
		mem->flags |= PG_ACTIVE;
		cnt.v_active_count++;
	} else if (flags & PG_INACTIVE) {
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, mem, pageq);
		mem->flags |= PG_INACTIVE;
		cnt.v_inactive_count++;
	}
	TAILQ_REMOVE(&mem->object->memq, mem, listq);
	TAILQ_INSERT_TAIL(&mem->object->memq, mem, listq);
	splx(s);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(object, offset, inttime)
	vm_object_t object;
	vm_offset_t offset;
	int inttime;
{
	register vm_page_t mem;
	int s;

	simple_lock(&vm_page_queue_free_lock);

	s = splhigh();

	if (object != kernel_object &&
	    object != kmem_object &&
	    curproc != pageproc &&
	    curproc != &proc0 &&
	    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_reserved) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);
		return (NULL);
	}
	if (inttime) {
		if ((mem = vm_page_queue_free.tqh_first) == 0) {
			for (mem = vm_page_queue_cache.tqh_first; mem; mem = mem->pageq.tqe_next) {
				if ((mem->object->flags & OBJ_ILOCKED) == 0) {
					TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
					vm_page_remove(mem);
					cnt.v_cache_count--;
					goto gotpage;
				}
			}
			simple_unlock(&vm_page_queue_free_lock);
			splx(s);
			return (NULL);
		}
	} else {
		if ((cnt.v_free_count < 3) ||
		    (mem = vm_page_queue_free.tqh_first) == 0) {
			mem = vm_page_queue_cache.tqh_first;
			if (mem) {
				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
				vm_page_remove(mem);
				cnt.v_cache_count--;
				goto gotpage;
			}
			simple_unlock(&vm_page_queue_free_lock);
			splx(s);
			return (NULL);
		}
	}

	TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
	cnt.v_free_count--;

gotpage:
	simple_unlock(&vm_page_queue_free_lock);

	mem->flags = PG_BUSY | PG_CLEAN;
	mem->wire_count = 0;
	mem->hold_count = 0;
	mem->act_count = 0;
	mem->busy = 0;
	mem->valid = 0;
	mem->dirty = 0;
	mem->bmapped = 0;

	/* XXX before splx until vm_page_insert is safe */
	vm_page_insert(mem, object, offset);

	splx(s);

	/*
	 * Don't wake up too often: wake the pageout daemon only when
	 * we would be nearly out of memory.
	 */
	if (curproc != pageproc &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min))
		wakeup((caddr_t) &vm_pages_needed);

	return (mem);
}
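
/*
 * Note on the inttime argument above (editorial summary of the code, not
 * an authoritative contract): when inttime is nonzero the caller is in a
 * context that cannot block, so only a truly free page, or a cache page
 * whose object is not interlocked (OBJ_ILOCKED), is taken; when it is
 * zero, cache pages are reclaimed more readily once the free list drops
 * below a small cushion.
 */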

vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
	vm_offset_t size;
	vm_offset_t low;
	vm_offset_t high;
	vm_offset_t alignment;
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	vm_page_t pga = vm_page_array;
	extern vm_map_t kernel_map;

	if ((alignment & (alignment - 1)) != 0)
		panic("vm_page_alloc_contig: alignment must be a power of 2");

	start = 0;
	s = splhigh();
again:
	/*
	 * Find first page in array that is free, within range, and aligned.
	 */
	for (i = start; i < cnt.v_page_count; i++) {
		phys = VM_PAGE_TO_PHYS(&pga[i]);
		if (((pga[i].flags & PG_FREE) == PG_FREE) &&
		    (phys >= low) && (phys < high) &&
		    ((phys & (alignment - 1)) == 0))
			break;
	}

	/*
	 * If the above failed or we will exceed the upper bound, fail.
	 */
	if ((i == cnt.v_page_count) || ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
		splx(s);
		return (NULL);
	}
	start = i;

	/*
	 * Check that the successive pages are contiguous and free.
	 */
	for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
		if ((VM_PAGE_TO_PHYS(&pga[i]) !=
			(VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
		    ((pga[i].flags & PG_FREE) != PG_FREE)) {
			start++;
			goto again;
		}
	}

	/*
	 * We've found a contiguous chunk that meets our requirements.
	 * Allocate kernel VM, unfree and assign the physical pages to it,
	 * and return the kernel VM pointer.
	 */
	tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);

	for (i = start; i < (start + size / PAGE_SIZE); i++) {
		TAILQ_REMOVE(&vm_page_queue_free, &pga[i], pageq);
		cnt.v_free_count--;
		vm_page_wire(&pga[i]);
		vm_page_set_clean(&pga[i], 0, PAGE_SIZE);
		pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(&pga[i]));
		tmp_addr += PAGE_SIZE;
	}

	splx(s);
	return (addr);
}
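
/*
 * Hypothetical usage (an illustration, not taken from the original tree):
 * a driver needing a 64K DMA buffer below 16MB, aligned to 64K, might call
 *
 *	addr = vm_page_alloc_contig((vm_offset_t) 0x10000,
 *	    (vm_offset_t) 0, (vm_offset_t) 0x1000000, (vm_offset_t) 0x10000);
 *
 * and check the returned kernel virtual address for NULL before use.
 */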

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free(mem)
	register vm_page_t mem;
{
	int s;

	s = splhigh();
	vm_page_remove(mem);
	vm_page_unqueue(mem);

	if (mem->bmapped || mem->busy || (mem->flags & PG_BUSY)) {
		printf("vm_page_free: offset(%d), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
		    mem->offset, mem->bmapped, mem->busy, (mem->flags & PG_BUSY) ? 1 : 0);
		panic("vm_page_free: freeing busy page");
	}
	if (mem->flags & PG_FREE)
		panic("vm_page_free: freeing free page");

	if (!(mem->flags & PG_FICTITIOUS)) {

		simple_lock(&vm_page_queue_free_lock);
		if (mem->wire_count) {
			if (mem->wire_count > 1) {
				printf("vm_page_free: wire count > 1 (%d)", mem->wire_count);
				panic("vm_page_free: invalid wire count");
			}
			cnt.v_wire_count--;
			mem->wire_count = 0;
		}
		mem->flags |= PG_FREE;
		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);

		cnt.v_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);
		/*
		 * if the pageout daemon needs pages, then tell it that there
		 * are some free.
		 */
		if (vm_pageout_pages_needed)
			wakeup((caddr_t) &vm_pageout_pages_needed);

		/*
		 * Wake up processes that are waiting on memory if we hit a
		 * high water mark, and wake up the scheduler process (which
		 * will swap processes back in) if we have lots of memory.
		 */
		if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
			wakeup((caddr_t) &cnt.v_free_count);
			wakeup((caddr_t) &proc0);
		}
	} else {
		splx(s);
	}
	if (mem->flags & PG_WANTED)
		wakeup((caddr_t) mem);
	cnt.v_tfree++;
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void
vm_page_wire(mem)
	register vm_page_t mem;
{

	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		vm_page_unqueue(mem);
		cnt.v_wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void
vm_page_unwire(mem)
	register vm_page_t mem;
{
	int s;

	VM_PAGE_CHECK(mem);

	s = splhigh();

	if (mem->wire_count)
		mem->wire_count--;
	if (mem->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count++;
		mem->flags |= PG_ACTIVE;
		cnt.v_wire_count--;
	}
	splx(s);
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t m;
{
	int spl;

	VM_PAGE_CHECK(m);

	/*
	 * Only move active pages -- ignore locked or already inactive ones.
	 *
	 * XXX: sometimes we get pages which aren't wired down or on any queue -
	 * we need to put them on the inactive queue also, otherwise we lose
	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */

	spl = splhigh();
	if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
	    m->hold_count == 0) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		vm_page_unqueue(m);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->flags |= PG_INACTIVE;
		cnt.v_inactive_count++;
		m->act_count = 0;
	}
	splx(spl);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 */

void
vm_page_cache(m)
	register vm_page_t m;
{
	int s;

	VM_PAGE_CHECK(m);
	if ((m->flags & (PG_CACHE | PG_BUSY)) || m->busy || m->wire_count ||
	    m->bmapped)
		return;

	s = splhigh();
	vm_page_unqueue(m);
	pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);

	TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
	m->flags |= PG_CACHE;
	cnt.v_cache_count++;
	if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
		wakeup((caddr_t) &cnt.v_free_count);
		wakeup((caddr_t) &proc0);
	}
	if (vm_pageout_pages_needed)
		wakeup((caddr_t) &vm_pageout_pages_needed);

	splx(s);
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void
vm_page_activate(m)
	register vm_page_t m;
{
	int s;

	VM_PAGE_CHECK(m);

	s = splhigh();
	if (m->flags & PG_ACTIVE)
		panic("vm_page_activate: already active");

	vm_page_unqueue(m);

	if (m->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->flags |= PG_ACTIVE;
		TAILQ_REMOVE(&m->object->memq, m, listq);
		TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
		if (m->act_count < 5)
			m->act_count = 5;
		else
			m->act_count += 1;
		cnt.v_active_count++;
	}
	splx(s);
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */

boolean_t
vm_page_zero_fill(m)
	vm_page_t m;
{
	VM_PAGE_CHECK(m);

	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	m->valid = VM_PAGE_BITS_ALL;
	return (TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */
void
vm_page_copy(src_m, dest_m)
	vm_page_t src_m;
	vm_page_t dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}

/*
 * mapping function for valid bits or for dirty bits in
 * a page
 */
inline int
vm_page_bits(int base, int size)
{
	u_short chunk;

	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
	base = (base % PAGE_SIZE) / DEV_BSIZE;
	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
	return (chunk << base) & VM_PAGE_BITS_ALL;
}
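
/*
 * Worked example (illustrative only): assuming DEV_BSIZE == 512 and
 * PAGE_SIZE == 4096, vm_page_bits(1024, 2048) rounds size to 2048,
 * computes base == 2 and chunk == vm_page_dev_bsize_chunks[4] == 0xf,
 * and returns 0xf << 2 == 0x3c: bits 2-5 set, covering bytes 1024-3071
 * of the page.
 */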

/*
 * set a page (partially) valid
 */
void
vm_page_set_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	m->valid |= vm_page_bits(base, size);
}

/*
 * set a page (partially) invalid
 */
void
vm_page_set_invalid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	m->valid &= ~(bits = vm_page_bits(base, size));
	if (m->valid == 0)
		m->dirty &= ~bits;
}

/*
 * is (partial) page valid?
 */
int
vm_page_is_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	if (m->valid && ((m->valid & (bits = vm_page_bits(base, size))) == bits))
		return 1;
	else
		return 0;
}

/*
 * set a page (partially) dirty
 */
void
vm_page_set_dirty(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	if ((base != 0) || (size != PAGE_SIZE)) {
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
			m->dirty = VM_PAGE_BITS_ALL;
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			return;
		}
		m->dirty |= vm_page_bits(base, size);
	} else {
		m->dirty = VM_PAGE_BITS_ALL;
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	}
}

void
vm_page_test_dirty(m)
	vm_page_t m;
{
	if ((!m->dirty || (m->dirty != vm_page_bits(0, PAGE_SIZE))) &&
	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
		m->dirty = VM_PAGE_BITS_ALL;
	}
}

/*
 * set a page (partially) clean
 */
void
vm_page_set_clean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	m->dirty &= ~vm_page_bits(base, size);
}

/*
 * is (partial) page clean?  (Note: refreshes m->dirty from the pmap
 * modified bit before testing.)
 */
int
vm_page_is_clean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		m->dirty = VM_PAGE_BITS_ALL;
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	}
	if ((m->dirty & m->valid & vm_page_bits(base, size)) == 0)
		return 1;
	else
		return 0;
}

void
print_page_info()
{
	printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}
1164