xref: /freebsd/sys/vm/vm_page.c (revision 17ee9d00bc1ae1e598c38f25826f861e4bc6c3ce)
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
37  *	$Id: vm_page.c,v 1.21 1995/02/22 10:16:21 davidg Exp $
38  */
39 
40 /*
41  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42  * All rights reserved.
43  *
44  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45  *
46  * Permission to use, copy, modify and distribute this software and
47  * its documentation is hereby granted, provided that both the copyright
48  * notice and this permission notice appear in all copies of the
49  * software, derivative works or modified versions, and any portions
50  * thereof, and that both notices appear in supporting documentation.
51  *
52  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55  *
56  * Carnegie Mellon requests users of this software to return to
57  *
58  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59  *  School of Computer Science
60  *  Carnegie Mellon University
61  *  Pittsburgh PA 15213-3890
62  *
63  * any improvements or extensions that they make and grant Carnegie the
64  * rights to redistribute these changes.
65  */
66 
67 /*
68  *	Resident memory management module.
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/proc.h>
74 
75 #include <vm/vm.h>
76 #include <vm/vm_page.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_pageout.h>
79 
80 /*
81  *	Associated with each page of user-allocatable memory is a
82  *	page structure.
83  */
84 
85 struct pglist *vm_page_buckets;	/* Array of buckets */
86 int vm_page_bucket_count = 0;	/* How big is array? */
87 int vm_page_hash_mask;		/* Mask for hash function */
88 simple_lock_data_t bucket_lock;	/* lock for all buckets XXX */
89 
90 struct pglist vm_page_queue_free;
91 struct pglist vm_page_queue_active;
92 struct pglist vm_page_queue_inactive;
93 struct pglist vm_page_queue_cache;
94 simple_lock_data_t vm_page_queue_lock;
95 simple_lock_data_t vm_page_queue_free_lock;
96 
97 /* has physical page allocation been initialized? */
98 boolean_t vm_page_startup_initialized;
99 
100 vm_page_t vm_page_array;
101 int vm_page_array_size;
102 long first_page;
103 long last_page;
104 vm_offset_t first_phys_addr;
105 vm_offset_t last_phys_addr;
106 vm_size_t page_mask;
107 int page_shift;
108 
109 /*
110  * map of contiguous valid DEV_BSIZE chunks in a page
111  * (this list is valid for page sizes up to 16*DEV_BSIZE)
112  */
113 static u_short vm_page_dev_bsize_chunks[] = {
114 	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
115 	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
116 };
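/*
 * Example (assuming PAGE_SIZE == 4096 and DEV_BSIZE == 512): a page is
 * made up of 8 DEV_BSIZE chunks, so vm_page_dev_bsize_chunks[8] == 0xff
 * has one bit set per chunk; vm_page_bits() below shifts an entry of
 * this table into place to build the valid/dirty masks used for
 * partial-page operations.
 */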
117 
118 
119 /*
120  *	vm_set_page_size:
121  *
122  *	Sets the page size, perhaps based upon the memory
123  *	size.  Must be called before any use of page-size
124  *	dependent functions.
125  *
126  *	Sets page_shift and page_mask from cnt.v_page_size.
127  */
128 void
129 vm_set_page_size()
130 {
131 
132 	if (cnt.v_page_size == 0)
133 		cnt.v_page_size = DEFAULT_PAGE_SIZE;
134 	page_mask = cnt.v_page_size - 1;
135 	if ((page_mask & cnt.v_page_size) != 0)
136 		panic("vm_set_page_size: page size not a power of two");
137 	for (page_shift = 0;; page_shift++)
138 		if ((1 << page_shift) == cnt.v_page_size)
139 			break;
140 }
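/*
 * Example (assuming a 4096 byte page size): page_mask is set to 0xfff
 * and page_shift to 12, so a byte offset splits into a page number
 * (offset >> page_shift) and an offset within the page
 * (offset & page_mask).
 */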
141 
142 /*
143  *	vm_page_startup:
144  *
145  *	Initializes the resident memory module.
146  *
147  *	Allocates memory for the page cells, and
148  *	for the object/offset-to-page hash table headers.
149  *	Each page cell is initialized and placed on the free list.
150  */
151 
152 vm_offset_t
153 vm_page_startup(starta, enda, vaddr)
154 	register vm_offset_t starta;
155 	vm_offset_t enda;
156 	register vm_offset_t vaddr;
157 {
158 	register vm_offset_t mapped;
159 	register vm_page_t m;
160 	register struct pglist *bucket;
161 	vm_size_t npages, page_range;
162 	register vm_offset_t new_start;
163 	int i;
164 	vm_offset_t pa;
165 	int nblocks;
166 	vm_offset_t first_managed_page;
167 
168 	extern vm_offset_t kentry_data;
169 	extern vm_size_t kentry_data_size;
170 	extern vm_offset_t phys_avail[];
171 
172 	/* the allocations below come from the biggest phys_avail chunk (usually the second group of pages) */
173 	vm_offset_t start;
174 	vm_offset_t biggestone, biggestsize;
175 
176 	vm_offset_t total;
177 
178 	total = 0;
179 	biggestsize = 0;
180 	biggestone = 0;
181 	nblocks = 0;
182 	vaddr = round_page(vaddr);
183 
184 	for (i = 0; phys_avail[i + 1]; i += 2) {
185 		phys_avail[i] = round_page(phys_avail[i]);
186 		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
187 	}
188 
189 	for (i = 0; phys_avail[i + 1]; i += 2) {
190 		int size = phys_avail[i + 1] - phys_avail[i];
191 
192 		if (size > biggestsize) {
193 			biggestone = i;
194 			biggestsize = size;
195 		}
196 		++nblocks;
197 		total += size;
198 	}
199 
200 	start = phys_avail[biggestone];
201 
202 
203 	/*
204 	 * Initialize the locks
205 	 */
206 
207 	simple_lock_init(&vm_page_queue_free_lock);
208 	simple_lock_init(&vm_page_queue_lock);
209 
210 	/*
211 	 * Initialize the queue headers for the free queue, the active queue
212 	 * and the inactive queue.
213 	 */
214 
215 	TAILQ_INIT(&vm_page_queue_free);
216 	TAILQ_INIT(&vm_page_queue_active);
217 	TAILQ_INIT(&vm_page_queue_inactive);
218 	TAILQ_INIT(&vm_page_queue_cache);
219 
220 	/*
221 	 * Allocate (and initialize) the hash table buckets.
222 	 *
223 	 * The number of buckets MUST BE a power of 2, and the actual value is
224 	 * the next power of 2 greater than the number of physical pages in
225 	 * the system.
226 	 *
227 	 * Note: This computation can be tweaked if desired.
228 	 */
229 	vm_page_buckets = (struct pglist *) vaddr;
230 	bucket = vm_page_buckets;
231 	if (vm_page_bucket_count == 0) {
232 		vm_page_bucket_count = 1;
233 		while (vm_page_bucket_count < atop(total))
234 			vm_page_bucket_count <<= 1;
235 	}
236 	vm_page_hash_mask = vm_page_bucket_count - 1;
237 
238 	/*
239 	 * Validate these addresses.
240 	 */
241 
242 	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
243 	new_start = round_page(new_start);
244 	mapped = vaddr;
245 	vaddr = pmap_map(mapped, start, new_start,
246 	    VM_PROT_READ | VM_PROT_WRITE);
247 	start = new_start;
248 	bzero((caddr_t) mapped, vaddr - mapped);
249 	mapped = vaddr;
250 
251 	for (i = 0; i < vm_page_bucket_count; i++) {
252 		TAILQ_INIT(bucket);
253 		bucket++;
254 	}
255 
256 	simple_lock_init(&bucket_lock);
257 
258 	/*
259 	 * round (or truncate) the addresses to our page size.
260 	 */
261 
262 	/*
263 	 * Pre-allocate maps and map entries that cannot be dynamically
264 	 * allocated via malloc().  The maps include the kernel_map and
265 	 * kmem_map which must be initialized before malloc() will work
266 	 * (obviously).  Also could include pager maps which would be
267 	 * allocated before kmeminit.
268 	 *
269 	 * Allow some kernel map entries... this should be plenty since people
270 	 * shouldn't be cluttering up the kernel map (they should use their
271 	 * own maps).
272 	 */
273 
274 	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
275 	    MAX_KMAPENT * sizeof(struct vm_map_entry);
276 	kentry_data_size = round_page(kentry_data_size);
277 	kentry_data = (vm_offset_t) vaddr;
278 	vaddr += kentry_data_size;
279 
280 	/*
281 	 * Validate these zone addresses.
282 	 */
283 
284 	new_start = start + (vaddr - mapped);
285 	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
286 	bzero((caddr_t) mapped, (vaddr - mapped));
287 	start = round_page(new_start);
288 
289 	/*
290 	 * Compute the number of pages of memory that will be available for
291 	 * use (taking into account the overhead of a page structure per
292 	 * page).
293 	 */
294 
295 	first_page = phys_avail[0] / PAGE_SIZE;
296 
297 	/* for VM_PAGE_CHECK() */
298 	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
299 
300 	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
301 	npages = (total - (page_range * sizeof(struct vm_page)) -
302 	    (start - phys_avail[biggestone])) / PAGE_SIZE;
303 
304 	/*
305 	 * Initialize the mem entry structures now, and put them in the free
306 	 * queue.
307 	 */
308 
309 	vm_page_array = (vm_page_t) vaddr;
310 	mapped = vaddr;
311 
312 
313 	/*
314 	 * Validate these addresses.
315 	 */
316 
317 	new_start = round_page(start + page_range * sizeof(struct vm_page));
318 	mapped = pmap_map(mapped, start, new_start,
319 	    VM_PROT_READ | VM_PROT_WRITE);
320 	start = new_start;
321 
322 	first_managed_page = start / PAGE_SIZE;
323 
324 	/*
325 	 * Clear all of the page structures
326 	 */
327 	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
328 	vm_page_array_size = page_range;
329 
330 	cnt.v_page_count = 0;
331 	cnt.v_free_count = 0;
332 	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
333 		if (i == biggestone)
334 			pa = ptoa(first_managed_page);
335 		else
336 			pa = phys_avail[i];
337 		while (pa < phys_avail[i + 1] && npages-- > 0) {
338 			++cnt.v_page_count;
339 			++cnt.v_free_count;
340 			m = PHYS_TO_VM_PAGE(pa);
341 			m->flags = PG_FREE;
342 			vm_page_set_clean(m, 0, PAGE_SIZE);
343 			m->object = 0;
344 			m->phys_addr = pa;
345 			m->hold_count = 0;
346 			TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
347 			pa += PAGE_SIZE;
348 		}
349 	}
350 
351 	/*
352 	 * Initialize vm_pages_needed lock here - don't wait for pageout
353 	 * daemon	XXX
354 	 */
355 	simple_lock_init(&vm_pages_needed_lock);
356 
357 	return (mapped);
358 }
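/*
 * Layout note: the hash buckets, the preallocated kernel map entries,
 * and the vm_page array are all carved out of the largest phys_avail[]
 * chunk and mapped starting at the original "vaddr"; the address
 * returned above is the first kernel virtual address left free after
 * the vm_page array mapping.
 */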
359 
360 /*
361  *	vm_page_hash:
362  *
363  *	Distributes the object/offset key pair among hash buckets.
364  *
365  *	NOTE:  This function depends on vm_page_bucket_count being a power of 2.
366  */
367 inline int
368 vm_page_hash(object, offset)
369 	vm_object_t object;
370 	vm_offset_t offset;
371 {
372 	return ((unsigned) object + offset / NBPG) & vm_page_hash_mask;
373 }
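/*
 * Example (assuming NBPG == 4096 and vm_page_bucket_count == 1024, so
 * vm_page_hash_mask == 0x3ff): the object pointer and the page index
 * (offset / NBPG) are summed and the low 10 bits select the bucket.
 * The power-of-2 bucket count is what lets the mask stand in for a
 * modulo operation.
 */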
374 
375 /*
376  *	vm_page_insert:		[ internal use only ]
377  *
378  *	Inserts the given mem entry into the object/object-page
379  *	table and object list.
380  *
381  *	The object and page must be locked.
382  */
383 
384 void
385 vm_page_insert(mem, object, offset)
386 	register vm_page_t mem;
387 	register vm_object_t object;
388 	register vm_offset_t offset;
389 {
390 	register struct pglist *bucket;
391 	int s;
392 
393 	VM_PAGE_CHECK(mem);
394 
395 	if (mem->flags & PG_TABLED)
396 		panic("vm_page_insert: already inserted");
397 
398 	/*
399 	 * Record the object/offset pair in this page
400 	 */
401 
402 	mem->object = object;
403 	mem->offset = offset;
404 
405 	/*
406 	 * Insert it into the object_object/offset hash table
407 	 */
408 
409 	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
410 	s = splhigh();
411 	simple_lock(&bucket_lock);
412 	TAILQ_INSERT_TAIL(bucket, mem, hashq);
413 	simple_unlock(&bucket_lock);
414 
415 	/*
416 	 * Now link into the object's list of backed pages.
417 	 */
418 
419 	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
420 	(void) splx(s);
421 	mem->flags |= PG_TABLED;
422 
423 	/*
424 	 * And show that the object has one more resident page.
425 	 */
426 
427 	object->resident_page_count++;
428 }
429 
430 /*
431  *	vm_page_remove:		[ internal use only ]
432  *				NOTE: used by device pager as well -wfj
433  *
434  *	Removes the given mem entry from the object/offset-page
435  *	table and the object page list.
436  *
437  *	The object and page must be locked.
438  */
439 
440 void
441 vm_page_remove(mem)
442 	register vm_page_t mem;
443 {
444 	register struct pglist *bucket;
445 	int s;
446 
447 	VM_PAGE_CHECK(mem);
448 
449 
450 	if (!(mem->flags & PG_TABLED))
451 		return;
452 
453 	/*
454 	 * Remove from the object_object/offset hash table
455 	 */
456 
457 	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
458 	s = splhigh();
459 	simple_lock(&bucket_lock);
460 	TAILQ_REMOVE(bucket, mem, hashq);
461 	simple_unlock(&bucket_lock);
462 
463 	/*
464 	 * Now remove from the object's list of backed pages.
465 	 */
466 
467 	TAILQ_REMOVE(&mem->object->memq, mem, listq);
468 	(void) splx(s);
469 
470 	/*
471 	 * And show that the object has one fewer resident page.
472 	 */
473 
474 	mem->object->resident_page_count--;
475 
476 	mem->flags &= ~PG_TABLED;
477 }
478 
479 /*
480  *	vm_page_lookup:
481  *
482  *	Returns the page associated with the object/offset
483  *	pair specified; if none is found, NULL is returned.
484  *
485  *	The object must be locked.  No side effects.
486  */
487 
488 vm_page_t
489 vm_page_lookup(object, offset)
490 	register vm_object_t object;
491 	register vm_offset_t offset;
492 {
493 	register vm_page_t mem;
494 	register struct pglist *bucket;
495 	int s;
496 
497 	/*
498 	 * Search the hash table for this object/offset pair
499 	 */
500 
501 	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
502 
503 	s = splhigh();
504 	simple_lock(&bucket_lock);
505 	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
506 		VM_PAGE_CHECK(mem);
507 		if ((mem->object == object) && (mem->offset == offset)) {
508 			simple_unlock(&bucket_lock);
509 			splx(s);
510 			return (mem);
511 		}
512 	}
513 
514 	simple_unlock(&bucket_lock);
515 	splx(s);
516 	return (NULL);
517 }
518 
519 /*
520  *	vm_page_rename:
521  *
522  *	Move the given memory entry from its
523  *	current object to the specified target object/offset.
524  *
525  *	The object must be locked.
526  */
527 void
528 vm_page_rename(mem, new_object, new_offset)
529 	register vm_page_t mem;
530 	register vm_object_t new_object;
531 	vm_offset_t new_offset;
532 {
533 	int s;
534 
535 	if (mem->object == new_object)
536 		return;
537 
538 	vm_page_lock_queues(); /* keep page from moving out from under pageout daemon */
539 	s = splhigh();
540 	vm_page_remove(mem);
541 	vm_page_insert(mem, new_object, new_offset);
542 	splx(s);
543 	vm_page_unlock_queues();
544 }
545 
546 int
547 vm_page_unqueue(vm_page_t mem)
548 {
549 	int s, origflags;
550 
551 	origflags = mem->flags;
552 
553 	if ((origflags & (PG_ACTIVE|PG_INACTIVE|PG_CACHE)) == 0)
554 		return origflags;
555 
556 	s = splhigh();
557 	if (mem->flags & PG_ACTIVE) {
558 		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
559 		cnt.v_active_count--;
560 		mem->flags &= ~PG_ACTIVE;
561 	} else if (mem->flags & PG_INACTIVE) {
562 		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
563 		cnt.v_inactive_count--;
564 		mem->flags &= ~PG_INACTIVE;
565 	} else if (mem->flags & PG_CACHE) {
566 		TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
567 		cnt.v_cache_count--;
568 		mem->flags &= ~PG_CACHE;
569 		if (cnt.v_cache_count + cnt.v_free_count < cnt.v_free_reserved)
570 			wakeup((caddr_t) &vm_pages_needed);
571 	}
572 	splx(s);
573 	return origflags;
574 }
575 
576 void
577 vm_page_requeue(vm_page_t mem, int flags)
578 {
579 	int s;
580 
581 	if (mem->wire_count)
582 		return;
583 	s = splhigh();
584 	if (flags & PG_CACHE) {
585 		TAILQ_INSERT_TAIL(&vm_page_queue_cache, mem, pageq);
586 		mem->flags |= PG_CACHE;
587 		cnt.v_cache_count++;
588 	} else if (flags & PG_ACTIVE) {
589 		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
590 		mem->flags |= PG_ACTIVE;
591 		cnt.v_active_count++;
592 	} else if (flags & PG_INACTIVE) {
593 		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, mem, pageq);
594 		mem->flags |= PG_INACTIVE;
595 		cnt.v_inactive_count++;
596 	}
597 	TAILQ_REMOVE(&mem->object->memq, mem, listq);
598 	TAILQ_INSERT_TAIL(&mem->object->memq, mem, listq);
599 	splx(s);
600 }
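/*
 * vm_page_unqueue() returns the page's previous queue flags so that a
 * caller which temporarily pulls a page off the paging queues can put
 * it back afterwards, roughly (illustrative sketch):
 *
 *	flags = vm_page_unqueue(m);
 *	... work on the page ...
 *	vm_page_requeue(m, flags);
 */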
601 
602 /*
603  *	vm_page_alloc:
604  *
605  *	Allocate and return a memory cell associated
606  *	with this VM object/offset pair.
607  *
608  *	page_req -- 0	normal process request			VM_ALLOC_NORMAL
609  *	page_req -- 1	interrupt time request			VM_ALLOC_INTERRUPT
610  *	page_req -- 2	system *really* needs a page	VM_ALLOC_SYSTEM
611  *					but *cannot* be at interrupt time
612  *
613  *	Object must be locked.
614  */
615 vm_page_t
616 vm_page_alloc(object, offset, page_req)
617 	vm_object_t object;
618 	vm_offset_t offset;
619 	int page_req;
620 {
621 	register vm_page_t mem;
622 	int s;
623 
624 	simple_lock(&vm_page_queue_free_lock);
625 
626 	s = splhigh();
627 
628 	if (((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_reserved) &&
629 	    (page_req == VM_ALLOC_NORMAL) &&
630 	    (curproc != pageproc)) {
631 		simple_unlock(&vm_page_queue_free_lock);
632 		splx(s);
633 		return (NULL);
634 	}
635 
636 	if (page_req == VM_ALLOC_INTERRUPT) {
637 		if ((mem = vm_page_queue_free.tqh_first) == 0) {
638 			simple_unlock(&vm_page_queue_free_lock);
639 			splx(s);
640 			/*
641 			 * wake the pageout daemon here -- an interrupt-time caller cannot do VM_WAIT
642 			 */
643 			wakeup((caddr_t) &vm_pages_needed);
644 			return NULL;
645 		}
646 	} else {
647 		if ((cnt.v_free_count < cnt.v_free_reserved) ||
648 		    (mem = vm_page_queue_free.tqh_first) == 0) {
649 			mem = vm_page_queue_cache.tqh_first;
650 			if (mem) {
651 				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
652 				vm_page_remove(mem);
653 				cnt.v_cache_count--;
654 				goto gotpage;
655 			}
656 
657 			if (page_req == VM_ALLOC_SYSTEM &&
658 			    cnt.v_free_count > cnt.v_interrupt_free_min) {
659 				mem = vm_page_queue_free.tqh_first;
660 			}
661 
662 			if (mem == NULL) {
663 				simple_unlock(&vm_page_queue_free_lock);
664 				splx(s);
665 				wakeup((caddr_t) &vm_pages_needed);
666 				return (NULL);
667 			}
668 		}
669 	}
670 
671 	TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
672 	cnt.v_free_count--;
673 
674 gotpage:
675 	simple_unlock(&vm_page_queue_free_lock);
676 
677 	mem->flags = PG_BUSY;
678 	mem->wire_count = 0;
679 	mem->hold_count = 0;
680 	mem->act_count = 0;
681 	mem->busy = 0;
682 	mem->valid = 0;
683 	mem->dirty = 0;
684 	mem->bmapped = 0;
685 
686 	/* XXX before splx until vm_page_insert is safe */
687 	vm_page_insert(mem, object, offset);
688 
689 	splx(s);
690 
691 	/*
692 	 * Don't wake the pageout daemon too often: only do so when we
693 	 * are nearly out of memory.
694 	 */
695 	if ((curproc != pageproc) &&
696 	    (((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) ||
697 	    (cnt.v_free_count < cnt.v_pageout_free_min)))
698 		wakeup((caddr_t) &vm_pages_needed);
699 
700 	return (mem);
701 }
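/*
 * Typical use (illustrative sketch, not taken from this file): a
 * pagein path that is allowed to sleep might allocate with
 * VM_ALLOC_NORMAL and wait for the pageout daemon on failure, roughly:
 *
 *	m = vm_page_alloc(object, offset, VM_ALLOC_NORMAL);
 *	if (m == NULL) {
 *		VM_WAIT;
 *		goto retry;
 *	}
 *
 * The returned page has PG_BUSY set; the caller is expected to clear
 * it once the page has been initialized.
 */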
702 
703 vm_offset_t
704 vm_page_alloc_contig(size, low, high, alignment)
705 	vm_offset_t size;
706 	vm_offset_t low;
707 	vm_offset_t high;
708 	vm_offset_t alignment;
709 {
710 	int i, s, start;
711 	vm_offset_t addr, phys, tmp_addr;
712 	vm_page_t pga = vm_page_array;
713 	extern vm_map_t kernel_map;
714 
715 	if ((alignment & (alignment - 1)) != 0)
716 		panic("vm_page_alloc_contig: alignment must be a power of 2");
717 
718 	start = 0;
719 	s = splhigh();
720 again:
721 	/*
722 	 * Find first page in array that is free, within range, and aligned.
723 	 */
724 	for (i = start; i < cnt.v_page_count; i++) {
725 		phys = VM_PAGE_TO_PHYS(&pga[i]);
726 		if (((pga[i].flags & PG_FREE) == PG_FREE) &&
727 		    (phys >= low) && (phys < high) &&
728 		    ((phys & (alignment - 1)) == 0))
729 			break;
730 	}
731 
732 	/*
733 	 * If the above failed or we will exceed the upper bound, fail.
734 	 */
735 	if ((i == cnt.v_page_count) || ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
736 		splx(s);
737 		return (NULL);
738 	}
739 	start = i;
740 
741 	/*
742 	 * Check successive pages for contiguous and free.
743 	 */
744 	for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
745 		if ((VM_PAGE_TO_PHYS(&pga[i]) !=
746 			(VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
747 		    ((pga[i].flags & PG_FREE) != PG_FREE)) {
748 			start++;
749 			goto again;
750 		}
751 	}
752 
753 	/*
754 	 * We've found a contiguous chunk that meets our requirements.
755 	 * Allocate kernel VM, unfree and assign the physical pages to it and
756 	 * return kernel VM pointer.
757 	 */
758 	tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);
759 
760 	for (i = start; i < (start + size / PAGE_SIZE); i++) {
761 		vm_page_t m = &pga[i];
762 
763 		TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
764 		cnt.v_free_count--;
765 		m->valid = VM_PAGE_BITS_ALL;
766 		m->flags = 0;
767 		m->dirty = 0;
768 		m->wire_count = 0;
769 		m->act_count = 0;
770 		m->bmapped = 0;
771 		m->busy = 0;
772 		vm_page_insert(m, kernel_object, tmp_addr - VM_MIN_KERNEL_ADDRESS);
773 		vm_page_wire(m);
774 		pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(&pga[i]));
775 		tmp_addr += PAGE_SIZE;
776 	}
777 
778 	splx(s);
779 	return (addr);
780 }
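/*
 * Example (illustrative, hypothetical caller): a driver that needs a
 * physically contiguous, 64K-aligned buffer below 16MB for ISA DMA
 * might call
 *
 *	buf = vm_page_alloc_contig(round_page(65536), 0, (1 << 24) - 1,
 *	    65536);
 *
 * and treat a NULL return as "no suitable run of free pages found".
 */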
781 
782 /*
783  *	vm_page_free:
784  *
785  *	Returns the given page to the free list,
786  *	disassociating it with any VM object.
787  *
788  *	Object and page must be locked prior to entry.
789  */
790 void
791 vm_page_free(mem)
792 	register vm_page_t mem;
793 {
794 	int s;
795 
796 	s = splhigh();
797 	vm_page_remove(mem);
798 	vm_page_unqueue(mem);
799 
800 	if (mem->bmapped || mem->busy || mem->flags & PG_BUSY) {
801 		printf("vm_page_free: offset(%d), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
802 		    mem->offset, mem->bmapped, mem->busy, (mem->flags & PG_BUSY) ? 1 : 0);
803 		panic("vm_page_free: freeing busy page");
804 	}
805 	if (mem->flags & PG_FREE)
806 		panic("vm_page_free: freeing free page");
807 
808 	if (!(mem->flags & PG_FICTITIOUS)) {
809 
810 		simple_lock(&vm_page_queue_free_lock);
811 		if (mem->wire_count) {
812 			if (mem->wire_count > 1) {
813 				printf("vm_page_free: wire count > 1 (%d)", mem->wire_count);
814 				panic("vm_page_free: invalid wire count");
815 			}
816 			cnt.v_wire_count--;
817 			mem->wire_count = 0;
818 		}
819 		mem->flags |= PG_FREE;
820 		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
821 
822 		cnt.v_free_count++;
823 		simple_unlock(&vm_page_queue_free_lock);
824 		splx(s);
825 		/*
826 		 * if pageout daemon needs pages, then tell it that there are
827 		 * some free.
828 		 */
829 		if (vm_pageout_pages_needed) {
830 			wakeup((caddr_t) &vm_pageout_pages_needed);
831 			vm_pageout_pages_needed = 0;
832 		}
833 
834 		/*
835 		 * wakeup processes that are waiting on memory if we hit a
836 		 * high water mark. And wakeup scheduler process if we have
837 		 * lots of memory. this process will swapin processes.
838 		 */
839 		if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
840 			wakeup((caddr_t) &cnt.v_free_count);
841 			wakeup((caddr_t) &proc0);
842 		}
843 	} else {
844 		splx(s);
845 	}
846 	if (mem->flags & PG_WANTED)
847 		wakeup((caddr_t) mem);
848 	cnt.v_tfree++;
849 }
850 
851 
852 /*
853  *	vm_page_wire:
854  *
855  *	Mark this page as wired down by yet
856  *	another map, removing it from paging queues
857  *	as necessary.
858  *
859  *	The page queues must be locked.
860  */
861 void
862 vm_page_wire(mem)
863 	register vm_page_t mem;
864 {
865 	int s;
866 
867 	VM_PAGE_CHECK(mem);
868 
869 	if (mem->wire_count == 0) {
870 		vm_page_unqueue(mem);
871 		cnt.v_wire_count++;
872 	}
873 	mem->wire_count++;
874 }
875 
876 /*
877  *	vm_page_unwire:
878  *
879  *	Release one wiring of this page, potentially
880  *	enabling it to be paged again.
881  *
882  *	The page queues must be locked.
883  */
884 void
885 vm_page_unwire(mem)
886 	register vm_page_t mem;
887 {
888 	int s;
889 
890 	VM_PAGE_CHECK(mem);
891 
892 	s = splhigh();
893 
894 	if (mem->wire_count)
895 		mem->wire_count--;
896 	if (mem->wire_count == 0) {
897 		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
898 		cnt.v_active_count++;
899 		mem->flags |= PG_ACTIVE;
900 		cnt.v_wire_count--;
901 	}
902 	splx(s);
903 }
904 
905 /*
906  *	vm_page_deactivate:
907  *
908  *	Returns the given page to the inactive list,
909  *	indicating that no physical maps have access
910  *	to this page.  [Used by the physical mapping system.]
911  *
912  *	The page queues must be locked.
913  */
914 void
915 vm_page_deactivate(m)
916 	register vm_page_t m;
917 {
918 	int spl;
919 
920 	VM_PAGE_CHECK(m);
921 
922 	/*
923 	 * Only move active pages -- ignore locked or already inactive ones.
924 	 *
925 	 * XXX: sometimes we get pages which aren't wired down or on any queue -
926 	 * we need to put them on the inactive queue also, otherwise we lose
927 	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
928 	 */
929 
930 	spl = splhigh();
931 	if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
932 	    m->hold_count == 0) {
933 		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
934 		vm_page_unqueue(m);
935 		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
936 		m->flags |= PG_INACTIVE;
937 		cnt.v_inactive_count++;
938 		m->act_count = 0;
939 	}
940 	splx(spl);
941 }
942 
943 /*
944  * vm_page_cache
945  *
946  * Put the specified page onto the page cache queue (if appropriate).
947  */
948 
949 void
950 vm_page_cache(m)
951 	register vm_page_t m;
952 {
953 	int s;
954 
955 	VM_PAGE_CHECK(m);
956 	if ((m->flags & (PG_CACHE | PG_BUSY)) || m->busy || m->wire_count ||
957 	    m->bmapped)
958 		return;
959 
960 	s = splhigh();
961 	vm_page_unqueue(m);
962 	pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
963 
964 	TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
965 	m->flags |= PG_CACHE;
966 	cnt.v_cache_count++;
967 	if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
968 		wakeup((caddr_t) &cnt.v_free_count);
969 		wakeup((caddr_t) &proc0);
970 	}
971 	if (vm_pageout_pages_needed) {
972 		wakeup((caddr_t) &vm_pageout_pages_needed);
973 		vm_pageout_pages_needed = 0;
974 	}
975 
976 	splx(s);
977 }
978 
979 /*
980  *	vm_page_activate:
981  *
982  *	Put the specified page on the active list (if appropriate).
983  *
984  *	The page queues must be locked.
985  */
986 
987 void
988 vm_page_activate(m)
989 	register vm_page_t m;
990 {
991 	int s;
992 
993 	VM_PAGE_CHECK(m);
994 
995 	s = splhigh();
996 	if (m->flags & PG_ACTIVE)
997 		panic("vm_page_activate: already active");
998 
999 	vm_page_unqueue(m);
1000 
1001 	if (m->wire_count == 0) {
1002 		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1003 		m->flags |= PG_ACTIVE;
1004 		TAILQ_REMOVE(&m->object->memq, m, listq);
1005 		TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
1006 		if (m->act_count < 5)
1007 			m->act_count = 5;
1008 		else if (m->act_count < ACT_MAX)
1009 			m->act_count += 1;
1010 		cnt.v_active_count++;
1011 	}
1012 	splx(s);
1013 }
1014 
1015 /*
1016  *	vm_page_zero_fill:
1017  *
1018  *	Zero-fill the specified page.
1019  *	Written as a standard pagein routine, to
1020  *	be used by the zero-fill object.
1021  */
1022 
1023 boolean_t
1024 vm_page_zero_fill(m)
1025 	vm_page_t m;
1026 {
1027 	VM_PAGE_CHECK(m);
1028 
1029 	pmap_zero_page(VM_PAGE_TO_PHYS(m));
1030 	m->valid = VM_PAGE_BITS_ALL;
1031 	return (TRUE);
1032 }
1033 
1034 /*
1035  *	vm_page_copy:
1036  *
1037  *	Copy one page to another
1038  */
1039 void
1040 vm_page_copy(src_m, dest_m)
1041 	vm_page_t src_m;
1042 	vm_page_t dest_m;
1043 {
1044 	VM_PAGE_CHECK(src_m);
1045 	VM_PAGE_CHECK(dest_m);
1046 
1047 	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
1048 	dest_m->valid = VM_PAGE_BITS_ALL;
1049 }
1050 
1051 
1052 /*
1053  * mapping function for valid bits or for dirty bits in
1054  * a page
1055  */
1056 inline int
1057 vm_page_bits(int base, int size)
1058 {
1059 	u_short chunk;
1060 
1061 	if ((base == 0) && (size >= PAGE_SIZE))
1062 		return VM_PAGE_BITS_ALL;
1063 	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1064 	base = (base % PAGE_SIZE) / DEV_BSIZE;
1065 	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
1066 	return (chunk << base) & VM_PAGE_BITS_ALL;
1067 }
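/*
 * Worked example (assuming PAGE_SIZE == 4096 and DEV_BSIZE == 512):
 * vm_page_bits(512, 1024) leaves size at 1024 (already a DEV_BSIZE
 * multiple), computes base chunk 512/512 == 1, looks up
 * vm_page_dev_bsize_chunks[1024/512] == 0x3 and returns
 * 0x3 << 1 == 0x6, i.e. the bits for the second and third DEV_BSIZE
 * chunks of the page.
 */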
1068 
1069 /*
1070  * set a page (partially) valid
1071  */
1072 void
1073 vm_page_set_valid(m, base, size)
1074 	vm_page_t m;
1075 	int base;
1076 	int size;
1077 {
1078 	m->valid |= vm_page_bits(base, size);
1079 }
1080 
1081 /*
1082  * set a page (partially) invalid
1083  */
1084 void
1085 vm_page_set_invalid(m, base, size)
1086 	vm_page_t m;
1087 	int base;
1088 	int size;
1089 {
1090 	int bits;
1091 
1092 	m->valid &= ~(bits = vm_page_bits(base, size));
1093 	if (m->valid == 0)
1094 		m->dirty &= ~bits;
1095 }
1096 
1097 /*
1098  * is (partial) page valid?
1099  */
1100 int
1101 vm_page_is_valid(m, base, size)
1102 	vm_page_t m;
1103 	int base;
1104 	int size;
1105 {
1106 	int bits;
1107 
1108 	if (m->valid && ((m->valid & (bits = vm_page_bits(base, size))) == bits))
1109 		return 1;
1110 	else
1111 		return 0;
1112 }
1113 
1114 
1115 /*
1116  * set a page (partially) dirty
1117  */
1118 void
1119 vm_page_set_dirty(m, base, size)
1120 	vm_page_t m;
1121 	int base;
1122 	int size;
1123 {
1124 	if ((base != 0) || (size != PAGE_SIZE)) {
1125 		if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
1126 			m->dirty = VM_PAGE_BITS_ALL;
1127 			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1128 			return;
1129 		}
1130 		m->dirty |= vm_page_bits(base, size);
1131 	} else {
1132 		m->dirty = VM_PAGE_BITS_ALL;
1133 		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1134 	}
1135 }
1136 
1137 void
1138 vm_page_test_dirty(m)
1139 	vm_page_t m;
1140 {
1141 	if ((m->dirty != VM_PAGE_BITS_ALL) &&
1142 		pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
1143 		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1144 		m->dirty = VM_PAGE_BITS_ALL;
1145 	}
1146 }
1147 
1148 /*
1149  * set a page (partially) clean
1150  */
1151 void
1152 vm_page_set_clean(m, base, size)
1153 	vm_page_t m;
1154 	int base;
1155 	int size;
1156 {
1157 	m->dirty &= ~vm_page_bits(base, size);
1158 }
1159 
1160 /*
1161  * is (partial) page clean
1162  */
1163 int
1164 vm_page_is_clean(m, base, size)
1165 	vm_page_t m;
1166 	int base;
1167 	int size;
1168 {
1169 	if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
1170 		m->dirty = VM_PAGE_BITS_ALL;
1171 		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1172 	}
1173 	if ((m->dirty & m->valid & vm_page_bits(base, size)) == 0)
1174 		return 1;
1175 	else
1176 		return 0;
1177 }
1178 
1179 void
1180 print_page_info()
1181 {
1182 	printf("cnt.v_free_count: %d\n", cnt.v_free_count);
1183 	printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
1184 	printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
1185 	printf("cnt.v_active_count: %d\n", cnt.v_active_count);
1186 	printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
1187 	printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
1188 	printf("cnt.v_free_min: %d\n", cnt.v_free_min);
1189 	printf("cnt.v_free_target: %d\n", cnt.v_free_target);
1190 	printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
1191 	printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
1192 }
1193