xref: /freebsd/sys/vm/vm_page.c (revision afe61c15161c324a7af299a9b8457aba5afc92db)
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_page.c,v 1.17 1994/04/20 07:07:14 davidg Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct pglist	*vm_page_buckets;		/* Array of buckets */
int		vm_page_bucket_count = 0;	/* How big is array? */
int		vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t	bucket_lock;		/* lock for all buckets XXX */

struct pglist	vm_page_queue_free;
struct pglist	vm_page_queue_active;
struct pglist	vm_page_queue_inactive;
simple_lock_data_t	vm_page_queue_lock;
simple_lock_data_t	vm_page_queue_free_lock;

/* has physical page allocation been initialized? */
boolean_t vm_page_startup_initialized;

vm_page_t	vm_page_array;
long		first_page;
long		last_page;
vm_offset_t	first_phys_addr;
vm_offset_t	last_phys_addr;
vm_size_t	page_mask;
int		page_shift;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t	starta;
	vm_offset_t	enda;
	register vm_offset_t	vaddr;
{
	register vm_offset_t	mapped;
	register vm_page_t	m;
	register struct pglist *bucket;
	vm_size_t		npages, page_range;
	register vm_offset_t	new_start;
	int			i;
	vm_offset_t		pa;
	int nblocks;
	vm_offset_t		first_managed_page;
	int			size;

	extern	vm_offset_t	kentry_data;
	extern	vm_size_t	kentry_data_size;
	extern vm_offset_t phys_avail[];
/* the biggest memory array is the second group of pages */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

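	/*
	 *	Round the start of each physical memory segment up, and the
	 *	end down, to a page boundary.
	 */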
	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i+1] = trunc_page(phys_avail[i+1]);
	}

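	/*
	 *	Find the largest segment of physical memory; the hash buckets,
	 *	kernel map entries and vm_page structures are carved out of
	 *	its beginning.  Also total up the available memory.
	 */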
	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i+1] - phys_avail[i];
		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	start = phys_avail[biggestone];


	/*
	 *	Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	TAILQ_INIT(&vm_page_queue_free);
	TAILQ_INIT(&vm_page_queue_active);
	TAILQ_INIT(&vm_page_queue_inactive);

	/*
	 *	Allocate (and initialize) the hash table buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the next power of 2 greater
	 *	than the number of physical pages in the system.
	 *
	 *	Note:
	 *		This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct pglist *)vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}


	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 *	Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
	new_start = round_page(new_start);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;
	bzero((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = 0; i < vm_page_bucket_count; i++) {
		TAILQ_INIT(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 *	round (or truncate) the addresses to our page size.
	 */

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map which must be initialized before malloc() will
	 *	work (obviously).  Also could include pager maps which would
	 *	be allocated before kmeminit.
	 *
	 *	Allow some kernel map entries... this should be plenty
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
			   MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 *	Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
	bzero((caddr_t) mapped, (vaddr - mapped));
	start = round_page(new_start);

	/*
	 *	Compute the number of pages of memory that will be
	 *	available for use (taking into account the overhead
	 *	of a page structure per page).
	 */

	npages = (total - (start - phys_avail[biggestone])) / (PAGE_SIZE + sizeof(struct vm_page));
	first_page = phys_avail[0] / PAGE_SIZE;

	page_range = (phys_avail[(nblocks-1)*2 + 1] - phys_avail[0]) / PAGE_SIZE;
	/*
	 *	Initialize the mem entry structures now, and
	 *	put them in the free queue.
	 */

	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;


	/*
	 *	Validate these addresses.
	 */

	new_start = round_page(start + page_range * sizeof (struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;

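	/*
	 *	The first page of the largest segment that is left over after
	 *	the buckets, map entries and page structures have been carved
	 *	out of it.
	 */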
	first_managed_page = start / PAGE_SIZE;

	/*
	 *	Clear all of the page structures
	 */
	bzero((caddr_t)vm_page_array, page_range * sizeof(struct vm_page));

	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->flags = 0;
			m->object = 0;
			m->phys_addr = pa;
			m->hold_count = 0;
			TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
			pa += PAGE_SIZE;
		}
	}

	/*
	 *	Initialize vm_pages_needed lock here - don't wait for pageout
	 *	daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	return(mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This function depends on vm_page_bucket_count being a power of 2.
 */
inline const int
vm_page_hash(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	return ((unsigned)object + offset/NBPG) & vm_page_hash_mask;
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset-page
 *	table and object list.
 *
 *	The object and page must be locked.
 */

void vm_page_insert(mem, object, offset)
	register vm_page_t	mem;
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register struct pglist	*bucket;
	int			s;

	VM_PAGE_CHECK(mem);

	if (mem->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	s = splimp();
	simple_lock(&bucket_lock);
	TAILQ_INSERT_TAIL(bucket, mem, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(s);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
	mem->flags |= PG_TABLED;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */

void vm_page_remove(mem)
	register vm_page_t	mem;
{
	register struct pglist	*bucket;
	int			s;

	VM_PAGE_CHECK(mem);

	if (!(mem->flags & PG_TABLED))
		return;

	/*
	 *	Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	s = splimp();
	simple_lock(&bucket_lock);
	TAILQ_REMOVE(bucket, mem, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(s);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&mem->object->memq, mem, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register struct pglist	*bucket;
	int			s;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	s = splimp();
	simple_lock(&bucket_lock);
	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(s);
			return(mem);
		}
	}

	simple_unlock(&bucket_lock);
	splx(s);
	return(NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	if (mem->object == new_object)
		return;

	vm_page_lock_queues();	/* keep page from moving out from
				   under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int		s;

	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
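	/*
	 *	Only the pageout daemon, proc0 and allocations for the
	 *	kernel and kmem objects may dip into the reserved pool
	 *	of free pages; everyone else fails once the free count
	 *	drops below v_free_reserved.
	 */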
	if (object != kernel_object &&
		object != kmem_object &&
		curproc != pageproc && curproc != &proc0 &&
		cnt.v_free_count < cnt.v_free_reserved) {

		simple_unlock(&vm_page_queue_free_lock);
		splx(s);
		/*
		 * this wakeup seems unnecessary, but there is code that
		 * might just check to see if there are free pages, and
		 * punt if there aren't.  VM_WAIT does this too, but
		 * redundant wakeups aren't that bad...
		 */
		if (curproc != pageproc)
			wakeup((caddr_t) &vm_pages_needed);
		return(NULL);
	}
	if ((mem = vm_page_queue_free.tqh_first) == 0) {
		simple_unlock(&vm_page_queue_free_lock);
		printf("No pages???\n");
		splx(s);
		/*
		 * comment above re: wakeups applies here too...
		 */
		if (curproc != pageproc)
			wakeup((caddr_t) &vm_pages_needed);
		return(NULL);
	}

	TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);

	cnt.v_free_count--;
	simple_unlock(&vm_page_queue_free_lock);

	VM_PAGE_INIT(mem, object, offset);
	splx(s);

/*
 * Don't wake up the pageout daemon too often; only wake it up when
 * we are nearly out of memory.
 */
	if (curproc != pageproc &&
		(cnt.v_free_count < cnt.v_free_reserved))
		wakeup((caddr_t) &vm_pages_needed);

	return(mem);
}

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
	register vm_page_t	mem;
{
	int s;
	s = splimp();
	vm_page_remove(mem);
	if (mem->flags & PG_ACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
		mem->flags &= ~PG_ACTIVE;
		cnt.v_active_count--;
	}

	if (mem->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
		mem->flags &= ~PG_INACTIVE;
		cnt.v_inactive_count--;
	}

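	/*
	 *	Only managed (non-fictitious) pages are placed back on the
	 *	free list and counted in the free page statistics.
	 */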
	if (!(mem->flags & PG_FICTITIOUS)) {

		simple_lock(&vm_page_queue_free_lock);
		if (mem->wire_count) {
			cnt.v_wire_count--;
			mem->wire_count = 0;
		}
		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);

		cnt.v_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);
		/*
		 * if pageout daemon needs pages, then tell it that there
		 * are some free.
		 */
		if (vm_pageout_pages_needed)
			wakeup((caddr_t)&vm_pageout_pages_needed);

		/*
		 * wakeup processes that are waiting on memory if we
		 * hit a high water mark.
		 */
		if (cnt.v_free_count == cnt.v_free_min) {
			wakeup((caddr_t)&cnt.v_free_count);
		}

		/*
		 * wakeup scheduler process if we have lots of memory.
		 * this process will swapin processes.
		 */
		if (cnt.v_free_count == cnt.v_free_target) {
			wakeup((caddr_t)&proc0);
		}
	} else {
		splx(s);
	}
	wakeup((caddr_t) mem);
}


/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void vm_page_wire(mem)
	register vm_page_t	mem;
{
	int s;
	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		s = splimp();
		if (mem->flags & PG_ACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
			cnt.v_active_count--;
			mem->flags &= ~PG_ACTIVE;
		}
		if (mem->flags & PG_INACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
			cnt.v_inactive_count--;
			mem->flags &= ~PG_INACTIVE;
		}
		splx(s);
		cnt.v_wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void vm_page_unwire(mem)
	register vm_page_t	mem;
{
	int s;
	VM_PAGE_CHECK(mem);

	s = splimp();

	if (mem->wire_count)
		mem->wire_count--;
	if (mem->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count++;
		mem->flags |= PG_ACTIVE;
		cnt.v_wire_count--;
	}
	splx(s);
}

#if 0
/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t	m;
{
	int spl;
	VM_PAGE_CHECK(m);

	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 *
	 *	XXX: sometimes we get pages which aren't wired down
	 *	or on any queue - we need to put them on the inactive
	 *	queue also, otherwise we lose track of them.
	 *	Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */

	spl = splimp();
	if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
		m->hold_count == 0) {

		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		if (m->flags & PG_ACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			m->flags &= ~PG_ACTIVE;
			cnt.v_active_count--;
		}
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->flags |= PG_INACTIVE;
		cnt.v_inactive_count++;
#define NOT_DEACTIVATE_PROTECTS
#ifndef NOT_DEACTIVATE_PROTECTS
		pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
#else
		if ((m->flags & PG_CLEAN) &&
			pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->flags &= ~PG_CLEAN;
#endif
		if ((m->flags & PG_CLEAN) == 0)
			m->flags |= PG_LAUNDRY;
	}
	splx(spl);
}
#endif
#if 1
/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void vm_page_deactivate(m)
	register vm_page_t	m;
{
	int s;
	VM_PAGE_CHECK(m);

	s = splimp();
	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 */

	if ((m->flags & PG_ACTIVE) && (m->hold_count == 0)) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->flags &= ~PG_ACTIVE;
		m->flags |= PG_INACTIVE;
		cnt.v_active_count--;
		cnt.v_inactive_count++;
#define NOT_DEACTIVATE_PROTECTS
#ifndef NOT_DEACTIVATE_PROTECTS
		pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
#else
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->flags &= ~PG_CLEAN;
#endif
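		/*
		 *	A page that is still clean needs no laundering;
		 *	anything else is marked PG_LAUNDRY so the pageout
		 *	daemon will write it before reusing the page.
		 */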
		if (m->flags & PG_CLEAN)
			m->flags &= ~PG_LAUNDRY;
		else
			m->flags |= PG_LAUNDRY;
	}
	splx(s);
}
#endif
/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void vm_page_activate(m)
	register vm_page_t	m;
{
	int s;
	VM_PAGE_CHECK(m);

	s = splimp();
	if (m->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
		cnt.v_inactive_count--;
		m->flags &= ~PG_INACTIVE;
	}
	if (m->wire_count == 0) {
		if (m->flags & PG_ACTIVE)
			panic("vm_page_activate: already active");

		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->flags |= PG_ACTIVE;
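		/*
		 *	Requeue the page at the tail of its object's list
		 *	of resident pages.
		 */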
		TAILQ_REMOVE(&m->object->memq, m, listq);
		TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
		m->act_count = 10;
		cnt.v_active_count++;
	}
	splx(s);
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */

boolean_t
vm_page_zero_fill(m)
	vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */
void
vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}