/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#if !defined(KLD_MODULE)
#include "opt_vmpage.h"
#endif

#include <vm/pmap.h>
#include <machine/atomic.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).
 *
 *	The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 *	bits set without having associated valid bits set.  This is used by
 *	NFS to implement piecemeal writes.
 */

TAILQ_HEAD(pglist, vm_page);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO queue or free list (P) */
	struct vm_page	*hnext;		/* hash table link (O,P) */
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */

	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_offset_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	u_short	queue;			/* page queue index */
	u_short	flags,			/* see below */
		pc;			/* page color */
	u_short wire_count;		/* wired down maps refs (P) */
	short hold_count;		/* page hold count */
	u_char	act_count;		/* page usage count */
	u_char	busy;			/* page busy count */
	/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
#if PAGE_SIZE == 4096
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 8192
	u_short	valid;			/* map of valid DEV_BSIZE chunks */
	u_short	dirty;			/* map of dirty DEV_BSIZE chunks */
#endif
};
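
/*
 * Example (illustrative sketch, not part of the interface): 'valid'
 * and 'dirty' carry one bit per DEV_BSIZE chunk, so with PAGE_SIZE
 * == 4096 and DEV_BSIZE == 512, bit n covers bytes [n * DEV_BSIZE,
 * (n + 1) * DEV_BSIZE) of the page.  The 'offset' below (a byte
 * offset within the page) is hypothetical.
 */
#if 0
	int chunk = offset / DEV_BSIZE;

	if (m->valid & (1 << chunk))
		;	/* this DEV_BSIZE piece holds good data */
	if (m->dirty & (1 << chunk))
		;	/* ... and must be written out before reuse */
#endif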

/*
 * note: currently use SWAPBLK_NONE as an absolute value rather than
 * a flag bit.
 */

#define SWAPBLK_MASK	((daddr_t)((u_daddr_t)-1 >> 1))		/* mask */
#define SWAPBLK_NONE	((daddr_t)((u_daddr_t)SWAPBLK_MASK + 1))/* flag */

#if !defined(KLD_MODULE)

/*
 * Page coloring parameters
 */
/* Each of PQ_FREE and PQ_CACHE has PQ_L2_SIZE entries */

/* Define one of the following */
#if defined(PQ_HUGECACHE)
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 256	/* A number of colors optimized for a 1M cache */
#endif

/* Define one of the following */
#if defined(PQ_LARGECACHE)
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 128	/* A number of colors optimized for a 512K cache */
#endif

/*
 * Use 'options PQ_NOOPT' to disable page coloring
 */
#if defined(PQ_NOOPT)
#define PQ_PRIME1 1
#define PQ_PRIME2 1
#define PQ_L2_SIZE 1
#endif

#if defined(PQ_NORMALCACHE)
#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors (optimized for a 64K cache) */
#endif

#if defined(PQ_MEDIUMCACHE)
#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 64	/* A number of colors optimized for a 256K cache */
#endif

#if !defined(PQ_L2_SIZE)
#define PQ_PRIME1 9	/* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 32	/* 512KB or smaller, 4-way set-associative cache */
#endif

#define PQ_L2_MASK (PQ_L2_SIZE - 1)

#if 1
#define PQ_NONE 0
#define PQ_FREE	1
#define PQ_INACTIVE (1 + 1*PQ_L2_SIZE)
#define PQ_ACTIVE (2 + 1*PQ_L2_SIZE)
#define PQ_CACHE (3 + 1*PQ_L2_SIZE)
#define PQ_COUNT (3 + 2*PQ_L2_SIZE)
#else
#define PQ_NONE		PQ_COUNT
#define PQ_FREE		0
#define PQ_INACTIVE	PQ_L2_SIZE
#define PQ_ACTIVE	(1 +   PQ_L2_SIZE)
#define PQ_CACHE	(2 +   PQ_L2_SIZE)
#define PQ_COUNT	(2 + 2*PQ_L2_SIZE)
#endif

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
	int	lcnt;
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];

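/*
 * Example (illustrative sketch, not part of the interface): PQ_FREE
 * and PQ_CACHE each cover PQ_L2_SIZE queues, one per page color, so
 * the queue holding a free page is normally derived from its color.
 * The page 'm' below is hypothetical and assumed to be free.
 */
#if 0
	struct vpgqueues *vpq;

	vpq = &vm_page_queues[PQ_FREE + (m->pc & PQ_L2_MASK)];
	/* vpq->pl is the tailq of free pages sharing m's color */
#endif
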
#endif

/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_FILLED and PG_DIRTY are added for the filesystems.
 *
 * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 * 	 not under PV management but otherwise should be treated as a
 *	 normal page.  Pages not under PV management cannot be paged out
 *	 via the object/vm_page_t because there is no knowledge of their
 *	 pte mappings, nor can they be removed from their objects via
 *	 the object, and such pages are also not on any PQ queue.
 */
#define	PG_BUSY		0x0001		/* page is in transit (O) */
#define	PG_WANTED	0x0002		/* someone is waiting for page (O) */
#define	PG_FICTITIOUS	0x0008		/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x0010		/* page is mapped writeable */
#define PG_MAPPED	0x0020		/* page is mapped */
#define	PG_ZERO		0x0040		/* page is zeroed */
#define PG_REFERENCED	0x0080		/* page has been referenced */
#define PG_CLEANCHK	0x0100		/* page will be checked for cleaning */
#define PG_SWAPINPROG	0x0200		/* swap I/O in progress on page */
#define PG_NOSYNC	0x0400		/* do not collect for syncer */
#define PG_UNMANAGED	0x0800		/* No PV management for page */

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64
#define PFCLUSTER_BEHIND	3
#define PFCLUSTER_AHEAD		3

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of the following lists:
 *
 *	free
 *		Available for allocation now.
 *
 * The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 *
 */

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa) \
		(&vm_page_array[atop(pa) - first_page])

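/*
 * Example (illustrative sketch): round-tripping between a managed
 * physical address and its vm_page_t.  Assumes 'pa' is page aligned
 * and within the range covered by vm_page_array.
 */
#if 0
	vm_page_t m = PHYS_TO_VM_PAGE(pa);

	KASSERT(VM_PAGE_TO_PHYS(m) == pa, ("phys/page mismatch"));
#endif
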
/*
 *	Functions implemented as macros
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_short(&(m)->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_short(&(m)->flags, bits);
}

#if 0
static __inline void
vm_page_assert_wait(vm_page_t m, int interruptible)
{
	vm_page_flag_set(m, PG_WANTED);
	assert_wait((int) m, interruptible);
}
#endif

static __inline void
vm_page_busy(vm_page_t m)
{
	KASSERT((m->flags & PG_BUSY) == 0, ("vm_page_busy: page already busy!!!"));
	vm_page_flag_set(m, PG_BUSY);
}

/*
 *	vm_page_flash:
 *
 *	Wake up anyone waiting for the page.
 */

static __inline void
vm_page_flash(vm_page_t m)
{
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}

/*
 *	vm_page_wakeup:
 *
 *	Clear the PG_BUSY flag and wake up anyone waiting for the
 *	page.
 */

static __inline void
vm_page_wakeup(vm_page_t m)
{
	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
	vm_page_flag_clear(m, PG_BUSY);
	vm_page_flash(m);
}

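/*
 * Example (illustrative sketch): the usual PG_BUSY protocol.  A page
 * is busied while its identity or contents are in transit, and
 * vm_page_wakeup() both clears the flag and wakes any waiters.
 */
#if 0
	vm_page_busy(m);
	/* ... fill the page or otherwise manipulate it ... */
	vm_page_wakeup(m);
#endif
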
/*
 *	vm_page_io_start/vm_page_io_finish:
 *
 *	Account for device I/O in progress on the page by adjusting
 *	the m->busy count.
 */

static __inline void
vm_page_io_start(vm_page_t m)
{
	atomic_add_char(&(m)->busy, 1);
}

static __inline void
vm_page_io_finish(vm_page_t m)
{
	atomic_subtract_char(&m->busy, 1);
	if (m->busy == 0)
		vm_page_flash(m);
}

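/*
 * Example (illustrative sketch): m->busy counts device I/O in
 * progress, so each transfer is bracketed by the pair below;
 * vm_page_io_finish() flashes the page when the count drops to zero.
 */
#if 0
	vm_page_io_start(m);
	/* ... issue the read or write and wait for it to complete ... */
	vm_page_io_finish(m);
#endif
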
#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

#if PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif

#define VM_ALLOC_NORMAL		0
#define VM_ALLOC_INTERRUPT	1
#define VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_ZERO		3
#define	VM_ALLOC_RETRY		0x80

void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_pindex_t, int));
vm_page_t vm_page_grab __P((vm_object_t, vm_pindex_t, int));
void vm_page_cache __P((register vm_page_t));
void vm_page_dontneed __P((register vm_page_t));
static __inline void vm_page_copy __P((vm_page_t, vm_page_t));
static __inline void vm_page_free __P((vm_page_t));
static __inline void vm_page_free_zero __P((vm_page_t));
void vm_page_deactivate __P((vm_page_t));
void vm_page_insert __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_pindex_t));
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
vm_page_t vm_add_new_page __P((vm_offset_t pa));
void vm_page_unmanage __P((vm_page_t));
void vm_page_unwire __P((vm_page_t, int));
void vm_page_wire __P((vm_page_t));
void vm_page_unqueue __P((vm_page_t));
void vm_page_unqueue_nowakeup __P((vm_page_t));
void vm_page_set_validclean __P((vm_page_t, int, int));
void vm_page_set_dirty __P((vm_page_t, int, int));
void vm_page_clear_dirty __P((vm_page_t, int, int));
void vm_page_set_invalid __P((vm_page_t, int, int));
static __inline boolean_t vm_page_zero_fill __P((vm_page_t));
int vm_page_is_valid __P((vm_page_t, int, int));
void vm_page_test_dirty __P((vm_page_t));
int vm_page_bits __P((int, int));
vm_page_t _vm_page_list_find __P((int, int));
#if 0
int vm_page_sleep(vm_page_t m, char *msg, char *busy);
int vm_page_asleep(vm_page_t m, char *msg, char *busy);
#endif
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);

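/*
 * Example (illustrative sketch): vm_page_alloc() may return NULL
 * when memory is short, so callers typically wait for the pageout
 * daemon and retry (VM_WAIT is the macro declared in vm_pageout.h).
 * The 'object' and 'pindex' variables below are hypothetical.
 */
#if 0
	vm_page_t m;

	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL)
		VM_WAIT;
#endif
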
/*
 * Keep a page from being freed by the page daemon.  This has much
 * the same effect as wiring, except with much lower overhead, and
 * should be used only for *very* temporary holding ("wiring").
 */
static __inline void
vm_page_hold(vm_page_t mem)
{
	mem->hold_count++;
}

static __inline void
vm_page_unhold(vm_page_t mem)
{
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
}

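/*
 * Example (illustrative sketch): hold_count is a cheap, short-term
 * pin, e.g. across a brief window in which locks may be dropped.
 */
#if 0
	vm_page_hold(m);
	/* ... short window in which m must not be freed ... */
	vm_page_unhold(m);
#endif
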
/*
 * 	vm_page_protect:
 *
 *	Reduce the protection of a page.  This routine never raises the
 *	protection and therefore can be safely called if the page is already
 *	at VM_PROT_NONE (it is effectively a NOP then).
 */

static __inline void
vm_page_protect(vm_page_t mem, int prot)
{
	if (prot == VM_PROT_NONE) {
		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(mem, VM_PROT_NONE);
			vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
		}
	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
		pmap_page_protect(mem, VM_PROT_READ);
		vm_page_flag_clear(mem, PG_WRITEABLE);
	}
}

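/*
 * Example (illustrative sketch): a cleaning path typically revokes
 * write access first so no new stores land while the dirty state is
 * sampled via the pmap modify bits.
 */
#if 0
	vm_page_protect(m, VM_PROT_READ);	/* never raises protection */
	vm_page_test_dirty(m);			/* fold in pmap modify bits */
	if (m->dirty == 0)
		;	/* page can be treated as clean */
#endif
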
/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */
static __inline boolean_t
vm_page_zero_fill(m)
	vm_page_t m;
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */
static __inline void
vm_page_copy(src_m, dest_m)
	vm_page_t src_m;
	vm_page_t dest_m;
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_free:
 *
 *	Free a page.
 *
 *	The clearing of PG_ZERO is a temporary safety until the code can be
 *	reviewed to determine that PG_ZERO is being properly cleared on
 *	write faults or maps.  PG_ZERO was previously cleared in
 *	vm_page_alloc().
 */
static __inline void
vm_page_free(m)
	vm_page_t m;
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue
 */
static __inline void
vm_page_free_zero(m)
	vm_page_t m;
{
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 *	vm_page_sleep_busy:
 *
 *	Wait until the page is no longer PG_BUSY or (if also_m_busy is TRUE)
 *	m->busy is zero.  Returns TRUE if it had to sleep (including if
 *	it almost had to sleep and made temporary spl*() mods), FALSE
 *	otherwise.
 *
 *	This routine assumes that interrupts can only remove the busy
 *	status from a page, not set the busy status or change it from
 *	PG_BUSY to m->busy or vice versa (which would create a timing
 *	window).
 *
 *	Note that being an inline, this code will be well optimized.
 */

static __inline int
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
		int s = splvm();
		if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
			/*
			 * Page is busy. Wait and retry.
			 */
			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
			tsleep(m, PVM, msg, 0);
		}
		splx(s);
		return(TRUE);
		/* not reached */
	}
	return(FALSE);
}

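/*
 * Example (illustrative sketch): the usual lookup-and-wait pattern
 * retries from the top whenever vm_page_sleep_busy() slept, since
 * the page may have changed identity in the meantime.  The wait
 * message "pgwait" is an arbitrary choice.
 */
#if 0
retry:
	m = vm_page_lookup(object, pindex);
	if (m != NULL && vm_page_sleep_busy(m, TRUE, "pgwait"))
		goto retry;
#endif
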
/*
 *	vm_page_dirty:
 *
 *	Make the page fully dirty.
 */

static __inline void
vm_page_dirty(vm_page_t m)
{
#if !defined(KLD_MODULE)
	KASSERT(m->queue - m->pc != PQ_CACHE, ("vm_page_dirty: page in cache!"));
#endif
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_undirty:
 *
 *	Set the page to not be dirty.  Note: does not clear pmap modify bits.
 */

static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#if !defined(KLD_MODULE)

static __inline vm_page_t
vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	vm_page_t m;

#if PQ_L2_SIZE > 1
	if (prefer_zero) {
		m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
	} else {
		m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
	}
	if (m == NULL)
		m = _vm_page_list_find(basequeue, index);
#else
	if (prefer_zero) {
		m = TAILQ_LAST(&vm_page_queues[basequeue].pl, pglist);
	} else {
		m = TAILQ_FIRST(&vm_page_queues[basequeue].pl);
	}
#endif
	return(m);
}

#endif

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */