xref: /freebsd/sys/vm/vm_page.h (revision 230f8c40e55e3462e90151e30f61bd0fdd4dcda3)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id$
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>
/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).
 */

TAILQ_HEAD(pglist, vm_page);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO queue or free list (P) */
	TAILQ_ENTRY(vm_page) hashq;	/* hash table links (O) */
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */

	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_offset_t phys_addr;		/* physical address of page */
	u_short	queue;			/* page queue index */
	u_short	flags,			/* see below */
		pc;			/* page color */
	u_short wire_count;		/* wired down maps refs (P) */
	short hold_count;		/* page hold count */
	u_char	act_count;		/* page usage count */
	u_char	busy;			/* page busy count */
	/*
	 * NOTE: these must support one bit per DEV_BSIZE in a page, so on
	 * standard x86 kernels (4K pages, 512-byte DEV_BSIZE) they must be
	 * at least 8 bits wide.
	 */
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */
};
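
/*
 * Example (illustrative sketch): walking an object's resident pages
 * through the listq linkage, under the object lock (O) convention
 * described above.  The "memq" page list head on the object is an
 * assumption here, not something this header defines:
 *
 *	vm_page_t m;
 *
 *	for (m = object->memq.tqh_first; m != NULL;
 *	    m = m->listq.tqe_next)
 *		vm_page_test_dirty(m);
 */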

/*
 * Page coloring parameters
 */
/* Each of PQ_FREE, PQ_ZERO and PQ_CACHE has PQ_L2_SIZE entries */

/* Define one of the following */
#if defined(PQ_LARGECACHE)
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME3 17	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 128	/* Number of colors, optimized for a 512K L2 cache */
#define PQ_L1_SIZE 2	/* Two-page L1 cache */
#endif

/*
 * Use 'options PQ_NOOPT' to disable page coloring
 */
#if defined(PQ_NOOPT)
#define PQ_PRIME1 1
#define PQ_PRIME2 1
#define PQ_PRIME3 1
#define PQ_L2_SIZE 1
#define PQ_L1_SIZE 1
#endif

#if defined(PQ_NORMALCACHE)
#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME3 11	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 16	/* Number of colors, optimized for a 64K L2 cache */
#define PQ_L1_SIZE 2	/* Two-page L1 cache */
#endif

#if defined(PQ_MEDIUMCACHE) || !defined(PQ_L2_SIZE)
#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME3 5	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 64	/* Number of colors, optimized for a 256K L2 cache (default) */
#define PQ_L1_SIZE 2	/* Two-page L1 cache */
#endif

#define PQ_L2_MASK (PQ_L2_SIZE - 1)

#define PQ_NONE 0
#define PQ_FREE	1
#define PQ_ZERO (1 + PQ_L2_SIZE)
#define PQ_INACTIVE (1 + 2*PQ_L2_SIZE)
#define PQ_ACTIVE (2 + 2*PQ_L2_SIZE)
#define PQ_CACHE (3 + 2*PQ_L2_SIZE)
#define PQ_COUNT (3 + 3*PQ_L2_SIZE)

extern struct vpgqueues {
	struct pglist *pl;
	int	*cnt;
	int	*lcnt;
} vm_page_queues[PQ_COUNT];

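/*
 * Example (illustrative sketch): a page's queue index is its base
 * queue plus its color, so the colored free list for a page "m" is
 * reached through vm_page_queues[] like this:
 *
 *	struct vpgqueues *vpq;
 *
 *	vpq = &vm_page_queues[PQ_FREE + m->pc];
 *	TAILQ_INSERT_TAIL(vpq->pl, m, pageq);
 *	(*vpq->cnt)++;
 *	(*vpq->lcnt)++;
 *	m->queue = PQ_FREE + m->pc;
 */
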
/*
 * These are the flags defined for vm_page.
 */
#define	PG_BUSY		0x01		/* page is in transit (O) */
#define	PG_WANTED	0x02		/* someone is waiting for page (O) */
#define	PG_TABLED	0x04		/* page is in VP table (O) */
#define	PG_FICTITIOUS	0x08		/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x10		/* page is mapped writeable */
#define	PG_MAPPED	0x20		/* page is mapped */
#define	PG_ZERO		0x40		/* page is zeroed */
#define	PG_REFERENCED	0x80		/* page has been referenced */
#define	PG_CLEANCHK	0x100		/* page has been checked for cleaning */

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64
#define PFCLUSTER_BEHIND	3
#define PFCLUSTER_AHEAD		3
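
/*
 * Example (illustrative sketch): the pageout code is expected to age a
 * page's usage count with the ACT_* constants above, clamped to
 * ACT_MAX; the exact policy lives in the pageout daemon, not here:
 *
 *	if (page_was_referenced)
 *		m->act_count = min(m->act_count + ACT_ADVANCE, ACT_MAX);
 *	else if (m->act_count > ACT_DECLINE)
 *		m->act_count -= ACT_DECLINE;
 *	else
 *		m->act_count = 0;
 */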

#ifdef KERNEL
/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 *
 * The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low-activity pages, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 */

extern struct pglist vm_page_queue_free[PQ_L2_SIZE];	/* memory free queue */
extern struct pglist vm_page_queue_zero[PQ_L2_SIZE];	/* zeroed memory free queue */
extern struct pglist vm_page_queue_active;		/* active memory queue */
extern struct pglist vm_page_queue_inactive;		/* inactive memory queue */
extern struct pglist vm_page_queue_cache[PQ_L2_SIZE];	/* cache memory queue */

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long first_page;			/* first physical page number
					 * represented in vm_page_array */
extern long last_page;			/* last physical page number
					 * represented in vm_page_array
					 * [INCLUSIVE] */
extern vm_offset_t first_phys_addr;	/* physical address for first_page */
extern vm_offset_t last_phys_addr;	/* physical address for last_page */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

#define IS_VM_PHYSADDR(pa) \
		((pa) >= first_phys_addr && (pa) <= last_phys_addr)

#define PHYS_TO_VM_PAGE(pa) \
		(&vm_page_array[atop(pa) - first_page])

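/*
 * Example (illustrative sketch): for a managed physical address "pa",
 * the macros above are inverses up to page truncation (trunc_page()
 * is assumed to be provided by the machine-dependent headers):
 *
 *	vm_page_t m;
 *
 *	if (IS_VM_PHYSADDR(pa)) {
 *		m = PHYS_TO_VM_PAGE(pa);
 *		assert(VM_PAGE_TO_PHYS(m) == trunc_page(pa));
 *	}
 */
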
/*
 *	Functions implemented as macros
 */

#define PAGE_ASSERT_WAIT(m, interruptible)	{ \
				(m)->flags |= PG_WANTED; \
				assert_wait((int) (m), (interruptible)); \
			}

#define PAGE_WAKEUP(m)	{ \
				(m)->flags &= ~PG_BUSY; \
				if ((m)->flags & PG_WANTED) { \
					(m)->flags &= ~PG_WANTED; \
					wakeup((caddr_t) (m)); \
				} \
			}

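/*
 * Example (illustrative sketch): the PG_BUSY/PG_WANTED handshake.  A
 * thread that finds a page busy records its interest and sleeps; the
 * owner later clears PG_BUSY and wakes any waiters.  thread_block()
 * stands in here for whatever sleep primitive the caller uses:
 *
 *	while ((m = vm_page_lookup(object, pindex)) != NULL &&
 *	    (m->flags & PG_BUSY)) {
 *		PAGE_ASSERT_WAIT(m, FALSE);
 *		thread_block();
 *	}
 *	...
 *	PAGE_WAKEUP(m);		when the owner is done with the page
 */
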
#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

#if PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif

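/*
 * Example (illustrative sketch): with 4K pages and DEV_BSIZE == 512
 * there are eight 512-byte chunks per page, one valid/dirty bit each.
 * Marking the first 1K of a page valid and clean sets the low two
 * bits:
 *
 *	vm_page_set_validclean(m, 0, 1024);
 *	now (m->valid & 0x03) == 0x03
 *	if (m->valid == VM_PAGE_BITS_ALL)
 *		the entire page is valid
 */
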
#define VM_ALLOC_NORMAL 0
#define VM_ALLOC_INTERRUPT 1
#define VM_ALLOC_SYSTEM 2
#define VM_ALLOC_ZERO 3

void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_pindex_t, int));
void vm_page_cache __P((register vm_page_t));
static __inline void vm_page_copy __P((vm_page_t, vm_page_t));
void vm_page_deactivate __P((vm_page_t));
void vm_page_free __P((vm_page_t));
void vm_page_free_zero __P((vm_page_t));
void vm_page_insert __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_pindex_t));
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
void vm_page_unwire __P((vm_page_t));
void vm_page_wire __P((vm_page_t));
void vm_page_unqueue __P((vm_page_t));
void vm_page_unqueue_nowakeup __P((vm_page_t));
void vm_page_set_validclean __P((vm_page_t, int, int));
void vm_page_set_invalid __P((vm_page_t, int, int));
static __inline boolean_t vm_page_zero_fill __P((vm_page_t));
int vm_page_is_valid __P((vm_page_t, int, int));
void vm_page_test_dirty __P((vm_page_t));
int vm_page_bits __P((int, int));
vm_page_t vm_page_list_find __P((int, int));
int vm_page_queue_index __P((vm_offset_t, int));
vm_page_t vm_page_select __P((vm_object_t, vm_pindex_t, int));

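/*
 * Example (illustrative sketch): allocating a page for an object and
 * zeroing it only when the allocator did not hand back a pre-zeroed
 * page.  VM_WAIT is assumed to be the usual "sleep until pages are
 * freed" macro from the pageout headers:
 *
 *	vm_page_t m;
 *
 *	while ((m = vm_page_alloc(object, pindex,
 *	    VM_ALLOC_NORMAL)) == NULL)
 *		VM_WAIT;
 *	if ((m->flags & PG_ZERO) == 0)
 *		vm_page_zero_fill(m);
 */
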
/*
 * Keep a page from being freed by the page daemon.  This has much the
 * same effect as wiring, but with far lower overhead; use it only for
 * *very* temporary holding ("wiring").
 */
static __inline void
vm_page_hold(vm_page_t mem)
{
	mem->hold_count++;
}

#ifdef DIAGNOSTIC
#include <sys/systm.h>		/* make GCC shut up */
#endif

static __inline void
vm_page_unhold(vm_page_t mem)
{
#ifdef DIAGNOSTIC
	if (--mem->hold_count < 0)
		panic("vm_page_unhold: hold count < 0!!!");
#else
	--mem->hold_count;
#endif
}
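
/*
 * Example (illustrative sketch): a hold/unhold pairing around a short
 * operation on the page's contents; the page cannot be freed while
 * hold_count > 0:
 *
 *	vm_page_hold(m);
 *	... touch the page's contents ...
 *	vm_page_unhold(m);
 */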

static __inline void
vm_page_protect(vm_page_t mem, int prot)
{
	if (prot == VM_PROT_NONE) {
		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(VM_PAGE_TO_PHYS(mem), prot);
			mem->flags &= ~(PG_WRITEABLE|PG_MAPPED);
		}
	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
		pmap_page_protect(VM_PAGE_TO_PHYS(mem), prot);
		mem->flags &= ~PG_WRITEABLE;
	}
}
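
/*
 * Example (illustrative sketch): the two protection transitions the
 * routine above supports, as a caller would use them:
 *
 *	vm_page_protect(m, VM_PROT_READ);	revoke write access only
 *	vm_page_protect(m, VM_PROT_NONE);	revoke all mappings
 */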

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */
static __inline boolean_t
vm_page_zero_fill(m)
	vm_page_t m;
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */
static __inline void
vm_page_copy(src_m, dest_m)
	vm_page_t src_m;
	vm_page_t dest_m;
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}
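
/*
 * Example (illustrative sketch): the classic copy-on-write step,
 * replacing a shared page with a private copy:
 *
 *	new_m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
 *	vm_page_copy(old_m, new_m);	new_m->valid is now all set
 */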

#endif				/* KERNEL */
#endif				/* !_VM_PAGE_ */