/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#if !defined(KLD_MODULE)
#include "opt_vmpage.h"
#endif

#include <vm/pmap.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A per-object splay tree used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).
 *
 *	The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 *	bits set without having associated valid bits set.  This is used by
 *	NFS to implement piecemeal writes.
 */

TAILQ_HEAD(pglist, vm_page);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO queue or free list (P) */
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) 	*/
	struct vm_page *left;		/* splay tree link (O)		*/
	struct vm_page *right;		/* splay tree link (O)		*/

	vm_object_t object;		/* which object am I in (O,P)*/
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	u_short	queue;			/* page queue index */
	u_short	flags,			/* see below */
		pc;			/* page color */
	u_short wire_count;		/* wired down maps refs (P) */
	short hold_count;		/* page hold count */
	u_char	act_count;		/* page usage count */
	u_char	busy;			/* page busy count */
	/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
#if PAGE_SIZE == 4096
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 8192
	u_short	valid;			/* map of valid DEV_BSIZE chunks */
	u_short	dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 16384
	u_int valid;			/* map of valid DEV_BSIZE chunks */
	u_int dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 32768
	u_long valid;			/* map of valid DEV_BSIZE chunks */
	u_long dirty;			/* map of dirty DEV_BSIZE chunks */
#endif
	u_int cow;			/* page cow mapping count */
};
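
/*
 * A minimal usage sketch (an illustration, not code from this file; it
 * assumes the caller holds the locks required by the (O)/(P) annotations
 * above).  vm_page_lookup() and VM_PAGE_BITS_ALL are declared further
 * down in this header:
 *
 *	vm_page_t m;
 *
 *	m = vm_page_lookup(object, pindex);
 *	if (m != NULL && m->valid == VM_PAGE_BITS_ALL)
 *		...			the page is resident and fully valid
 */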

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif

/*
 * note: currently use SWAPBLK_NONE as an absolute value rather than
 * a flag bit.
 */

#define SWAPBLK_MASK	((daddr_t)((u_daddr_t)-1 >> 1))		/* mask */
#define SWAPBLK_NONE	((daddr_t)((u_daddr_t)SWAPBLK_MASK + 1))/* flag */
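
/*
 * For example (an illustrative sketch, not code from this file): because
 * SWAPBLK_NONE is an absolute value rather than a flag bit, an unassigned
 * swap block is detected by direct comparison:
 *
 *	daddr_t blk;
 *	...
 *	if (blk == SWAPBLK_NONE)
 *		...			no swap block has been assigned
 */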

#if !defined(KLD_MODULE)
/*
 * Page coloring parameters
 */
/* PQ_FREE and PQ_CACHE each have PQ_L2_SIZE entries */

/* Backward compatibility for existing PQ_*CACHE config options. */
#if !defined(PQ_CACHESIZE)
#if defined(PQ_HUGECACHE)
#define PQ_CACHESIZE 1024
#elif defined(PQ_LARGECACHE)
#define PQ_CACHESIZE 512
#elif defined(PQ_MEDIUMCACHE)
#define PQ_CACHESIZE 256
#elif defined(PQ_NORMALCACHE)
#define PQ_CACHESIZE 64
#elif defined(PQ_NOOPT)
#define PQ_CACHESIZE 0
#else
#define PQ_CACHESIZE 128
#endif
#endif			/* !defined(PQ_CACHESIZE) */

#if PQ_CACHESIZE >= 1024
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 256	/* A number of colors optimized for a 1M cache */

#elif PQ_CACHESIZE >= 512
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 128	/* A number of colors optimized for a 512K cache */

#elif PQ_CACHESIZE >= 256
#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 64	/* A number of colors optimized for a 256K cache */

#elif PQ_CACHESIZE >= 128
#define PQ_PRIME1 9	/* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 32	/* A number of colors optimized for a 128K cache */

#elif PQ_CACHESIZE >= 64
#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors (optimized for a 64K cache) */

#else
#define PQ_PRIME1 1	/* Disable page coloring. */
#define PQ_PRIME2 1
#define PQ_L2_SIZE 1

#endif

#define PQ_L2_MASK (PQ_L2_SIZE - 1)

#define PQ_NONE 0
#define PQ_FREE	1
#define PQ_INACTIVE (1 + 1*PQ_L2_SIZE)
#define PQ_ACTIVE (2 + 1*PQ_L2_SIZE)
#define PQ_CACHE (3 + 1*PQ_L2_SIZE)
#define PQ_HOLD  (3 + 2*PQ_L2_SIZE)
#define PQ_COUNT (4 + 2*PQ_L2_SIZE)

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
	int	lcnt;
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];
extern struct mtx vm_page_queue_free_mtx;
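
/*
 * Illustrative sketch (an assumption about usage, not code from this
 * file): the free and cache queues have one entry per color, laid out
 * consecutively, so the queue holding a free page of a given color is
 * found by offsetting from the base queue index (with
 * vm_page_queue_free_mtx held for the free queues):
 *
 *	struct vpgqueues *vpq;
 *
 *	vpq = &vm_page_queues[PQ_FREE + (m->pc & PQ_L2_MASK)];
 *	TAILQ_FOREACH(m, &vpq->pl, pageq)
 *		...
 */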

#endif			/* !defined(KLD_MODULE) */

/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_FILLED and PG_DIRTY are added for the filesystems.
 *
 * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 * 	 not under PV management but otherwise should be treated as a
 *	 normal page.  Pages not under PV management cannot be paged out
 *	 via the object/vm_page_t because there is no knowledge of their
 *	 pte mappings, nor can they be removed from their objects via
 *	 the object, and such pages are also not on any PQ queue.
 */
#define	PG_BUSY		0x0001		/* page is in transit (O) */
#define	PG_WANTED	0x0002		/* someone is waiting for page (O) */
#define PG_WINATCFLS	0x0004		/* flush dirty page on inactive q */
#define	PG_FICTITIOUS	0x0008		/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x0010		/* page is mapped writeable */
#define	PG_ZERO		0x0040		/* page is zeroed */
#define PG_REFERENCED	0x0080		/* page has been referenced */
#define PG_CLEANCHK	0x0100		/* page will be checked for cleaning */
#define PG_SWAPINPROG	0x0200		/* swap I/O in progress on page	     */
#define PG_NOSYNC	0x0400		/* do not collect for syncer */
#define PG_UNMANAGED	0x0800		/* No PV management for page */
#define PG_MARKER	0x1000		/* special queue marker page */
#define	PG_SLAB		0x2000		/* object pointer is actually a slab */
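
/*
 * Illustrative sketch of the PG_BUSY/PG_WANTED handshake (an assumption
 * about typical use, not code from this file), using the functions
 * declared below:
 *
 * retry:
 *	...look the page up...
 *	if (vm_page_sleep_if_busy(m, TRUE, "pgwait"))
 *		goto retry;		slept, so the page may have changed
 *	vm_page_busy(m);		sets PG_BUSY: the page is in transit
 *	...operate on the page...
 *	vm_page_wakeup(m);		clears PG_BUSY, wakes PG_WANTED waiters
 */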

/*
 * Misc constants.
 */
#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64
#define PFCLUSTER_BEHIND	3
#define PFCLUSTER_AHEAD		3
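
/*
 * Illustrative sketch (an assumption about how the pageout code uses the
 * ACT_* constants, not code from this file): a page starts at ACT_INIT,
 * gains ACT_ADVANCE (capped at ACT_MAX) each time it is found referenced,
 * and loses ACT_DECLINE otherwise until it becomes a deactivation
 * candidate:
 *
 *	if (referenced) {
 *		m->act_count += ACT_ADVANCE;
 *		if (m->act_count > ACT_MAX)
 *			m->act_count = ACT_MAX;
 *	} else if (m->act_count > ACT_DECLINE)
 *		m->act_count -= ACT_DECLINE;
 *	else
 *		vm_page_deactivate(m);
 */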

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of the following lists:
 *
 *	free
 *		Available for allocation now.
 *
 * The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation. Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active" i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed
 *
 */

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa) \
		(&vm_page_array[atop(pa) - first_page ])
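
/*
 * For example (an illustrative sketch): the two macros convert between a
 * page and its physical address; PHYS_TO_VM_PAGE() is only meaningful for
 * addresses covered by vm_page_array:
 *
 *	vm_paddr_t pa;
 *
 *	pa = VM_PAGE_TO_PHYS(m);
 *	KASSERT(PHYS_TO_VM_PAGE(pa) == m, ("round trip failed"));
 */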

extern struct mtx vm_page_queue_mtx;
#define vm_page_lock_queues()   mtx_lock(&vm_page_queue_mtx)
#define vm_page_unlock_queues() mtx_unlock(&vm_page_queue_mtx)
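
/*
 * Illustrative sketch (an assumption about typical use): fields and queues
 * marked (P) above are manipulated with the page queues lock held, e.g.:
 *
 *	vm_page_lock_queues();
 *	vm_page_activate(m);		move the page to the active queue
 *	vm_page_unlock_queues();
 */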

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xffu
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffffu
#elif PAGE_SIZE == 16384
#define VM_PAGE_BITS_ALL 0xffffffffu
#elif PAGE_SIZE == 32768
#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
#endif
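
/*
 * For example (an illustrative sketch): VM_PAGE_BITS_ALL has one bit per
 * DEV_BSIZE chunk in a page.  With PAGE_SIZE 4096 and DEV_BSIZE 512 there
 * are eight chunks, and vm_page_bits() (declared below) builds the mask
 * covering a byte range within the page:
 *
 *	if (m->valid == VM_PAGE_BITS_ALL)
 *		...				the whole page is valid
 *	pagebits = vm_page_bits(0, 1024);	mask for the first two chunks
 */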

/* page allocation classes: */
#define VM_ALLOC_NORMAL		0
#define VM_ALLOC_INTERRUPT	1
#define VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
/* page allocation flags: */
#define	VM_ALLOC_WIRED		0x0020	/* non-pageable */
#define	VM_ALLOC_ZERO		0x0040	/* Try to obtain a zeroed page */
#define	VM_ALLOC_RETRY		0x0080	/* vm_page_grab() only */
#define	VM_ALLOC_NOOBJ		0x0100	/* No associated object */
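
/*
 * A minimal allocation sketch (an assumption about typical use, not code
 * from this file; vm_page_alloc() expects the object to be locked).  An
 * allocation class is combined with flags; the result is NULL when no
 * page is available:
 *
 *	m = vm_page_alloc(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *	if (m == NULL)
 *		...				wait for memory and retry
 *	else if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);		VM_ALLOC_ZERO is only a hint
 */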

void vm_page_flag_set(vm_page_t m, unsigned short bits);
void vm_page_flag_clear(vm_page_t m, unsigned short bits);
void vm_page_busy(vm_page_t m);
void vm_page_flash(vm_page_t m);
void vm_page_io_start(vm_page_t m);
void vm_page_io_finish(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_copy(vm_page_t src_m, vm_page_t dest_m);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);
int vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg);
void vm_page_dirty(vm_page_t m);
void vm_page_wakeup(vm_page_t m);

void vm_pageq_init(void);
vm_page_t vm_pageq_add_new_page(vm_paddr_t pa);
void vm_pageq_enqueue(int queue, vm_page_t m);
void vm_pageq_remove_nowakeup(vm_page_t m);
void vm_pageq_remove(vm_page_t m);
vm_page_t vm_pageq_find(int basequeue, int index, boolean_t prefer_zero);
void vm_pageq_requeue(vm_page_t m);

void vm_page_activate (vm_page_t);
vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
void vm_page_cache (register vm_page_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed (register vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_splay(vm_pindex_t, vm_page_t);
vm_offset_t vm_page_startup (vm_offset_t, vm_offset_t, vm_offset_t);
void vm_page_unmanage (vm_page_t);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_set_dirty (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_zero_idle_wakeup(void);
void vm_page_cowfault (vm_page_t);
void vm_page_cowsetup (vm_page_t);
void vm_page_cowclear (vm_page_t);

/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */