/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		An entry in the object's splay tree of resident
 *		pages, used to quickly perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized using either one of or a combination of the lock on the
 *	object that the page belongs to (O), the pool lock for the page (P),
 *	or the lock for either the free or paging queues (Q).  If a field is
 *	annotated below with two of these locks, then holding either lock is
 *	sufficient for read access, but both locks are required for write
 *	access.
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is machine dependent (M).  In the
 *	machine-independent layer, the lock on the object that the
 *	page belongs to must be held in order to operate on the field.
 *	However, the pmap layer is permitted to set all bits within
 *	the field without holding that lock.  If the underlying
 *	architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.  In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().
 */
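
/*
 * Example (illustrative sketch, not part of this header): machine-
 * independent code operates on the dirty field only with the object
 * lock held, per the rules above; "obj" and "m" are hypothetical
 * locals.
 *
 *	VM_OBJECT_LOCK(obj);
 *	vm_page_test_dirty(m);
 *	if (m->dirty != 0)
 *		vm_page_clear_dirty(m, 0, PAGE_SIZE);
 *	VM_OBJECT_UNLOCK(obj);
 */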

TAILQ_HEAD(pglist, vm_page);

#if PAGE_SIZE == 4096
#define	VM_PAGE_BITS_ALL 0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define	VM_PAGE_BITS_ALL 0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define	VM_PAGE_BITS_ALL 0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define	VM_PAGE_BITS_ALL 0xffffffffffffffffu
typedef uint64_t vm_page_bits_t;
#endif
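
/*
 * Example (illustrative): with PAGE_SIZE 4096 and DEV_BSIZE 512 a page
 * holds eight DEV_BSIZE chunks, so vm_page_bits_t is uint8_t and the
 * mask covering the first two chunks is
 *
 *	vm_page_bits(0, 2 * DEV_BSIZE)		== 0x03
 *
 * which is the form taken by the valid and dirty fields below.
 */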

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO queue or free list (Q) */
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O)	*/
	struct vm_page *left;		/* splay tree link (O)		*/
	struct vm_page *right;		/* splay tree link (O)		*/

	vm_object_t object;		/* which object am I in (O,P)	*/
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	uint8_t	queue;			/* page queue index (P,Q) */
	int8_t segind;			/* vm_phys segment index */
	short hold_count;		/* page hold count (P) */
	uint8_t	order;			/* index of the buddy queue */
	uint8_t pool;			/* vm_phys free pool index */
	u_short cow;			/* page cow mapping count (P) */
	u_int wire_count;		/* wired down maps refs (P) */
	uint8_t aflags;			/* access is atomic */
	uint8_t flags;			/* see below, often immutable after alloc */
	u_short oflags;			/* page flags (O) */
	u_char	act_count;		/* page usage count (O) */
	u_char	busy;			/* page busy count (O) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
	/* so, on normal x86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* map of valid DEV_BSIZE chunks (O) */
	vm_page_bits_t dirty;		/* map of dirty DEV_BSIZE chunks (M) */
};

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 *	 indicates that the page is not under PV management but
 *	 otherwise should be treated as a normal page.  Pages not
 *	 under PV management cannot be paged out via the
 *	 object/vm_page_t because there is no knowledge of their pte
 *	 mappings, and such pages are also not on any PQ queue.
 */
#define	VPO_BUSY	0x0001	/* page is in transit */
#define	VPO_WANTED	0x0002	/* someone is waiting for page */
#define	VPO_UNMANAGED	0x0004	/* no PV management for page */
#define	VPO_SWAPINPROG	0x0200	/* swap I/O in progress on page */
#define	VPO_NOSYNC	0x0400	/* do not collect for syncer */

#define	PQ_NONE		255
#define	PQ_INACTIVE	0
#define	PQ_ACTIVE	1
#define	PQ_HOLD		2
#define	PQ_COUNT	3

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];

struct vpglocks {
	struct mtx	data;
	char		pad[CACHE_LINE_SIZE - sizeof(struct mtx)];
} __aligned(CACHE_LINE_SIZE);

extern struct vpglocks vm_page_queue_free_lock;
extern struct vpglocks pa_lock[];

#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define	PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	&pa_lock[pa_index((pa)) % PA_LOCK_COUNT].data
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa)			\
	do {					\
		if ((pa) != 0) {		\
			PA_UNLOCK((pa));	\
			(pa) = 0;		\
		}				\
	} while (0)
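
/*
 * Example (illustrative sketch): the "conditionally held pa lock"
 * idiom used together with vm_page_pa_tryrelock(); "locked_pa" is a
 * hypothetical local that is zero whenever no pa lock is held.
 *
 *	vm_paddr_t locked_pa = 0;
 *
 *	PA_LOCK(pa);
 *	locked_pa = pa;
 *	...
 *	PA_UNLOCK_COND(locked_pa);
 */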

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))

#ifdef KLD_MODULE
#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#if defined(INVARIANTS)
#define	vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define	vm_page_lock_assert(m, a)
#endif
#else	/* !KLD_MODULE */
#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#define	vm_page_lock_assert(m, a)	mtx_assert(vm_page_lockptr((m)), (a))
#endif	/* KLD_MODULE */
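
/*
 * Example (illustrative): fields annotated (P) above, such as
 * hold_count, are manipulated under the page lock.
 *
 *	vm_page_lock(m);
 *	vm_page_hold(m);
 *	vm_page_unlock(m);
 *	...
 *	vm_page_lock(m);
 *	vm_page_unhold(m);
 *	vm_page_unlock(m);
 */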

#define	vm_page_queue_free_mtx	vm_page_queue_free_lock.data

/*
 * These are the flags defined for vm_page.
 *
 * aflags are updated by atomic accesses.  Use the vm_page_aflag_set()
 * and vm_page_aflag_clear() functions to set and clear the flags.
 *
 * PGA_REFERENCED may be cleared only if the object containing the page is
 * locked.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().  When it
 * does so, the page must be VPO_BUSY.
 */
#define	PGA_WRITEABLE	0x01		/* page may be mapped writeable */
#define	PGA_REFERENCED	0x02		/* page has been referenced */
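
/*
 * Example (illustrative sketch): a reference is recorded with the
 * atomic helper, and, per the rule above, PGA_REFERENCED is cleared
 * only with the object locked.
 *
 *	vm_page_aflag_set(m, PGA_REFERENCED);
 *	...
 *	VM_OBJECT_LOCK(object);
 *	if ((m->aflags & PGA_REFERENCED) != 0)
 *		vm_page_aflag_clear(m, PGA_REFERENCED);
 *	VM_OBJECT_UNLOCK(object);
 */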

/*
 * Page flags.  If changed at any other time than page allocation or
 * freeing, the modification must be protected by the vm_page lock.
 */
#define	PG_CACHED	0x01		/* page is cached */
#define	PG_FREE		0x02		/* page is free */
#define	PG_FICTITIOUS	0x04		/* physical page doesn't exist (O) */
#define	PG_ZERO		0x08		/* page is zeroed */
#define	PG_MARKER	0x10		/* special queue marker page */
#define	PG_SLAB		0x20		/* object pointer is actually a slab */
#define	PG_WINATCFLS	0x40		/* flush dirty page on inactive q */

/*
 * Misc constants.
 */
#define	ACT_DECLINE		1
#define	ACT_ADVANCE		3
#define	ACT_INIT		5
#define	ACT_MAX			64

#ifdef _KERNEL

#include <vm/vm_param.h>

/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	cache
 *		Almost available for allocation.  Still associated with
 *		an object, but clean and immediately freeable.
 *
 *	hold
 *		Will become free after a pending I/O operation
 *		completes.
 *
 * The following lists are LRU sorted:
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 */

struct vnode;
extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define	VM_PAGE_IS_FREE(m)	(((m)->flags & PG_FREE) != 0)

#define	VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

vm_page_t vm_phys_paddr_to_vm_page(vm_paddr_t pa);

static __inline vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

static __inline vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
#ifdef VM_PHYSSEG_SPARSE
	return (vm_phys_paddr_to_vm_page(pa));
#elif defined(VM_PHYSSEG_DENSE)
	return (&vm_page_array[atop(pa) - first_page]);
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}
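
/*
 * Example (illustrative): round-tripping between a physical address
 * and its vm_page; "pa" is assumed to lie within a managed segment.
 *
 *	vm_page_t m = PHYS_TO_VM_PAGE(pa);
 *	KASSERT(VM_PAGE_TO_PHYS(m) == trunc_page(pa),
 *	    ("PHYS_TO_VM_PAGE: pa %jx mismatch", (uintmax_t)pa));
 */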

extern struct vpglocks vm_page_queue_lock;

#define	vm_page_queue_mtx	vm_page_queue_lock.data
#define	vm_page_lock_queues()	mtx_lock(&vm_page_queue_mtx)
#define	vm_page_unlock_queues()	mtx_unlock(&vm_page_queue_mtx)

/* page allocation classes: */
#define	VM_ALLOC_NORMAL		0
#define	VM_ALLOC_INTERRUPT	1
#define	VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
/* page allocation flags: */
#define	VM_ALLOC_WIRED		0x0020	/* non pageable */
#define	VM_ALLOC_ZERO		0x0040	/* Try to obtain a zeroed page */
#define	VM_ALLOC_RETRY		0x0080	/* Mandatory with vm_page_grab() */
#define	VM_ALLOC_NOOBJ		0x0100	/* No associated object */
#define	VM_ALLOC_NOBUSY		0x0200	/* Do not busy the page */
#define	VM_ALLOC_IFCACHED	0x0400	/* Fail if the page is not cached */
#define	VM_ALLOC_IFNOTCACHED	0x0800	/* Fail if the page is cached */
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* vm_page_grab() only */

#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)
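
/*
 * Example (illustrative sketch): with "obj" locked, allocate a wired
 * page at index "pidx", preferring a pre-zeroed page.  VM_ALLOC_ZERO
 * is only a hint, so PG_ZERO must be checked before relying on the
 * page being zero filled.
 *
 *	m = vm_page_alloc(obj, pidx,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *	if (m == NULL)
 *		return (NULL);
 *	if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */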

void vm_page_aflag_set(vm_page_t m, uint8_t bits);
void vm_page_aflag_clear(vm_page_t m, uint8_t bits);
void vm_page_busy(vm_page_t m);
void vm_page_flash(vm_page_t m);
void vm_page_io_start(vm_page_t m);
void vm_page_io_finish(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);
void vm_page_dirty(vm_page_t m);
void vm_page_wakeup(vm_page_t m);

void vm_pageq_remove(vm_page_t m);

void vm_page_activate(vm_page_t);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
void vm_page_cache(vm_page_t);
void vm_page_cache_free(vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_page_cache_remove(vm_page_t);
void vm_page_cache_transfer(vm_object_t, vm_pindex_t, vm_object_t);
int vm_page_try_to_cache(vm_page_t);
int vm_page_try_to_free(vm_page_t);
void vm_page_dontneed(vm_page_t);
void vm_page_deactivate(vm_page_t);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_insert(vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
vm_page_t vm_page_prev(vm_page_t m);
void vm_page_putfake(vm_page_t m);
void vm_page_reference(vm_page_t m);
void vm_page_remove(vm_page_t);
void vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_requeue(vm_page_t m);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
void vm_page_sleep(vm_page_t m, const char *msg);
vm_page_t vm_page_splay(vm_pindex_t, vm_page_t);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unwire(vm_page_t, int);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire(vm_page_t);
void vm_page_set_validclean(vm_page_t, int, int);
void vm_page_clear_dirty(vm_page_t, int, int);
void vm_page_set_invalid(vm_page_t, int, int);
int vm_page_is_valid(vm_page_t, int, int);
void vm_page_test_dirty(vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_zero_idle_wakeup(void);
void vm_page_cowfault(vm_page_t);
int vm_page_cowsetup(vm_page_t);
void vm_page_cowclear(vm_page_t);

void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif

#ifdef INVARIANTS
void vm_page_object_lock_assert(vm_page_t m);
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	vm_page_object_lock_assert(m)
#else
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	(void)0
#endif

/*
 *	vm_page_sleep_if_busy:
 *
 *	Sleep and release the object lock if VPO_BUSY is set or, if
 *	also_m_busy is TRUE, busy is non-zero.  Returns TRUE if the
 *	thread slept and the object lock was released.  Otherwise,
 *	retains the object lock and returns FALSE.
 *
 *	The object containing the given page must be locked.
 */
static __inline int
vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
{

	if ((m->oflags & VPO_BUSY) || (also_m_busy && m->busy)) {
		vm_page_sleep(m, msg);
		return (TRUE);
	}
	return (FALSE);
}
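
/*
 * Example (illustrative sketch): the usual retry loop; because a
 * sleep releases the object lock, the caller must relock and redo
 * the lookup after waking up.  "obj", "pidx", and the wait message
 * are hypothetical.
 *
 *	VM_OBJECT_LOCK(obj);
 * retry:
 *	m = vm_page_lookup(obj, pidx);
 *	if (m != NULL && vm_page_sleep_if_busy(m, TRUE, "pgwait")) {
 *		VM_OBJECT_LOCK(obj);
 *		goto retry;
 *	}
 *	...
 *	VM_OBJECT_UNLOCK(obj);
 */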

/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

	VM_PAGE_OBJECT_LOCK_ASSERT(m);
	m->dirty = 0;
}

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */