/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved  	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#ifndef	_VM_PAGE_H
#define	_VM_PAGE_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <vm/seg.h>

#ifdef	__cplusplus
extern "C" {
#endif

#if defined(_KERNEL) || defined(_KMEMUSER)

/*
 * Shared/Exclusive lock.
 */

/*
 * Types of page locking supported by page_lock & friends.
 */
typedef enum {
	SE_SHARED,
	SE_EXCL			/* exclusive lock (value == -1) */
} se_t;

/*
 * For requesting that page_lock reclaim the page from the free list.
 */
typedef enum {
	P_RECLAIM,		/* reclaim page from free list */
	P_NO_RECLAIM		/* DON'T reclaim the page	*/
} reclaim_t;

/*
 * Callers of page_try_reclaim_lock and page_lock_es can use this flag
 * to get SE_EXCL access before reader/writers are given access.
 */
#define	SE_EXCL_WANTED	0x02

#endif	/* _KERNEL | _KMEMUSER */

typedef int	selock_t;

/*
 * Define VM_STATS to turn on all sorts of statistic gathering about
 * the VM layer.  By default, it is only turned on when DEBUG is
 * also defined.
 */
#ifdef DEBUG
#define	VM_STATS
#endif	/* DEBUG */

#ifdef VM_STATS
#define	VM_STAT_ADD(stat)			(stat)++
#define	VM_STAT_COND_ADD(cond, stat)		((void) (!(cond) || (stat)++))
#else
#define	VM_STAT_ADD(stat)
#define	VM_STAT_COND_ADD(cond, stat)
#endif	/* VM_STATS */
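
/*
 * A minimal usage sketch (illustrative only; the `lookupstats' struct
 * below is hypothetical).  Subsystems typically keep counters in a
 * static struct and bump them with these macros, so the counters
 * compile away entirely in non-DEBUG kernels:
 *
 *	static struct {
 *		uint_t	hits;
 *		uint_t	misses;
 *	} lookupstats;
 *
 *	VM_STAT_ADD(lookupstats.hits);
 *	VM_STAT_COND_ADD(pp == NULL, lookupstats.misses);
 */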

#ifdef _KERNEL

/*
 * Macros to acquire and release the page logical lock.
 */
#define	page_struct_lock(pp)	mutex_enter(&page_llock)
#define	page_struct_unlock(pp)	mutex_exit(&page_llock)

#endif	/* _KERNEL */

#include <sys/t_lock.h>

struct as;

/*
 * Each physical page has a page structure, which is used to maintain
 * these pages as a cache.  A page can be found via a hashed lookup
 * based on the [vp, offset].  If a page has a [vp, offset] identity,
 * then it is entered on a doubly linked circular list off the
 * vnode using the p_vpnext/p_vpprev pointers.   If the p_free bit
 * is on, then the page is also on a doubly linked circular free
 * list using the p_next/p_prev pointers.  If the "p_selock" and "p_iolock"
 * are held, then the page is currently being read in (exclusive p_selock)
 * or written back (shared p_selock).  In this case, the p_next/p_prev
 * pointers are used to link the pages together for a consecutive i/o
 * request.  If the page is being brought in from its backing store, then
 * other processes will wait for the i/o to complete before attaching to
 * the page since it will have an "exclusive" lock.
 *
 * Each page structure has the locks described below along with
 * the fields they protect:
 *
 *	p_selock	This is a per-page shared/exclusive lock that is
 *			used to implement the logical shared/exclusive
 *			lock for each page.  The "shared" lock is normally
 *			used in most cases while the "exclusive" lock is
 *			required to destroy or retain exclusive access to
 *			a page (e.g., while reading in pages).  The appropriate
 *			lock is always held whenever there is any reference
 *			to a page structure (e.g., during i/o).
 *			(Note that with the addition of the "writer-lock-wanted"
 *			semantics (via SE_EWANTED), threads must not acquire
 *			multiple reader locks or else a deadly embrace will
 *			occur in the following situation: thread 1 obtains a
 *			reader lock; next thread 2 fails to get a writer lock
 *			but specified SE_EWANTED so it will wait by either
 *			blocking (when using page_lock_es) or spinning while
 *			retrying (when using page_try_reclaim_lock) until the
 *			reader lock is released; then thread 1 attempts to
 *			get another reader lock but is denied due to
 *			SE_EWANTED being set, and now both threads are in a
 *			deadly embrace.)
 *
 *				p_hash
 *				p_vnode
 *				p_offset
 *
 *				p_free
 *				p_age
 *
 *	p_iolock	This is a binary semaphore lock that provides
 *			exclusive access to the i/o list links in each
 *			page structure.  It is always held while the page
 *			is on an i/o list (i.e., involved in i/o).  That is,
 *			even though a page may be only `shared' locked
 *			while it is doing a write, the following fields may
 *			change anyway.  Normally, the page must be
 *			`exclusively' locked to change anything in it.
 *
 *				p_next
 *				p_prev
 *
 * The following fields are protected by the global page_llock:
 *
 *				p_lckcnt
 *				p_cowcnt
 *
 * The following lists are protected by the global page_freelock:
 *
 *				page_cachelist
 *				page_freelist
 *
 * The following, for our purposes, are protected by
 * the global freemem_lock:
 *
 *				freemem
 *				freemem_wait
 *				freemem_cv
 *
 * The following fields are protected by hat layer lock(s).  When a page
 * structure is not mapped and is not associated with a vnode (after a call
 * to page_hashout() for example) the p_nrm field may be modified without
 * holding the hat layer lock:
 *
 *				p_nrm
 *				p_mapping
 *				p_share
 *
 * The following field is file system dependent.  How it is used and
 * the locking strategies applied are up to the individual file system
 * implementation.
 *
 *				p_fsdata
 *
 *
 * The page structure is used to represent and control the system's
 * physical pages.  There is one instance of the structure for each
 * page that is not permanently allocated.  For example, the pages that
 * hold the page structures are permanently held by the kernel
 * and hence do not need page structures to track them.  The array
 * of page structures is allocated early on in the kernel's life and
 * is based on the amount of available physical memory.
 *
 * Each page structure may simultaneously appear on several linked lists.
 * The lists are:  hash list, free or i/o list, and a vnode's page list.
 * Each type of list is protected by a different group of mutexes as described
 * below:
 *
 * The hash list is used to quickly find a page when the page's vnode and
 * offset within the vnode are known.  Each page that is hashed is
 * connected via the `p_hash' field.  The anchor for each hash is in the
 * array `page_hash'.  An array of mutexes, `ph_mutex', protects the
 * lists anchored by page_hash[].  To either search or modify a given hash
 * list, the appropriate mutex in the ph_mutex array must be held.
 *
 * The free list contains pages that are `free to be given away'.  For
 * efficiency reasons, pages on this list are placed in two categories:
 * pages that are still associated with a vnode, and pages that are not
 * associated with a vnode.  Free pages always have their `p_free' bit set;
 * free pages that are still associated with a vnode also have their
 * `p_age' bit set.  Pages on the free list are connected via their
 * `p_next' and `p_prev' fields.  When a page is involved in some sort
 * of i/o, it is not free and these fields may be used to link associated
 * pages together.  At the moment, the free list is protected by a
 * single mutex `page_freelock'.  The list of free pages still associated
 * with a vnode is anchored by `page_cachelist' while other free pages
 * are anchored in architecture dependent ways (to handle page coloring etc.).
 *
 * Pages associated with a given vnode appear on a list anchored in the
 * vnode by the `v_pages' field.  They are linked together with
 * `p_vpnext' and `p_vpprev'.  The field `p_offset' contains a page's
 * offset within the vnode.  The pages on this list are not kept in
 * offset order.  These lists, in a manner similar to the hash lists,
 * are protected by an array of mutexes called `vph_mutex'.  Before
 * searching or modifying this chain the appropriate mutex in the
 * vph_mutex[] array must be held.
 *
 * Again, each of the lists that a page can appear on is protected by a
 * mutex.  Before reading or writing any of the fields comprising the
 * list, the appropriate lock must be held.  These list locks should only
 * be held for very short intervals.
 *
 * In addition to the list locks, each page structure contains a
 * shared/exclusive lock that protects various fields within it.
 * To modify one of these fields, the `p_selock' must be exclusively held.
 * To read a field with a degree of certainty, the lock must be at least
 * held shared.
 *
 * Removing a page structure from one of the lists requires holding
 * the appropriate list lock and the page's p_selock.  A page may be
 * prevented from changing identity, being freed, or otherwise modified
 * by acquiring p_selock shared.
 *
 * To avoid deadlocks, a strict locking protocol must be followed.  Basically
 * there are two cases:  In the first case, the page structure in question
 * is known ahead of time (e.g., when the page is to be added or removed
 * from a list).  In the second case, the page structure is not known and
 * must be found by searching one of the lists.
 *
 * When adding or removing a known page to one of the lists, first the
 * page must be exclusively locked (since at least one of its fields
 * will be modified), second the lock protecting the list must be acquired,
 * third the page inserted or deleted, and finally the list lock dropped.
 *
 * The more interesting case occurs when the particular page structure
 * is not known ahead of time.  For example, when a call is made to
 * page_lookup(), it is not known if a page with the desired (vnode and
 * offset pair) identity exists.  So the appropriate mutex in ph_mutex is
 * acquired, the hash list searched, and if the desired page is found
 * an attempt is made to lock it.  The attempt to acquire p_selock must
 * not block while the hash list lock is held.  A deadlock could occur
 * if some other process was trying to remove the page from the list.
 * The removing process (following the above protocol) would have exclusively
 * locked the page, and be spinning waiting to acquire the lock protecting
 * the hash list.  Since the searching process holds the hash list lock
 * and is waiting to acquire the page lock, a deadlock occurs.
 *
 * The proper scheme to follow is: first, lock the appropriate list,
 * search the list, and if the desired page is found either use
 * page_trylock() (which will not block) or pass the address of the
 * list lock to page_lock().  If page_lock() can not acquire the page's
 * lock, it will drop the list lock before going to sleep.  page_lock()
 * returns a value to indicate if the list lock was dropped allowing the
 * calling program to react appropriately (i.e., retry the operation).
 *
 * If the list lock was dropped before the attempt at locking the page
 * was made, checks would have to be made to ensure that the page had
 * not changed identity before its lock was obtained.  This is because
 * the interval between dropping the list lock and acquiring the page
 * lock is indeterminate.
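 *
 * A minimal sketch of this scheme (illustrative only; page_lookup()
 * implements the real, fully-checked version):
 *
 *	index = PAGE_HASH_FUNC(vp, off);
 *	phm = PAGE_HASH_MUTEX(index);
 *	mutex_enter(phm);
 * top:
 *	search the chain at page_hash[index] for a page with [vp, off];
 *	if (pp != NULL && !page_lock(pp, SE_SHARED, phm, P_RECLAIM)) {
 *		page_lock() failed, dropping phm before it slept; the
 *		page may even have changed identity, so reacquire phm
 *		and retry the search:
 *		mutex_enter(phm);
 *		goto top;
 *	}
 *	mutex_exit(phm);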
 *
 * In addition, when both a hash list lock (ph_mutex[]) and a vnode list
 * lock (vph_mutex[]) are needed, the hash list lock must be acquired first.
 * The routine page_hashin() is a good example of this sequence.
 * This sequence is ASSERTed by checking that the vph_mutex[] is not held
 * just before each acquisition of one of the mutexes in ph_mutex[].
 *
 * So, as a quick summary:
 *
 * 	pse_mutex[]'s protect the p_selock and p_cv fields.
 *
 * 	p_selock protects the p_free, p_age, p_vnode, p_offset and p_hash.
 *
 * 	ph_mutex[]'s protect the page_hash[] array and its chains.
 *
 * 	vph_mutex[]'s protect the v_pages field and the vp page chains.
 *
 *	First lock the page, then the hash chain, then the vnode chain.  When
 *	this is not possible `trylocks' must be used.  Sleeping while holding
 *	any of these mutexes (p_selock is not a mutex) is not allowed.
 *
 *
 *	field		reading		writing		    ordering
 *	=====================================================================
 *	p_vnode		p_selock(E,S)	p_selock(E)
 *	p_offset
 *	p_free
 *	p_age
 *	=====================================================================
 *	p_hash		p_selock(E,S)	p_selock(E) &&	    p_selock, ph_mutex
 *					ph_mutex[]
 *	=====================================================================
 *	p_vpnext	p_selock(E,S)	p_selock(E) &&	    p_selock, vph_mutex
 *	p_vpprev			vph_mutex[]
 *	=====================================================================
 *	When the p_free bit is set:
 *
 *	p_next		p_selock(E,S)	p_selock(E) &&	    p_selock,
 *	p_prev				page_freelock	    page_freelock
 *
 *	When the p_free bit is not set:
 *
 *	p_next		p_selock(E,S)	p_selock(E) &&	    p_selock, p_iolock
 *	p_prev				p_iolock
 *	=====================================================================
 *	p_selock	pse_mutex[]	pse_mutex[]	    can't acquire any
 *	p_cv						    other mutexes or
 *							    sleep while holding
 *							    this lock.
 *	=====================================================================
 *	p_lckcnt	p_selock(E,S)	p_selock(E) &&
 *	p_cowcnt			page_llock
 *	=====================================================================
 *	p_nrm		hat layer lock	hat layer lock
 *	p_mapping
 *	p_pagenum
 *	=====================================================================
 *
 *	where:
 *		E----> exclusive version of p_selock.
 *		S----> shared version of p_selock.
 *
 *
 *	Global data structures and variables:
 *
 *	field		reading		writing		    ordering
 *	=====================================================================
 *	page_hash[]	ph_mutex[]	ph_mutex[]	    can hold this lock
 *							    before acquiring
 *							    a vph_mutex or
 *							    pse_mutex.
 *	=====================================================================
 *	vp->v_pages	vph_mutex[]	vph_mutex[]	    can only acquire
 *							    a pse_mutex while
 *							    holding this lock.
 *	=====================================================================
 *	page_cachelist	page_freelock	page_freelock	    can't acquire any
 *	page_freelist	page_freelock	page_freelock	    other mutexes while
 *							    holding this lock.
 *	=====================================================================
 *	freemem		freemem_lock	freemem_lock	    can't acquire any
 *	freemem_wait					    other mutexes while
 *	freemem_cv					    holding this mutex.
 *	=====================================================================
 *
 * Page relocation, PG_NORELOC and P_NORELOC.
 *
 * Pages may be relocated using the page_relocate() interface. Relocation
 * involves moving the contents and identity of a page to another, free page.
 * To relocate a page, the SE_EXCL lock must be obtained. The way to prevent
 * a page from being relocated is to hold the SE_SHARED lock (the SE_EXCL
 * lock must not be held indefinitely). If the page is going to be held
 * SE_SHARED indefinitely, then the PG_NORELOC hint should be passed
 * to page_create_va so that pages that are prevented from being relocated
 * can be managed differently by the platform specific layer.
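 *
 * For example (illustrative only; the PG_* flags are defined later in
 * this file), a caller intending to hold a page SE_SHARED for a long
 * time might allocate it with:
 *
 *	pp = page_create_va(vp, off, PAGESIZE, PG_EXCL | PG_WAIT |
 *	    PG_NORELOC, seg, vaddr);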
 *
 * Pages locked in memory using page_pp_lock (p_lckcnt/p_cowcnt != 0)
 * are guaranteed to be held in memory, but can still be relocated
 * providing the SE_EXCL lock can be obtained.
 *
 * The P_NORELOC bit in the page_t.p_state field is provided for use by
 * the platform specific code in managing pages when the PG_NORELOC
 * hint is used.
 *
 * Memory delete and page locking.
 *
 * The set of all usable pages is managed using the global page list as
 * implemented by the memseg structure defined below. When memory is added
 * or deleted this list changes. Additions to this list guarantee that the
 * list is never corrupt.  In order to avoid the necessity of an additional
 * lock to protect against failed accesses to the memseg being deleted and,
 * more importantly, the page_ts, the memseg structure is never freed and the
 * page_t virtual address space is remapped to a page (or pages) of
 * zeros.  If a page_t is manipulated while it is p_selock'd, or if it is
 * locked indirectly via a hash or freelist lock, it is not possible for
 * memory delete to collect the page and so that part of the page list is
 * prevented from being deleted. If the page is referenced outside of one
 * of these locks, it is possible for the page_t being referenced to be
 * deleted.  Examples of this are page_t pointers returned by
 * page_numtopp_nolock, page_first and page_next.  Providing the page_t
 * is re-checked after taking the p_selock (for p_vnode != NULL), the
 * remapping to the zero pages will be detected.
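 *
 * A minimal sketch of that re-check (illustrative only):
 *
 *	pp = page_numtopp_nolock(pfn);
 *	if (pp != NULL && page_trylock(pp, SE_SHARED)) {
 *		if (pp->p_vnode == NULL) {
 *			page_unlock(pp);
 *			treat pp as possibly deleted (it may be a
 *			remapped, zeroed page_t) and retry;
 *		}
 *	}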
 *
 *
 * Page size (p_szc field) and page locking.
 *
 * The p_szc field of free pages is changed by the free list manager under
 * freelist locks and is of no concern to the rest of the VM subsystem.
 *
 * p_szc changes of allocated anonymous (swapfs) pages can be done only
 * after exclusively locking all constituent pages and calling
 * hat_pageunload() on each of them. To prevent p_szc changes of non-free
 * anonymous (swapfs) large pages it's enough to either lock SHARED any of
 * the constituent pages or to prevent hat_pageunload() by holding the hat
 * level lock that protects mapping lists (this method is for hat code only).
 *
 * To increase (promote) p_szc of allocated non anonymous file system pages
 * one has to first lock exclusively all involved constituent pages and call
 * hat_pageunload() on each of them. To prevent p_szc promotion it's enough
 * to either lock SHARED any of the constituent pages that will be needed to
 * make a large page or to prevent hat_pageunload() by holding the hat level
 * lock that protects mapping lists (this method is for hat code only).
 *
 * To decrease (demote) p_szc of an allocated non anonymous file system large
 * page one can either use the same method as used for changing p_szc of
 * anonymous large pages or, if it's not possible to lock all constituent
 * pages exclusively, a different method can be used. In the second method
 * one only has to exclusively lock one of the constituent pages and then
 * acquire further locks by calling page_szc_lock() and
 * hat_page_demote(). hat_page_demote() acquires hat level locks and then
 * demotes the page. This mechanism relies on the fact that any code that
 * needs to prevent p_szc of a file system large page from changing either
 * locks all constituent large pages at least SHARED or locks some pages at
 * least SHARED and calls page_szc_lock() or uses hat level page locks.
 * Demotion using this method is implemented by page_demote_vp_pages().
 * Please see comments in front of page_demote_vp_pages(), hat_page_demote()
 * and page_szc_lock() for more details.
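 *
 * A minimal sketch of the second demotion method (illustrative only;
 * the exact hat_page_demote() signature is hat implementation specific):
 *
 *	if (page_trylock(pp, SE_EXCL)) {
 *		kmutex_t *mtx = page_szc_lock(pp);
 *		if (mtx != NULL) {
 *			hat_page_demote(pp);
 *			mutex_exit(mtx);
 *		}
 *		page_unlock(pp);
 *	}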
 *
 * Lock order: p_selock, page_szc_lock, ph_mutex/vph_mutex/freelist,
 * hat level locks.
 */

typedef struct page {
	u_offset_t	p_offset;	/* offset into vnode for this page */
	struct vnode	*p_vnode;	/* vnode that this page is named by */
	selock_t	p_selock;	/* shared/exclusive lock on the page */
#if defined(_LP64)
	int		p_selockpad;	/* pad for growing selock */
#endif
	struct page	*p_hash;	/* hash by [vnode, offset] */
	struct page	*p_vpnext;	/* next page in vnode list */
	struct page	*p_vpprev;	/* prev page in vnode list */
	struct page	*p_next;	/* next page in free/intrans lists */
	struct page	*p_prev;	/* prev page in free/intrans lists */
	ushort_t	p_lckcnt;	/* number of locks on page data */
	ushort_t	p_cowcnt;	/* number of copy-on-write locks */
	kcondvar_t	p_cv;		/* page struct's condition var */
	kcondvar_t	p_io_cv;	/* for iolock */
	uchar_t		p_iolock_state;	/* replaces p_iolock */
	volatile uchar_t p_szc;		/* page size code */
	uchar_t		p_fsdata;	/* file system dependent byte */
	uchar_t		p_state;	/* p_free, p_noreloc */
	uchar_t		p_nrm;		/* non-cache, ref, mod readonly bits */
#if defined(__sparc)
	uchar_t		p_vcolor;	/* virtual color */
#else
	uchar_t		p_embed;	/* x86 - changes p_mapping & p_index */
#endif
	uchar_t		p_index;	/* MPSS mapping info. Not used on x86 */
	uchar_t		p_toxic;	/* page has an unrecoverable error */
	void		*p_mapping;	/* hat specific translation info */
	pfn_t		p_pagenum;	/* physical page number */

	uint_t		p_share;	/* number of translations */
#if defined(_LP64)
	uint_t		p_sharepad;	/* pad for growing p_share */
#endif
	uint_t		p_msresv_1;	/* reserved for future use */
#if defined(__sparc)
	uint_t		p_kpmref;	/* number of kpm mapping sharers */
	struct kpme	*p_kpmelist;	/* kpm specific mapping info */
#else
	/* index of entry in p_map when p_embed is set */
	uint_t		p_mlentry;
#endif
	uint64_t	p_msresv_2;	/* page allocation debugging */
} page_t;


typedef	page_t	devpage_t;
#define	devpage	page


/*
 * The page hash table is a power-of-two in size, externally chained
 * through the hash field.  PAGE_HASHAVELEN is the average length
 * desired for this chain, from which the size of the page_hash
 * table is derived at boot time and stored in the kernel variable
 * page_hashsz.  In the hash function it is given by PAGE_HASHSZ.
 *
 * PAGE_HASH_FUNC returns an index into the page_hash[] array.  This
 * index is also used to derive the mutex that protects the chain.
 *
 * In constructing the hash function, first we dispose of unimportant bits
 * (the page offset from "off" and the low 3 bits of "vp", which are zero
 * for struct alignment).  Then we shift and sum the remaining bits a
 * couple of times in order to fold as many bits as possible from the two
 * source values into the resulting hash value.  Note that this performs
 * quickly, since the shifts and sums are fast register-to-register
 * operations with no additional memory references.
 */
#if NCPU < 4
#define	PH_TABLE_SIZE	16
#define	VP_SHIFT	7
#else
#define	PH_TABLE_SIZE	128
#define	VP_SHIFT	9
#endif

/*
 * The amount to use for the successive shifts in the hash function below.
 * The actual value is LOG2(PH_TABLE_SIZE), so that as many bits as
 * possible will filter thru PAGE_HASH_FUNC() and PAGE_HASH_MUTEX().
 */
#define	PH_SHIFT_SIZE   (7)

#define	PAGE_HASHSZ	page_hashsz
#define	PAGE_HASHAVELEN		4
#define	PAGE_HASH_FUNC(vp, off) \
	((((uintptr_t)(off) >> PAGESHIFT) + \
		((uintptr_t)(off) >> (PAGESHIFT + PH_SHIFT_SIZE)) + \
		((uintptr_t)(vp) >> 3) + \
		((uintptr_t)(vp) >> (3 + PH_SHIFT_SIZE)) + \
		((uintptr_t)(vp) >> (3 + 2 * PH_SHIFT_SIZE))) & \
		(PAGE_HASHSZ - 1))
#ifdef _KERNEL

/*
 * The page hash value is re-hashed to an index for the ph_mutex array.
 *
 * For 64 bit kernels, the mutex array is padded out to prevent false
 * sharing of cache sub-blocks (64 bytes) of adjacent mutexes.
 *
 * For 32 bit kernels, we don't want to waste kernel address space with
 * padding, so instead we rely on the hash function to introduce skew of
 * adjacent vnode/offset indexes (the left shift part of the hash function).
 * Since sizeof (kmutex_t) is 8, we shift an additional 3 to skew to a different
 * 64 byte sub-block.
 */
typedef struct pad_mutex {
	kmutex_t	pad_mutex;
#ifdef _LP64
	char		pad_pad[64 - sizeof (kmutex_t)];
#endif
} pad_mutex_t;
extern pad_mutex_t ph_mutex[];

#define	PAGE_HASH_MUTEX(x) \
	&(ph_mutex[((x) + ((x) >> VP_SHIFT) + ((x) << 3)) & \
		(PH_TABLE_SIZE - 1)].pad_mutex)

/*
 * Flags used while creating pages.
 */
#define	PG_EXCL		0x0001
#define	PG_WAIT		0x0002
#define	PG_PHYSCONTIG	0x0004		/* NOT SUPPORTED */
#define	PG_MATCH_COLOR	0x0008		/* SUPPORTED by free list routines */
#define	PG_NORELOC	0x0010		/* Non-relocatable alloc hint. */
					/* Page must be PP_ISNORELOC */
#define	PG_PANIC	0x0020		/* system will panic if alloc fails */
#define	PG_PUSHPAGE	0x0040		/* alloc may use reserve */

/*
 * When p_selock has the SE_EWANTED bit set, threads waiting for SE_EXCL
 * access are given priority over all other waiting threads.
 */
#define	SE_EWANTED	0x40000000
#define	PAGE_LOCKED(pp)		(((pp)->p_selock & ~SE_EWANTED) != 0)
#define	PAGE_SHARED(pp)		(((pp)->p_selock & ~SE_EWANTED) > 0)
#define	PAGE_EXCL(pp)		((pp)->p_selock < 0)
#define	PAGE_LOCKED_SE(pp, se)	\
	((se) == SE_EXCL ? PAGE_EXCL(pp) : PAGE_SHARED(pp))
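
/*
 * An illustrative reading of the p_selock encoding implied by the
 * macros above: zero means unlocked, a positive value (with SE_EWANTED
 * masked off) is the number of shared holders, a negative value means
 * the page is exclusively locked, and the SE_EWANTED bit marks a
 * waiting exclusive requestor:
 *
 *	p_selock == 0			unlocked
 *	p_selock == 3			three SE_SHARED holders
 *	p_selock <  0			one SE_EXCL holder
 *	p_selock & SE_EWANTED		an SE_EXCL waiter has priority
 */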

extern	long page_hashsz;
extern	page_t **page_hash;

extern	kmutex_t page_llock;		/* page logical lock mutex */
extern	kmutex_t freemem_lock;		/* freemem lock */

extern	pgcnt_t	total_pages;		/* total pages in the system */

/*
 * Variables controlling locking of physical memory.
 */
extern	pgcnt_t	pages_pp_maximum;	/* tuning: lock + claim <= max */
extern	void init_pages_pp_maximum(void);

struct lgrp;

/* page_list_{add,sub} flags */

/* which list */
#define	PG_FREE_LIST	0x0001
#define	PG_CACHE_LIST	0x0002

/* where on list */
#define	PG_LIST_TAIL	0x0010
#define	PG_LIST_HEAD	0x0020

/* called from */
#define	PG_LIST_ISINIT	0x1000
#define	PG_LIST_ISCAGE	0x2000

/*
 * Flags for setting the p_toxic flag when a page has errors.
 * These flags may be OR'ed into the p_toxic page flag to
 * indicate that error(s) have occurred on a page
 * (see page_settoxic()). If both PAGE_IS_TOXIC and
 * PAGE_IS_FAILING are set, PAGE_IS_FAILING takes precedence.
 *
 * When an error happens on a page, the trap handler sets
 * PAGE_IS_FAULTY on the page to indicate that an error has been
 * seen on the page. The error could really be a memory error or
 * something else (like a datapath error). When it is determined
 * that it is a memory error, the page is marked as PAGE_IS_TOXIC
 * or PAGE_IS_FAILING depending on the type of error and then
 * retired.
 *
 * We use the page's 'toxic' flag to determine whether the page
 * has incurred just a single error - PAGE_IS_TOXIC - or is being
 * retired due to multiple soft errors - PAGE_IS_FAILING. In
 * page_free(), a page that has been marked PAGE_IS_FAILING will
 * not be cleaned; it will always be retired. A page marked
 * PAGE_IS_TOXIC is cleaned and is retired only if this attempt at
 * cleaning fails.
 *
 * When a page has been successfully retired, we set PAGE_IS_RETIRED.
 */
#define	PAGE_IS_OK		0x0
#define	PAGE_IS_TOXIC		0x1
#define	PAGE_IS_FAILING		0x2
#define	PAGE_IS_RETIRED		0x4
#define	PAGE_IS_FAULTY		0x8

/*
 * Page frame operations.
 */
page_t	*page_lookup(struct vnode *, u_offset_t, se_t);
page_t	*page_lookup_create(struct vnode *, u_offset_t, se_t, page_t *,
	spgcnt_t *, int);
page_t	*page_lookup_nowait(struct vnode *, u_offset_t, se_t);
page_t	*page_find(struct vnode *, u_offset_t);
page_t	*page_exists(struct vnode *, u_offset_t);
int	page_exists_physcontig(vnode_t *, u_offset_t, uint_t, page_t *[]);
int	page_exists_forreal(struct vnode *, u_offset_t, uint_t *);
void	page_needfree(spgcnt_t);
page_t	*page_create(struct vnode *, u_offset_t, size_t, uint_t);
int	page_alloc_pages(struct seg *, caddr_t, page_t **, page_t **,
		uint_t, int);
page_t  *page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes,
	uint_t flags, struct seg *seg, caddr_t vaddr, void *arg);
page_t	*page_create_va(struct vnode *, u_offset_t, size_t, uint_t,
	struct seg *, caddr_t);
int	page_create_wait(size_t npages, uint_t flags);
void    page_create_putback(ssize_t npages);
void	page_free(page_t *, int);
void	page_free_at_startup(page_t *);
void	page_free_pages(page_t *);
void	free_vp_pages(struct vnode *, u_offset_t, size_t);
int	page_reclaim(page_t *, kmutex_t *);
void	page_destroy(page_t *, int);
void	page_destroy_pages(page_t *);
void	page_destroy_free(page_t *);
void	page_rename(page_t *, struct vnode *, u_offset_t);
int	page_hashin(page_t *, struct vnode *, u_offset_t, kmutex_t *);
void	page_hashout(page_t *, kmutex_t *);
int	page_num_hashin(pfn_t, struct vnode *, u_offset_t);
void	page_add(page_t **, page_t *);
void	page_add_common(page_t **, page_t *);
void	page_sub(page_t **, page_t *);
void	page_sub_common(page_t **, page_t *);
page_t	*page_get_freelist(struct vnode *, u_offset_t, struct seg *,
		caddr_t, size_t, uint_t, struct lgrp *);

page_t	*page_get_cachelist(struct vnode *, u_offset_t, struct seg *,
		caddr_t, uint_t, struct lgrp *);
void	page_list_add(page_t *, int);
void	page_boot_demote(page_t *);
void	page_promote_size(page_t *, uint_t);
void	page_list_add_pages(page_t *, int);
void	page_list_sub(page_t *, int);
void	page_list_break(page_t **, page_t **, size_t);
void	page_list_concat(page_t **, page_t **);
void	page_vpadd(page_t **, page_t *);
void	page_vpsub(page_t **, page_t *);
int	page_lock(page_t *, se_t, kmutex_t *, reclaim_t);
int	page_lock_es(page_t *, se_t, kmutex_t *, reclaim_t, int);
void	page_lock_clr_exclwanted(page_t *);
int	page_trylock(page_t *, se_t);
int	page_try_reclaim_lock(page_t *, se_t, int);
int	page_tryupgrade(page_t *);
void	page_downgrade(page_t *);
void	page_unlock(page_t *);
void	page_lock_delete(page_t *);
int	page_pp_lock(page_t *, int, int);
void	page_pp_unlock(page_t *, int, int);
int	page_resv(pgcnt_t, uint_t);
void	page_unresv(pgcnt_t);
void	page_pp_useclaim(page_t *, page_t *, uint_t);
int	page_addclaim(page_t *);
int	page_subclaim(page_t *);
int	page_addclaim_pages(page_t **);
int	page_subclaim_pages(page_t **);
pfn_t	page_pptonum(page_t *);
page_t	*page_numtopp(pfn_t, se_t);
page_t	*page_numtopp_noreclaim(pfn_t, se_t);
page_t	*page_numtopp_nolock(pfn_t);
page_t	*page_numtopp_nowait(pfn_t, se_t);
page_t  *page_first();
page_t  *page_next(page_t *);
page_t  *page_nextn_raw(page_t *, ulong_t);	/* pp += n */
#define	page_next_raw(PP)	page_nextn_raw((PP), 1)
page_t  *page_list_next(page_t *);
page_t	*page_nextn(page_t *, ulong_t);
page_t	*page_next_scan_init(void **);
page_t	*page_next_scan_large(page_t *, ulong_t *, void **);
void    prefetch_page_r(void *);
void	ppcopy(page_t *, page_t *);
void	page_relocate_hash(page_t *, page_t *);
void	pagezero(page_t *, uint_t, uint_t);
void	pagescrub(page_t *, uint_t, uint_t);
void	page_io_lock(page_t *);
void	page_io_unlock(page_t *);
int	page_io_trylock(page_t *);
int	page_iolock_assert(page_t *);
void	page_iolock_init(page_t *);
pgcnt_t	page_busy(int);
void	page_lock_init(void);
ulong_t	page_share_cnt(page_t *);
int	page_isshared(page_t *);
int	page_isfree(page_t *);
int	page_isref(page_t *);
int	page_ismod(page_t *);
int	page_release(page_t *, int);
int	page_retire(page_t *, uchar_t);
int	page_istoxic(page_t *);
int	page_isfailing(page_t *);
int	page_isretired(page_t *);
int	page_deteriorating(page_t *);
void	page_settoxic(page_t *, uchar_t);
void	page_clrtoxic(page_t *);
void	page_clrtoxic_flag(page_t *, uchar_t);
int	page_isfaulty(page_t *);
int	page_mem_avail(pgcnt_t);

void	page_set_props(page_t *, uint_t);
void	page_clr_all_props(page_t *);

kmutex_t	*page_vnode_mutex(struct vnode *);
kmutex_t	*page_se_mutex(struct page *);
kmutex_t	*page_szc_lock(struct page *);
int		page_szc_lock_assert(struct page *pp);

/*
 * Page relocation interfaces. page_relocate() is generic.
 * page_get_replacement_page() is provided by the PSM.
 * page_free_replacement_page() is generic.
 */
int group_page_trylock(page_t *, se_t);
void group_page_unlock(page_t *);
int page_relocate(page_t **, page_t **, int, int, spgcnt_t *, struct lgrp *);
int do_page_relocate(page_t **, page_t **, int, spgcnt_t *, struct lgrp *);
page_t *page_get_replacement_page(page_t *, struct lgrp *, uint_t);
void page_free_replacement_page(page_t *);
int page_relocate_cage(page_t **, page_t **);

int page_try_demote_pages(page_t *);
void page_demote_free_pages(page_t *);

struct anon_map;

void page_mark_migrate(struct seg *, caddr_t, size_t, struct anon_map *,
    ulong_t, vnode_t *, u_offset_t, int);
void page_migrate(struct seg *, caddr_t, page_t **, pgcnt_t);

/*
 * Tell the PIM we are adding physical memory
 */
void add_physmem(page_t *, size_t, pfn_t);
void add_physmem_cb(page_t *, pfn_t);	/* callback for page_t part */

/*
 * hw_page_array[] is configured with hardware supported page sizes by
 * platform specific code.
 */
typedef struct {
	size_t	hp_size;
	uint_t	hp_shift;
	pgcnt_t	hp_pgcnt;	/* base pagesize cnt */
} hw_pagesize_t;

extern hw_pagesize_t	hw_page_array[];
extern uint_t		page_colors, page_colors_mask;
extern uint_t		page_coloring_shift;
extern int		cpu_page_colors;

uint_t	page_num_pagesizes(void);
uint_t	page_num_user_pagesizes(void);
size_t	page_get_pagesize(uint_t);
size_t	page_get_user_pagesize(uint_t n);
pgcnt_t	page_get_pagecnt(uint_t);
uint_t	page_get_shift(uint_t);
int	page_szc(size_t);
int	page_szc_user_filtered(size_t);


/* page_get_replacement page flags */
#define	PGR_SAMESZC	0x1	/* only look for page size same as orig */
#define	PGR_NORELOC	0x2	/* allocate a P_NORELOC page */

#endif	/* _KERNEL */

/*
 * Constants used for the p_iolock_state
 */
#define	PAGE_IO_INUSE	0x1
#define	PAGE_IO_WANTED	0x2

/*
 * Constants used for page_release status
 */
#define	PGREL_NOTREL    0x1
#define	PGREL_CLEAN	0x2
#define	PGREL_MOD	0x3

/*
 * The p_state field holds what used to be the p_age and p_free
 * bits.  These fields are protected by p_selock (see above).
 */
#define	P_FREE		0x80		/* Page on free list */
#define	P_NORELOC	0x40		/* Page is non-relocatable */
#define	P_MIGRATE	0x20		/* Migrate page on next touch */
#define	P_SWAP		0x10		/* belongs to vnode that is V_ISSWAP */

#define	PP_ISFREE(pp)		((pp)->p_state & P_FREE)
#define	PP_ISAGED(pp)		(((pp)->p_state & P_FREE) && \
					((pp)->p_vnode == NULL))
#define	PP_ISNORELOC(pp)	((pp)->p_state & P_NORELOC)
#define	PP_ISMIGRATE(pp)	((pp)->p_state & P_MIGRATE)
#define	PP_ISSWAP(pp)		((pp)->p_state & P_SWAP)

#define	PP_SETFREE(pp)		((pp)->p_state = ((pp)->p_state & ~P_MIGRATE) \
				| P_FREE)
#define	PP_SETAGED(pp)		ASSERT(PP_ISAGED(pp))
#define	PP_SETNORELOC(pp)	((pp)->p_state |= P_NORELOC)
#define	PP_SETMIGRATE(pp)	((pp)->p_state |= P_MIGRATE)
#define	PP_SETSWAP(pp)		((pp)->p_state |= P_SWAP)

#define	PP_CLRFREE(pp)		((pp)->p_state &= ~P_FREE)
#define	PP_CLRAGED(pp)		ASSERT(!PP_ISAGED(pp))
#define	PP_CLRNORELOC(pp)	((pp)->p_state &= ~P_NORELOC)
#define	PP_CLRMIGRATE(pp)	((pp)->p_state &= ~P_MIGRATE)
#define	PP_CLRSWAP(pp)		((pp)->p_state &= ~P_SWAP)



/*
 * kpm large page description.
 * The virtual address range of segkpm is divided into chunks of
 * kpm_pgsz. Each chunk is controlled by a kpm_page_t. The ushort
 * is sufficient for 2^15 * PAGESIZE, so e.g. the maximum kpm_pgsz
 * is 256M for 8K pages and 2G for 64K pages. It is kept as small as
 * possible to save physical memory space.
 *
 * There are 2 segkpm mapping windows within the virtual address
 * space when we have to prevent VAC alias conflicts. The so called
 * Alias window (mappings are always by PAGESIZE) is controlled by
 * kp_refcnta. The regular window is controlled by kp_refcnt for the
 * normal operation, which is to use the largest available pagesize.
 * When VAC alias conflicts are present within a chunk in the regular
 * window the large page mapping is broken up into smaller PAGESIZE
 * mappings. kp_refcntc is used to control the pages that are involved
 * in the conflict and kp_refcnts holds the active mappings done
 * with the small page size. In non vac conflict mode kp_refcntc is
 * also used as "go" indication (-1) for the trap level tsbmiss
 * handler.
 */
typedef struct kpm_page {
	short kp_refcnt;	/* pages mapped large */
	short kp_refcnta;	/* pages mapped in Alias window */
	short kp_refcntc;	/* TL-tsbmiss flag; #vac alias conflict pages */
	short kp_refcnts;	/* vac alias: pages mapped small */
} kpm_page_t;

/*
 * Note: khl_lock offset changes must be reflected in sfmmu_asm.s
 */
typedef struct kpm_hlk {
	kmutex_t khl_mutex;	/* kpm_page mutex */
	uint_t   khl_lock;	/* trap level tsbmiss handling */
} kpm_hlk_t;

/*
 * kpm small page description.
 * When kpm_pgsz is equal to PAGESIZE a smaller representation is used
 * to save memory space. Alias range mappings and regular segkpm
 * mappings are done in units of PAGESIZE and can share the mapping
 * information and the mappings are always distinguishable by their
 * virtual address. Other information needed for VAC conflict prevention
 * is already available on a per page basis. There are basically 3 states
 * a kpm_spage can have: not mapped (0), mapped in Alias range or virtually
 * uncached (1) and mapped in the regular segkpm window (-1). The -1 value
 * is also used as "go" indication for the segkpm trap level tsbmiss
 * handler for small pages (value is kept the same as it is used for large
 * mappings).
 */
typedef struct kpm_spage {
	char	kp_mapped;	/* page mapped small */
} kpm_spage_t;

/*
 * Note: kshl_lock offset changes must be reflected in sfmmu_asm.s
 */
typedef struct kpm_shlk {
	uint_t   kshl_lock;	/* trap level tsbmiss handling */
} kpm_shlk_t;

/*
 * Each segment of physical memory is described by a memseg struct.
 * Within a segment, memory is considered contiguous. The members
 * can be categorized as follows:
 * . Platform independent:
 *         pages, epages, pages_base, pages_end, next, lnext.
 * . 64bit only but platform independent:
 *         kpm_pbase, kpm_nkpmpgs, kpm_pages, kpm_spages.
 * . Really platform or mmu specific:
 *         pagespa, epagespa, nextpa, kpm_pagespa.
 * . Mixed:
 *         msegflags.
 */
struct memseg {
	page_t *pages, *epages;		/* [from, to] in page array */
	pfn_t pages_base, pages_end;	/* [from, to] in page numbers */
	struct memseg *next;		/* next segment in list */
#if defined(__sparc)
	struct memseg *lnext;		/* next segment in deleted list */
	uint64_t pagespa, epagespa;	/* [from, to] page array physical */
	uint64_t nextpa;		/* physical next pointer */
	pfn_t	kpm_pbase;		/* start of kpm range */
	pgcnt_t kpm_nkpmpgs;		/* # of kpm_pgsz pages */
	union _mseg_un {
		kpm_page_t  *kpm_lpgs;	/* ptr to kpm_page array */
		kpm_spage_t *kpm_spgs;	/* ptr to kpm_spage array */
	} mseg_un;
	uint64_t kpm_pagespa;		/* physical ptr to kpm (s)pages array */
	uint_t msegflags;		/* memseg flags */
#endif /* __sparc */
};

/* memseg union aliases */
#define	kpm_pages	mseg_un.kpm_lpgs
#define	kpm_spages	mseg_un.kpm_spgs

/* msegflags */
#define	MEMSEG_DYNAMIC		0x1	/* DR: memory was added dynamically */

/* memseg support macros */
#define	MSEG_NPAGES(SEG)	((SEG)->pages_end - (SEG)->pages_base)

/* memseg hash */
#define	MEM_HASH_SHIFT		0x9
#define	N_MEM_SLOTS		0x200		/* must be a power of 2 */
#define	MEMSEG_PFN_HASH(pfn)	(((pfn)/mhash_per_slot) & (N_MEM_SLOTS - 1))
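
/*
 * An illustrative pfn-to-memseg lookup using the hash (sketch only;
 * page_numtomemseg_nolock() below is the real interface, and the
 * `memseg_hash' table is assumed here to be the one populated by
 * build_pfn_hash()):
 *
 *	struct memseg *seg;
 *
 *	for (seg = memseg_hash[MEMSEG_PFN_HASH(pfn)];
 *	    seg != NULL; seg = seg->next) {
 *		if (pfn >= seg->pages_base && pfn < seg->pages_end)
 *			return (seg);
 *	}
 *	return (NULL);
 */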

/* memseg externals */
extern struct memseg *memsegs;		/* list of memory segments */
extern ulong_t mhash_per_slot;
extern uint64_t memsegspa;		/* memsegs as physical address */

void build_pfn_hash();
extern struct memseg *page_numtomemseg_nolock(pfn_t pfnum);


#ifdef	__cplusplus
}
#endif

#endif	/* _VM_PAGE_H */