/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#ifndef	_VM_PAGE_H
#define	_VM_PAGE_H

#include <vm/seg.h>

#ifdef	__cplusplus
extern "C" {
#endif

#if defined(_KERNEL) || defined(_KMEMUSER)

/*
 * Shared/Exclusive lock.
 */

/*
 * Types of page locking supported by page_lock & friends.
 */
typedef enum {
	SE_SHARED,
	SE_EXCL			/* exclusive lock (value == -1) */
} se_t;

/*
 * For requesting that page_lock reclaim the page from the free list.
 */
typedef enum {
	P_RECLAIM,		/* reclaim page from free list */
	P_NO_RECLAIM		/* DON'T reclaim the page */
} reclaim_t;

/*
 * Callers of page_try_reclaim_lock and page_lock_es can use this flag
 * to get SE_EXCL access before reader/writers are given access.
 */
#define	SE_EXCL_WANTED	0x02

/*
 * All page_*lock() requests will be denied unless this flag is set in
 * the 'es' parameter.
 */
#define	SE_RETIRED	0x04

#endif	/* _KERNEL | _KMEMUSER */

typedef int	selock_t;

/*
 * Define VM_STATS to turn on all sorts of statistic gathering about
 * the VM layer.  By default, it is only turned on when DEBUG is
 * also defined.
 */
#ifdef DEBUG
#define	VM_STATS
#endif	/* DEBUG */

#ifdef VM_STATS
#define	VM_STAT_ADD(stat)		(stat)++
#define	VM_STAT_COND_ADD(cond, stat)	((void) (!(cond) || (stat)++))
#else
#define	VM_STAT_ADD(stat)
#define	VM_STAT_COND_ADD(cond, stat)
#endif	/* VM_STATS */
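/*
 * Illustrative sketch (hypothetical counters, not part of this
 * interface): callers typically declare a private stats struct and
 * bump it with the macros above, e.g.
 *
 *	static struct {
 *		uint_t	lookups;
 *		uint_t	misses;
 *	} mystats;
 *
 *	VM_STAT_ADD(mystats.lookups);
 *	VM_STAT_COND_ADD(pp == NULL, mystats.misses);
 *
 * On a non-VM_STATS build both macros expand to nothing.
 */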
#ifdef _KERNEL

/*
 * PAGE_LLOCK_SIZE is 2 * NCPU, but no smaller than 128.
 * PAGE_LLOCK_SHIFT is log2(PAGE_LLOCK_SIZE).
 */
#if ((2*NCPU_P2) > 128)
#define	PAGE_LLOCK_SHIFT	((unsigned)(NCPU_LOG2 + 1))
#else
#define	PAGE_LLOCK_SHIFT	7U
#endif
#define	PAGE_LLOCK_SIZE		(1 << PAGE_LLOCK_SHIFT)

/*
 * The number of low order 0 (or less variable) bits in the page_t address.
 */
#if defined(__sparc)
#define	PP_SHIFT		7
#else
#define	PP_SHIFT		6
#endif

/*
 * pp may be the root of a large page, and many low order bits will be 0.
 * Shift and XOR multiple times to capture the good bits across the range of
 * possible page sizes.
 */
#define	PAGE_LLOCK_HASH(pp)	\
	(((((uintptr_t)(pp) >> PP_SHIFT) ^ \
	((uintptr_t)(pp) >> (PAGE_LLOCK_SHIFT + PP_SHIFT))) ^ \
	((uintptr_t)(pp) >> ((PAGE_LLOCK_SHIFT * 2) + PP_SHIFT)) ^ \
	((uintptr_t)(pp) >> ((PAGE_LLOCK_SHIFT * 3) + PP_SHIFT))) & \
	(PAGE_LLOCK_SIZE - 1))

#define	page_struct_lock(pp)	\
	mutex_enter(&page_llocks[PAGE_LLOCK_HASH(PP_PAGEROOT(pp))].pad_mutex)
#define	page_struct_unlock(pp)	\
	mutex_exit(&page_llocks[PAGE_LLOCK_HASH(PP_PAGEROOT(pp))].pad_mutex)

#endif	/* _KERNEL */
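/*
 * Illustrative sketch (hypothetical caller, not part of this
 * interface): p_lckcnt/p_cowcnt updates are bracketed by the macros
 * above, e.g.
 *
 *	page_struct_lock(pp);
 *	if (pp->p_lckcnt < PAGE_LOCK_MAXIMUM)
 *		pp->p_lckcnt++;
 *	page_struct_unlock(pp);
 *
 * Both macros hash on PP_PAGEROOT(pp), so every constituent page of a
 * large page resolves to the same pad_mutex in page_llocks[].
 */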
#include <sys/t_lock.h>

struct as;

/*
 * Each physical page has a page structure, which is used to maintain
 * these pages as a cache.  A page can be found via a hashed lookup
 * based on the [vp, offset].  If a page has an [vp, offset] identity,
 * then it is entered on a doubly linked circular list off the
 * vnode using the vpnext/vpprev pointers.  If the p_free bit
 * is on, then the page is also on a doubly linked circular free
 * list using next/prev pointers.  If the "p_selock" and "p_iolock"
 * are held, then the page is currently being read in (exclusive p_selock)
 * or written back (shared p_selock).  In this case, the next/prev pointers
 * are used to link the pages together for a consecutive i/o request.  If
 * the page is being brought in from its backing store, then other processes
 * will wait for the i/o to complete before attaching to the page since it
 * will have an "exclusive" lock.
 *
 * Each page structure has the locks described below along with
 * the fields they protect:
 *
 *	p_selock	This is a per-page shared/exclusive lock that is
 *			used to implement the logical shared/exclusive
 *			lock for each page.  The "shared" lock is normally
 *			used in most cases while the "exclusive" lock is
 *			required to destroy or retain exclusive access to
 *			a page (e.g., while reading in pages).  The appropriate
 *			lock is always held whenever there is any reference
 *			to a page structure (e.g., during i/o).
 *			(Note that with the addition of the "writer-lock-wanted"
 *			semantics (via SE_EWANTED), threads must not acquire
 *			multiple reader locks or else a deadly embrace will
 *			occur in the following situation: thread 1 obtains a
 *			reader lock; next thread 2 fails to get a writer lock
 *			but specified SE_EWANTED so it will wait by either
 *			blocking (when using page_lock_es) or spinning while
 *			retrying (when using page_try_reclaim_lock) until the
 *			reader lock is released; then thread 1 attempts to
 *			get another reader lock but is denied due to
 *			SE_EWANTED being set, and now both threads are in a
 *			deadly embrace.)
 *
 *				p_hash
 *				p_vnode
 *				p_offset
 *
 *				p_free
 *				p_age
 *
 *	p_iolock	This is a binary semaphore lock that provides
 *			exclusive access to the i/o list links in each
 *			page structure.  It is always held while the page
 *			is on an i/o list (i.e., involved in i/o).  That is,
 *			even though a page may be only `shared' locked
 *			while it is doing a write, the following fields may
 *			change anyway.  Normally, the page must be
 *			`exclusively' locked to change anything in it.
 *
 *				p_next
 *				p_prev
 *
 * The following fields are protected by the global page_llocks[]:
 *
 *				p_lckcnt
 *				p_cowcnt
 *
 * The following lists are protected by the global page_freelock:
 *
 *				page_cachelist
 *				page_freelist
 *
 * The following, for our purposes, are protected by
 * the global freemem_lock:
 *
 *				freemem
 *				freemem_wait
 *				freemem_cv
 *
 * The following fields are protected by hat layer lock(s).  When a page
 * structure is not mapped and is not associated with a vnode (after a call
 * to page_hashout() for example) the p_nrm field may be modified without
 * holding the hat layer lock:
 *
 *				p_nrm
 *				p_mapping
 *				p_share
 *
 * The following field is file system dependent.  How it is used and
 * the locking strategies applied are up to the individual file system
 * implementation.
 *
 *				p_fsdata
 *
 * The page structure is used to represent and control the system's
 * physical pages.  There is one instance of the structure for each
 * page that is not permanently allocated.  For example, the pages that
 * hold the page structures are permanently held by the kernel
 * and hence do not need page structures to track them.  The array
 * of page structures is allocated early on in the kernel's life and
 * is based on the amount of available physical memory.
 *
 * Each page structure may simultaneously appear on several linked lists.
 * The lists are:  hash list, free or in i/o list, and a vnode's page list.
 * Each type of list is protected by a different group of mutexes as described
 * below:
 *
 * The hash list is used to quickly find a page when the page's vnode and
 * offset within the vnode are known.  Each page that is hashed is
 * connected via the `p_hash' field.  The anchor for each hash is in the
 * array `page_hash'.  An array of mutexes, `ph_mutex', protects the
 * lists anchored by page_hash[].  To either search or modify a given hash
 * list, the appropriate mutex in the ph_mutex array must be held.
 *
 * The free list contains pages that are `free to be given away'.  For
 * efficiency reasons, pages on this list are placed in two categories:
 * pages that are still associated with a vnode, and pages that are not
 * associated with a vnode.  Free pages always have their `p_free' bit set,
 * free pages that are still associated with a vnode also have their
 * `p_age' bit set.  Pages on the free list are connected via their
 * `p_next' and `p_prev' fields.  When a page is involved in some sort
 * of i/o, it is not free and these fields may be used to link associated
 * pages together.  At the moment, the free list is protected by a
 * single mutex `page_freelock'.  The list of free pages still associated
 * with a vnode is anchored by `page_cachelist' while other free pages
 * are anchored in architecture dependent ways (to handle page coloring etc.).
 *
 * Pages associated with a given vnode appear on a list anchored in the
 * vnode by the `v_pages' field.  They are linked together with
 * `p_vpnext' and `p_vpprev'.  The field `p_offset' contains a page's
 * offset within the vnode.  The pages on this list are not kept in
 * offset order.  These lists, in a manner similar to the hash lists,
 * are protected by an array of mutexes called `vph_mutex'.  Before
 * searching or modifying this chain the appropriate mutex in the
 * vph_mutex[] array must be held.
 *
 * Again, each of the lists that a page can appear on is protected by a
 * mutex.  Before reading or writing any of the fields comprising the
 * list, the appropriate lock must be held.  These list locks should only
 * be held for very short intervals.
 *
 * In addition to the list locks, each page structure contains a
 * shared/exclusive lock that protects various fields within it.
 * To modify one of these fields, the `p_selock' must be exclusively held.
 * To read a field with a degree of certainty, the lock must be at least
 * held shared.
 *
 * Removing a page structure from one of the lists requires holding
 * the appropriate list lock and the page's p_selock.  A page may be
 * prevented from changing identity, being freed, or otherwise modified
 * by acquiring p_selock shared.
 *
 * To avoid deadlocks, a strict locking protocol must be followed.  Basically
 * there are two cases:  In the first case, the page structure in question
 * is known ahead of time (e.g., when the page is to be added or removed
 * from a list).  In the second case, the page structure is not known and
 * must be found by searching one of the lists.
 *
 * When adding or removing a known page to one of the lists, first the
 * page must be exclusively locked (since at least one of its fields
 * will be modified), second the lock protecting the list must be acquired,
 * third the page inserted or deleted, and finally the list lock dropped.
 *
 * The more interesting case occurs when the particular page structure
 * is not known ahead of time.  For example, when a call is made to
 * page_lookup(), it is not known if a page with the desired (vnode and
 * offset pair) identity exists.  So the appropriate mutex in ph_mutex is
 * acquired, the hash list searched, and if the desired page is found
 * an attempt is made to lock it.  The attempt to acquire p_selock must
 * not block while the hash list lock is held.  A deadlock could occur
 * if some other process was trying to remove the page from the list.
 * The removing process (following the above protocol) would have exclusively
 * locked the page, and be spinning waiting to acquire the lock protecting
 * the hash list.  Since the searching process holds the hash list lock
 * and is waiting to acquire the page lock, a deadlock occurs.
 *
 * The proper scheme to follow is: first, lock the appropriate list,
 * search the list, and if the desired page is found either use
 * page_trylock() (which will not block) or pass the address of the
 * list lock to page_lock().  If page_lock() cannot acquire the page's
 * lock, it will drop the list lock before going to sleep.  page_lock()
 * returns a value to indicate if the list lock was dropped allowing the
 * calling program to react appropriately (i.e., retry the operation).
 *
 * If the list lock was dropped before the attempt at locking the page
 * was made, checks would have to be made to ensure that the page had
 * not changed identity before its lock was obtained.  This is because
 * the interval between dropping the list lock and acquiring the page
 * lock is indeterminate.
 *
 * In addition, when both a hash list lock (ph_mutex[]) and a vnode list
 * lock (vph_mutex[]) are needed, the hash list lock must be acquired first.
 * The routine page_hashin() is a good example of this sequence.
 * This sequence is ASSERTed by checking that the vph_mutex[] is not held
 * just before each acquisition of one of the mutexes in ph_mutex[].
 *
 * So, as a quick summary:
 *
 *	pse_mutex[]'s protect the p_selock and p_cv fields.
 *
 *	p_selock protects the p_free, p_age, p_vnode, p_offset and p_hash,
 *
 *	ph_mutex[]'s protect the page_hash[] array and its chains.
 *
 *	vph_mutex[]'s protect the v_pages field and the vp page chains.
 *
 *	First lock the page, then the hash chain, then the vnode chain.  When
 *	this is not possible `trylocks' must be used.  Sleeping while holding
 *	any of these mutexes (p_selock is not a mutex) is not allowed.
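 *
 * An illustrative sketch of this protocol (a simplified, hypothetical
 * page_lookup()-style sequence, not the actual implementation; see
 * PAGE_HASH_FUNC/PAGE_HASH_MUTEX below):
 *
 *	again:
 *		index = PAGE_HASH_FUNC(vp, off);
 *		phm = PAGE_HASH_MUTEX(index);
 *		mutex_enter(phm);
 *		for (pp = page_hash[index]; pp != NULL; pp = pp->p_hash)
 *			if (pp->p_vnode == vp && pp->p_offset == off)
 *				break;
 *		if (pp != NULL && !page_trylock(pp, SE_SHARED)) {
 *			mutex_exit(phm);	(drop list lock, retry)
 *			goto again;
 *		}
 *		mutex_exit(phm);
 *
 * Alternatively, page_lock(pp, SE_SHARED, phm, P_RECLAIM) sleeps rather
 * than retrying; it drops phm before sleeping and its return value says
 * whether it did, so the caller can re-validate the page's [vp, offset]
 * identity.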
 *
 *
 *	field		reading		writing		    ordering
 *	======================================================================
 *	p_vnode		p_selock(E,S)	p_selock(E)
 *	p_offset
 *	p_free
 *	p_age
 *	=====================================================================
 *	p_hash		p_selock(E,S)	p_selock(E) &&	p_selock, ph_mutex
 *					ph_mutex[]
 *	=====================================================================
 *	p_vpnext	p_selock(E,S)	p_selock(E) &&	p_selock, vph_mutex
 *	p_vpprev			vph_mutex[]
 *	=====================================================================
 *	When the p_free bit is set:
 *
 *	p_next		p_selock(E,S)	p_selock(E) &&	p_selock,
 *	p_prev				page_freelock	page_freelock
 *
 *	When the p_free bit is not set:
 *
 *	p_next		p_selock(E,S)	p_selock(E) &&	p_selock, p_iolock
 *	p_prev				p_iolock
 *	=====================================================================
 *	p_selock	pse_mutex[]	pse_mutex[]	can't acquire any
 *	p_cv						other mutexes or
 *							sleep while holding
 *							this lock.
 *	=====================================================================
 *	p_lckcnt	p_selock(E,S)	p_selock(E)
 *					OR
 *					p_selock(S) &&
 *					page_llocks[]
 *	p_cowcnt
 *	=====================================================================
 *	p_nrm		hat layer lock	hat layer lock
 *	p_mapping
 *	p_pagenum
 *	=====================================================================
 *
 *	where:
 *		E----> exclusive version of p_selock.
 *		S----> shared version of p_selock.
 *
 *
 *	Global data structures and variable:
 *
 *	field		reading		writing		    ordering
 *	=====================================================================
 *	page_hash[]	ph_mutex[]	ph_mutex[]	can hold this lock
 *							before acquiring
 *							a vph_mutex or
 *							pse_mutex.
 *	=====================================================================
 *	vp->v_pages	vph_mutex[]	vph_mutex[]	can only acquire
 *							a pse_mutex while
 *							holding this lock.
 *	=====================================================================
 *	page_cachelist	page_freelock	page_freelock	can't acquire any
 *	page_freelist	page_freelock	page_freelock	other mutexes while
 *							holding this mutex.
 *	=====================================================================
 *	freemem		freemem_lock	freemem_lock	can't acquire any
 *	freemem_wait					other mutexes while
 *	freemem_cv					holding this mutex.
 *	=====================================================================
 *
 * Page relocation, PG_NORELOC and P_NORELOC.
 *
 * Pages may be relocated using the page_relocate() interface.  Relocation
 * involves moving the contents and identity of a page to another, free page.
 * To relocate a page, the SE_EXCL lock must be obtained.  The way to prevent
 * a page from being relocated is to hold the SE_SHARED lock (the SE_EXCL
 * lock must not be held indefinitely).  If the page is going to be held
 * SE_SHARED indefinitely, then the PG_NORELOC hint should be passed
 * to page_create_va so that pages that are prevented from being relocated
 * can be managed differently by the platform specific layer.
 *
 * Pages locked in memory using page_pp_lock (p_lckcnt/p_cowcnt != 0)
 * are guaranteed to be held in memory, but can still be relocated
 * providing the SE_EXCL lock can be obtained.
 *
 * The P_NORELOC bit in the page_t.p_state field is provided for use by
 * the platform specific code in managing pages when the PG_NORELOC
 * hint is used.
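 *
 * An illustrative sketch (hypothetical caller, not the actual
 * implementation): once SE_EXCL is held, a single page might be
 * relocated with
 *
 *	spgcnt_t npgs;
 *	page_t *newpp = NULL;
 *
 *	if (page_trylock(pp, SE_EXCL)) {
 *		if (page_relocate(&pp, &newpp, 1, 1, &npgs, NULL) == 0)
 *			(contents and identity have moved to the
 *			replacement page(s); npgs says how many)
 *		...
 *	}
 *
 * Holding SE_SHARED on the page is what prevents such a relocation
 * from succeeding.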
 *
 * Memory delete and page locking.
 *
 * The set of all usable pages is managed using the global page list as
 * implemented by the memseg structure defined below.  When memory is added
 * or deleted this list changes.  Additions to this list guarantee that the
 * list is never corrupt.  In order to avoid the necessity of an additional
 * lock to protect against failed accesses to the memseg being deleted and,
 * more importantly, the page_ts, the memseg structure is never freed and the
 * page_t virtual address space is remapped to a page (or pages) of
 * zeros.  If a page_t is manipulated while it is p_selock'd, or if it is
 * locked indirectly via a hash or freelist lock, it is not possible for
 * memory delete to collect the page and so that part of the page list is
 * prevented from being deleted.  If the page is referenced outside of one
 * of these locks, it is possible for the page_t being referenced to be
 * deleted.  Examples of this are page_t pointers returned by
 * page_numtopp_nolock, page_first and page_next.  Providing the page_t
 * is re-checked after taking the p_selock (for p_vnode != NULL), the
 * remapping to the zero pages will be detected.
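 *
 * An illustrative sketch of that re-check (hypothetical caller):
 *
 *	pp = page_numtopp_nolock(pfn);
 *	if (pp != NULL && page_trylock(pp, SE_SHARED)) {
 *		if (pp->p_vnode == NULL) {
 *			(a memory delete may have remapped the page_t
 *			to the zero pages; treat as not found)
 *			page_unlock(pp);
 *			pp = NULL;
 *		}
 *	}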
 *
 * Page size (p_szc field) and page locking.
 *
 * p_szc field of free pages is changed by the free list manager under
 * freelist locks and is of no concern to the rest of the VM subsystem.
 *
 * p_szc changes of allocated anonymous (swapfs) pages can only be done
 * after exclusively locking all constituent pages and calling
 * hat_pageunload() on each of them.  To prevent p_szc changes of non-free
 * anonymous (swapfs) large pages it's enough to either lock SHARED any of
 * the constituent pages or prevent hat_pageunload() by holding the hat
 * level lock that protects mapping lists (this method is for hat code only).
 *
 * To increase (promote) p_szc of allocated non anonymous file system pages
 * one has to first lock exclusively all involved constituent pages and call
 * hat_pageunload() on each of them.  To prevent p_szc promote it's enough to
 * either lock SHARED any of constituent pages that will be needed to make a
 * large page or prevent hat_pageunload() by holding hat level lock that
 * protects mapping lists (this method is for hat code only).
 *
 * To decrease (demote) p_szc of an allocated non anonymous file system large
 * page one can either use the same method as used for changing p_szc of
 * anonymous large pages or if it's not possible to lock all constituent pages
 * exclusively a different method can be used.  In the second method one only
 * has to exclusively lock one of constituent pages but then one has to
 * acquire further locks by calling page_szc_lock() and
 * hat_page_demote().  hat_page_demote() acquires hat level locks and then
 * demotes the page.  This mechanism relies on the fact that any code that
 * needs to prevent p_szc of a file system large page from changing either
 * locks all constituent large pages at least SHARED or locks some pages at
 * least SHARED and calls page_szc_lock() or uses hat level page locks.
 * Demotion using this method is implemented by page_demote_vp_pages().
 * Please see comments in front of page_demote_vp_pages(), hat_page_demote()
 * and page_szc_lock() for more details.
 *
 * Lock order: p_selock, page_szc_lock, ph_mutex/vph_mutex/freelist,
 * hat level locks.
 */

typedef struct page {
	u_offset_t	p_offset;	/* offset into vnode for this page */
	struct vnode	*p_vnode;	/* vnode that this page is named by */
	selock_t	p_selock;	/* shared/exclusive lock on the page */
#if defined(_LP64)
	uint_t		p_vpmref;	/* vpm ref - index of the vpmap_t */
#endif
	struct page	*p_hash;	/* hash by [vnode, offset] */
	struct page	*p_vpnext;	/* next page in vnode list */
	struct page	*p_vpprev;	/* prev page in vnode list */
	struct page	*p_next;	/* next page in free/intrans lists */
	struct page	*p_prev;	/* prev page in free/intrans lists */
	ushort_t	p_lckcnt;	/* number of locks on page data */
	ushort_t	p_cowcnt;	/* number of copy on write lock */
	kcondvar_t	p_cv;		/* page struct's condition var */
	kcondvar_t	p_io_cv;	/* for iolock */
	uchar_t		p_iolock_state;	/* replaces p_iolock */
	volatile uchar_t p_szc;		/* page size code */
	uchar_t		p_fsdata;	/* file system dependent byte */
	uchar_t		p_state;	/* p_free, p_noreloc */
	uchar_t		p_nrm;		/* non-cache, ref, mod readonly bits */
#if defined(__sparc)
	uchar_t		p_vcolor;	/* virtual color */
#else
	uchar_t		p_embed;	/* x86 - changes p_mapping & p_index */
#endif
	uchar_t		p_index;	/* MPSS mapping info.  Not used on x86 */
	uchar_t		p_toxic;	/* page has an unrecoverable error */
	void		*p_mapping;	/* hat specific translation info */
	pfn_t		p_pagenum;	/* physical page number */

	uint_t		p_share;	/* number of translations */
#if defined(_LP64)
	uint_t		p_sharepad;	/* pad for growing p_share */
#endif
	uint_t		p_slckcnt;	/* number of softlocks */
#if defined(__sparc)
	uint_t		p_kpmref;	/* number of kpm mapping sharers */
	struct kpme	*p_kpmelist;	/* kpm specific mapping info */
#else
	/* index of entry in p_map when p_embed is set */
	uint_t		p_mlentry;
#endif
#if defined(_LP64)
	kmutex_t	p_ilock;	/* protects p_vpmref */
#else
	uint64_t	p_msresv_2;	/* page allocation debugging */
#endif
} page_t;


typedef	page_t	devpage_t;
#define	devpage	page

#define	PAGE_LOCK_MAXIMUM \
	((1 << (sizeof (((page_t *)0)->p_lckcnt) * NBBY)) - 1)

#define	PAGE_SLOCK_MAXIMUM UINT_MAX
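/*
 * Since p_lckcnt above is a ushort_t, PAGE_LOCK_MAXIMUM works out to
 * (1 << (2 * 8)) - 1 == 65535, while p_slckcnt (a uint_t) may grow all
 * the way to PAGE_SLOCK_MAXIMUM == UINT_MAX.
 */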
/*
 * Page hash table is a power-of-two in size, externally chained
 * through the hash field.  PAGE_HASHAVELEN is the average length
 * desired for this chain, from which the size of the page_hash
 * table is derived at boot time and stored in the kernel variable
 * page_hashsz.  In the hash function it is given by PAGE_HASHSZ.
 *
 * PAGE_HASH_FUNC returns an index into the page_hash[] array.  This
 * index is also used to derive the mutex that protects the chain.
 *
 * In constructing the hash function, first we dispose of unimportant bits
 * (page offset from "off" and the low 3 bits of "vp" which are zero for
 * struct alignment).  Then shift and sum the remaining bits a couple times
 * in order to get as many source bits from the two source values into the
 * resulting hashed value.  Note that this will perform quickly, since the
 * shifting/summing are fast register to register operations with no
 * additional memory references.
 *
 * PH_SHIFT_SIZE is the amount to use for the successive shifts in the hash
 * function below.  The actual value is LOG2(PH_TABLE_SIZE), so that as many
 * bits as possible will filter thru PAGE_HASH_FUNC() and PAGE_HASH_MUTEX().
 */
#if defined(_LP64)

#if NCPU < 4
#define	PH_TABLE_SIZE	128
#define	PH_SHIFT_SIZE	7
#else
#define	PH_TABLE_SIZE	(2 * NCPU_P2)
#define	PH_SHIFT_SIZE	(NCPU_LOG2 + 1)
#endif

#else	/* 32 bits */

#if NCPU < 4
#define	PH_TABLE_SIZE	16
#define	PH_SHIFT_SIZE	4
#else
#define	PH_TABLE_SIZE	128
#define	PH_SHIFT_SIZE	7
#endif

#endif	/* _LP64 */

/*
 * We take care to get as much randomness as possible from both the vp and
 * the offset.  Workloads can have few vnodes with many offsets, many vnodes
 * with few offsets or a moderate mix of both.  This hash should perform
 * equally well for each of these possibilities and for all types of memory
 * allocations.
 *
 * vnodes representing files are created over a long period of time and
 * have good variation in the upper vp bits, and the right shifts below
 * capture these bits.  However, swap vnodes are created quickly in a
 * narrow vp* range.  Refer to comments at swap_alloc: vnum has exactly
 * AN_VPSHIFT bits, so the kmem_alloc'd vnode addresses have approximately
 * AN_VPSHIFT bits of variation above their VNODE_ALIGN low order 0 bits.
 * Spread swap vnodes widely in the hash table by XOR'ing a term with the
 * vp bits of variation left shifted to the top of the range.
 */

#define	PAGE_HASHSZ	page_hashsz
#define	PAGE_HASHAVELEN		4
#define	PAGE_HASH_FUNC(vp, off) \
	(((((uintptr_t)(off) >> PAGESHIFT) ^ \
	((uintptr_t)(off) >> (PAGESHIFT + PH_SHIFT_SIZE))) ^ \
	(((uintptr_t)(vp) >> 3) ^ \
	((uintptr_t)(vp) >> (3 + PH_SHIFT_SIZE)) ^ \
	((uintptr_t)(vp) >> (3 + 2 * PH_SHIFT_SIZE)) ^ \
	((uintptr_t)(vp) << \
	(page_hashsz_shift - AN_VPSHIFT - VNODE_ALIGN_LOG2)))) & \
	(PAGE_HASHSZ - 1))
#ifdef _KERNEL

/*
 * The page hash value is re-hashed to an index for the ph_mutex array.
 *
 * For 64 bit kernels, the mutex array is padded out to prevent false
 * sharing of cache sub-blocks (64 bytes) of adjacent mutexes.
 *
 * For 32 bit kernels, we don't want to waste kernel address space with
 * padding, so instead we rely on the hash function to introduce skew of
 * adjacent vnode/offset indexes (the left shift part of the hash function).
 * Since sizeof (kmutex_t) is 8, we shift an additional 3 to skew to a
 * different 64 byte sub-block.
 */
extern pad_mutex_t ph_mutex[];

#define	PAGE_HASH_MUTEX(x) \
	&(ph_mutex[((x) ^ ((x) >> PH_SHIFT_SIZE) + ((x) << 3)) & \
		(PH_TABLE_SIZE - 1)].pad_mutex)

/*
 * Flags used while creating pages.
 */
#define	PG_EXCL		0x0001
#define	PG_WAIT		0x0002	/* Blocking memory allocations */
#define	PG_PHYSCONTIG	0x0004	/* NOT SUPPORTED */
#define	PG_MATCH_COLOR	0x0008	/* SUPPORTED by free list routines */
#define	PG_NORELOC	0x0010	/* Non-relocatable alloc hint. */
				/* Page must be PP_ISNORELOC */
#define	PG_PANIC	0x0020	/* system will panic if alloc fails */
#define	PG_PUSHPAGE	0x0040	/* alloc may use reserve */
#define	PG_LOCAL	0x0080	/* alloc from given lgrp only */
#define	PG_NORMALPRI	0x0100	/* PG_WAIT like priority, but */
				/* non-blocking */
/*
 * When p_selock has the SE_EWANTED bit set, threads waiting for SE_EXCL
 * access are given priority over all other waiting threads.
 */
#define	SE_EWANTED	0x40000000
#define	PAGE_LOCKED(pp)		(((pp)->p_selock & ~SE_EWANTED) != 0)
#define	PAGE_SHARED(pp)		(((pp)->p_selock & ~SE_EWANTED) > 0)
#define	PAGE_EXCL(pp)		((pp)->p_selock < 0)
#define	PAGE_LOCKED_SE(pp, se)	\
	((se) == SE_EXCL ? PAGE_EXCL(pp) : PAGE_SHARED(pp))
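/*
 * Illustrative p_selock encodings (informal, derived from the macros
 * above):
 *
 *	 0		unlocked
 *	 3		three SE_SHARED holders
 *	-1		held SE_EXCL
 *	 0x40000003	three SE_SHARED holders, an SE_EXCL waiter pending
 *
 * PAGE_SHARED() and PAGE_LOCKED() mask off SE_EWANTED before testing
 * the count; PAGE_EXCL() only needs the sign bit.
 */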
extern	long page_hashsz;
extern	unsigned int page_hashsz_shift;
extern	page_t **page_hash;

extern	pad_mutex_t page_llocks[];	/* page logical lock mutex */
extern	kmutex_t freemem_lock;		/* freemem lock */

extern	pgcnt_t	total_pages;		/* total pages in the system */

/*
 * Variables controlling locking of physical memory.
 */
extern	pgcnt_t	pages_pp_maximum;	/* tuning: lock + claim <= max */
extern	void init_pages_pp_maximum(void);

struct lgrp;

/* page_list_{add,sub} flags */

/* which list */
#define	PG_FREE_LIST	0x0001
#define	PG_CACHE_LIST	0x0002

/* where on list */
#define	PG_LIST_TAIL	0x0010
#define	PG_LIST_HEAD	0x0020

/* called from */
#define	PG_LIST_ISINIT	0x1000

/*
 * Page frame operations.
 */
page_t	*page_lookup(struct vnode *, u_offset_t, se_t);
page_t	*page_lookup_create(struct vnode *, u_offset_t, se_t, page_t *,
	spgcnt_t *, int);
page_t	*page_lookup_nowait(struct vnode *, u_offset_t, se_t);
page_t	*page_find(struct vnode *, u_offset_t);
page_t	*page_exists(struct vnode *, u_offset_t);
int	page_exists_physcontig(vnode_t *, u_offset_t, uint_t, page_t *[]);
int	page_exists_forreal(struct vnode *, u_offset_t, uint_t *);
void	page_needfree(spgcnt_t);
page_t	*page_create(struct vnode *, u_offset_t, size_t, uint_t);
int	page_alloc_pages(struct vnode *, struct seg *, caddr_t, page_t **,
	page_t **, uint_t, int, int);
page_t	*page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes,
	uint_t flags, struct seg *seg, caddr_t vaddr, void *arg);
page_t	*page_create_va(struct vnode *, u_offset_t, size_t, uint_t,
	struct seg *, caddr_t);
int	page_create_wait(pgcnt_t npages, uint_t flags);
void	page_create_putback(spgcnt_t npages);
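/*
 * Illustrative sketch (hypothetical caller, not part of this
 * interface): a file system allocating a named page for vnode vp at
 * offset off, willing to wait for memory, might call
 *
 *	pp = page_create_va(vp, off, PAGESIZE, PG_WAIT | PG_EXCL,
 *	    seg, vaddr);
 *
 * where PG_EXCL asks that creation fail (NULL return) if a page with
 * this [vp, offset] identity already exists.
 */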
void	page_free(page_t *, int);
void	page_free_at_startup(page_t *);
void	page_free_pages(page_t *);
void	free_vp_pages(struct vnode *, u_offset_t, size_t);
int	page_reclaim(page_t *, kmutex_t *);
int	page_reclaim_pages(page_t *, kmutex_t *, uint_t);
void	page_destroy(page_t *, int);
void	page_destroy_pages(page_t *);
void	page_destroy_free(page_t *);
void	page_rename(page_t *, struct vnode *, u_offset_t);
int	page_hashin(page_t *, struct vnode *, u_offset_t, kmutex_t *);
void	page_hashout(page_t *, kmutex_t *);
int	page_num_hashin(pfn_t, struct vnode *, u_offset_t);
void	page_add(page_t **, page_t *);
void	page_add_common(page_t **, page_t *);
void	page_sub(page_t **, page_t *);
void	page_sub_common(page_t **, page_t *);
page_t	*page_get_freelist(struct vnode *, u_offset_t, struct seg *,
	caddr_t, size_t, uint_t, struct lgrp *);

page_t	*page_get_cachelist(struct vnode *, u_offset_t, struct seg *,
	caddr_t, uint_t, struct lgrp *);
#if defined(__i386) || defined(__amd64)
int	page_chk_freelist(uint_t);
#endif
void	page_list_add(page_t *, int);
void	page_boot_demote(page_t *);
void	page_promote_size(page_t *, uint_t);
void	page_list_add_pages(page_t *, int);
void	page_list_sub(page_t *, int);
void	page_list_sub_pages(page_t *, uint_t);
void	page_list_xfer(page_t *, int, int);
void	page_list_break(page_t **, page_t **, size_t);
void	page_list_concat(page_t **, page_t **);
void	page_vpadd(page_t **, page_t *);
void	page_vpsub(page_t **, page_t *);
int	page_lock(page_t *, se_t, kmutex_t *, reclaim_t);
int	page_lock_es(page_t *, se_t, kmutex_t *, reclaim_t, int);
void	page_lock_clr_exclwanted(page_t *);
int	page_trylock(page_t *, se_t);
int	page_try_reclaim_lock(page_t *, se_t, int);
int	page_tryupgrade(page_t *);
void	page_downgrade(page_t *);
void	page_unlock(page_t *);
void	page_unlock_nocapture(page_t *);
void	page_lock_delete(page_t *);
int	page_deleted(page_t *);
int	page_pp_lock(page_t *, int, int);
void	page_pp_unlock(page_t *, int, int);
int	page_resv(pgcnt_t, uint_t);
void	page_unresv(pgcnt_t);
void	page_pp_useclaim(page_t *, page_t *, uint_t);
int	page_addclaim(page_t *);
int	page_subclaim(page_t *);
int	page_addclaim_pages(page_t **);
int	page_subclaim_pages(page_t **);
pfn_t	page_pptonum(page_t *);
page_t	*page_numtopp(pfn_t, se_t);
page_t	*page_numtopp_noreclaim(pfn_t, se_t);
page_t	*page_numtopp_nolock(pfn_t);
page_t	*page_numtopp_nowait(pfn_t, se_t);
page_t	*page_first();
page_t	*page_next(page_t *);
page_t	*page_list_next(page_t *);
page_t	*page_nextn(page_t *, ulong_t);
page_t	*page_next_scan_init(void **);
page_t	*page_next_scan_large(page_t *, ulong_t *, void **);
void	prefetch_page_r(void *);
int	ppcopy(page_t *, page_t *);
void	page_relocate_hash(page_t *, page_t *);
void	pagezero(page_t *, uint_t, uint_t);
void	pagescrub(page_t *, uint_t, uint_t);
void	page_io_lock(page_t *);
void	page_io_unlock(page_t *);
int	page_io_trylock(page_t *);
int	page_iolock_assert(page_t *);
void	page_iolock_init(page_t *);
void	page_io_wait(page_t *);
int	page_io_locked(page_t *);
pgcnt_t	page_busy(int);
void	page_lock_init(void);
ulong_t	page_share_cnt(page_t *);
int	page_isshared(page_t *);
int	page_isfree(page_t *);
int	page_isref(page_t *);
int	page_ismod(page_t *);
int	page_release(page_t *, int);
void	page_retire_init(void);
int	page_retire(uint64_t, uchar_t);
int	page_retire_check(uint64_t, uint64_t *);
int	page_unretire(uint64_t);
int	page_unretire_pp(page_t *, int);
void	page_tryretire(page_t *);
void	page_retire_mdboot();
uint64_t	page_retire_pend_count(void);
uint64_t	page_retire_pend_kas_count(void);
void	page_retire_incr_pend_count(void *);
void	page_retire_decr_pend_count(void *);
void	page_clrtoxic(page_t *, uchar_t);
void	page_settoxic(page_t *, uchar_t);

int	page_mem_avail(pgcnt_t);
int	page_reclaim_mem(pgcnt_t, pgcnt_t, int);

void	page_set_props(page_t *, uint_t);
void	page_clr_all_props(page_t *);
int	page_clear_lck_cow(page_t *, int);

kmutex_t	*page_vnode_mutex(struct vnode *);
kmutex_t	*page_se_mutex(struct page *);
kmutex_t	*page_szc_lock(struct page *);
int		page_szc_lock_assert(struct page *pp);

/*
 * Page relocation interfaces. page_relocate() is generic.
 * page_get_replacement_page() is provided by the PSM.
 * page_free_replacement_page() is generic.
 */
int group_page_trylock(page_t *, se_t);
void group_page_unlock(page_t *);
int page_relocate(page_t **, page_t **, int, int, spgcnt_t *, struct lgrp *);
int do_page_relocate(page_t **, page_t **, int, spgcnt_t *, struct lgrp *);
page_t *page_get_replacement_page(page_t *, struct lgrp *, uint_t);
void page_free_replacement_page(page_t *);
int page_relocate_cage(page_t **, page_t **);

int page_try_demote_pages(page_t *);
int page_try_demote_free_pages(page_t *);
void page_demote_free_pages(page_t *);

struct anon_map;

void page_mark_migrate(struct seg *, caddr_t, size_t, struct anon_map *,
    ulong_t, vnode_t *, u_offset_t, int);
void page_migrate(struct seg *, caddr_t, page_t **, pgcnt_t);

/*
 * Tell the PIM we are adding physical memory
 */
void add_physmem(page_t *, size_t, pfn_t);
void add_physmem_cb(page_t *, pfn_t);	/* callback for page_t part */

/*
 * hw_page_array[] is configured with hardware supported page sizes by
 * platform specific code.
 */
typedef struct {
	size_t	hp_size;
	uint_t	hp_shift;
	uint_t	hp_colors;
	pgcnt_t	hp_pgcnt;	/* base pagesize cnt */
} hw_pagesize_t;

extern hw_pagesize_t	hw_page_array[];
extern uint_t		page_coloring_shift;
extern uint_t		page_colors_mask;
extern int		cpu_page_colors;
extern uint_t		colorequiv;
extern uchar_t		colorequivszc[];

uint_t	page_num_pagesizes(void);
uint_t	page_num_user_pagesizes(int);
size_t	page_get_pagesize(uint_t);
size_t	page_get_user_pagesize(uint_t n);
pgcnt_t	page_get_pagecnt(uint_t);
uint_t	page_get_shift(uint_t);
int	page_szc(size_t);
int	page_szc_user_filtered(size_t);
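/*
 * Illustrative sketch (hypothetical caller): the configured page sizes
 * are walked by index, e.g. to find the smallest hardware page size
 * covering len bytes:
 *
 *	uint_t szc;
 *	for (szc = 0; szc < page_num_pagesizes(); szc++)
 *		if (page_get_pagesize(szc) >= len)
 *			break;
 */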
/* page_get_replacement page flags */
#define	PGR_SAMESZC	0x1	/* only look for page size same as orig */
#define	PGR_NORELOC	0x2	/* allocate a P_NORELOC page */

/*
 * macros for "masked arithmetic"
 * The purpose is to step through all combinations of a set of bits while
 * keeping some other bits fixed. Fixed bits need not be contiguous. The
 * variable bits need not be contiguous either, or even right aligned. The
 * trick is to set all fixed bits to 1, then increment, then restore the
 * fixed bits. If incrementing causes a carry from a low bit position, the
 * carry propagates thru the fixed bits, because they are temporarily set to 1.
 *	v is the value
 *	i is the increment
 *	eq_mask defines the fixed bits
 *	mask limits the size of the result
 */
#define	ADD_MASKED(v, i, eq_mask, mask) \
	(((((v) | (eq_mask)) + (i)) & (mask) & ~(eq_mask)) | ((v) & (eq_mask)))

/*
 * convenience macro which increments by 1
 */
#define	INC_MASKED(v, eq_mask, mask)	ADD_MASKED(v, 1, eq_mask, mask)
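/*
 * Worked example (illustrative): let eq_mask = 0x0f0 and mask = 0xfff,
 * i.e. the middle nibble is fixed and the outer nibbles vary.
 * For INC_MASKED(0x1a3, 0x0f0, 0xfff):
 *
 *	0x1a3 | 0x0f0			= 0x1f3	fixed bits forced to 1
 *	0x1f3 + 1			= 0x1f4
 *	0x1f4 & 0xfff & ~0x0f0		= 0x104	temporary 1s stripped
 *	0x104 | (0x1a3 & 0x0f0)		= 0x1a4	fixed bits restored
 *
 * From v = 0x1af the same steps give 0x2a0: the carry out of the low
 * nibble propagates across the fixed nibble (temporarily all 1s) into
 * the high nibble, while the fixed 0xa0 is preserved.
 */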
#endif	/* _KERNEL */

/*
 * Constants used for the p_iolock_state
 */
#define	PAGE_IO_INUSE	0x1
#define	PAGE_IO_WANTED	0x2

/*
 * Constants used for page_release status
 */
#define	PGREL_NOTREL	0x1
#define	PGREL_CLEAN	0x2
#define	PGREL_MOD	0x3

/*
 * The p_state field holds what used to be the p_age and p_free
 * bits. These fields are protected by p_selock (see above).
 */
#define	P_FREE		0x80	/* Page on free list */
#define	P_NORELOC	0x40	/* Page is non-relocatable */
#define	P_MIGRATE	0x20	/* Migrate page on next touch */
#define	P_SWAP		0x10	/* belongs to vnode that is V_ISSWAP */
#define	P_BOOTPAGES	0x08	/* member of bootpages list */
#define	P_RAF		0x04	/* page retired at free */

#define	PP_ISFREE(pp)		((pp)->p_state & P_FREE)
#define	PP_ISAGED(pp)		(((pp)->p_state & P_FREE) && \
					((pp)->p_vnode == NULL))
#define	PP_ISNORELOC(pp)	((pp)->p_state & P_NORELOC)
#define	PP_ISKAS(pp)		(VN_ISKAS((pp)->p_vnode))
#define	PP_ISNORELOCKERNEL(pp)	(PP_ISNORELOC(pp) && PP_ISKAS(pp))
#define	PP_ISMIGRATE(pp)	((pp)->p_state & P_MIGRATE)
#define	PP_ISSWAP(pp)		((pp)->p_state & P_SWAP)
#define	PP_ISBOOTPAGES(pp)	((pp)->p_state & P_BOOTPAGES)
#define	PP_ISRAF(pp)		((pp)->p_state & P_RAF)

#define	PP_SETFREE(pp)		((pp)->p_state = ((pp)->p_state & ~P_MIGRATE) \
				| P_FREE)
#define	PP_SETAGED(pp)		ASSERT(PP_ISAGED(pp))
#define	PP_SETNORELOC(pp)	((pp)->p_state |= P_NORELOC)
#define	PP_SETMIGRATE(pp)	((pp)->p_state |= P_MIGRATE)
#define	PP_SETSWAP(pp)		((pp)->p_state |= P_SWAP)
#define	PP_SETBOOTPAGES(pp)	((pp)->p_state |= P_BOOTPAGES)
#define	PP_SETRAF(pp)		((pp)->p_state |= P_RAF)

#define	PP_CLRFREE(pp)		((pp)->p_state &= ~P_FREE)
#define	PP_CLRAGED(pp)		ASSERT(!PP_ISAGED(pp))
#define	PP_CLRNORELOC(pp)	((pp)->p_state &= ~P_NORELOC)
#define	PP_CLRMIGRATE(pp)	((pp)->p_state &= ~P_MIGRATE)
#define	PP_CLRSWAP(pp)		((pp)->p_state &= ~P_SWAP)
#define	PP_CLRBOOTPAGES(pp)	((pp)->p_state &= ~P_BOOTPAGES)
#define	PP_CLRRAF(pp)		((pp)->p_state &= ~P_RAF)
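/*
 * Illustrative sketch (an assumption about typical use, not lifted from
 * the source): p_state is read and written only with the page locked,
 * e.g. when claiming a page off the free list:
 *
 *	if (page_trylock(pp, SE_EXCL)) {
 *		if (PP_ISFREE(pp) && !PP_ISNORELOC(pp)) {
 *			... remove pp from the free list ...
 *			PP_CLRFREE(pp);
 *		}
 *		page_unlock(pp);
 *	}
 */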
/*
 * Flags for page_t p_toxic, for tracking memory hardware errors.
 *
 * These flags are OR'ed into p_toxic with page_settoxic() to track which
 * error(s) have occurred on a given page. The flags are cleared with
 * page_clrtoxic(). Both page_settoxic() and page_clrtoxic() use atomic
 * primitives to manipulate the p_toxic field, so no other locking is needed.
 *
 * When an error occurs on a page, p_toxic is set to record the error. The
 * error could be a memory error or something else (e.g. a datapath error).
 * The Page Retire mechanism does not try to determine the exact cause of the
 * error; Page Retire rightly leaves that sort of determination to FMA's
 * Diagnostic Engine (DE).
 *
 * Note that, while p_toxic bits can be set without holding any locks, they
 * should only be cleared while holding the page exclusively locked.
 * There is one exception to this: the PR_CAPTURE bit is protected by a mutex
 * within the page capture logic, and that mutex must be held to set or clear
 * the bit. The page does not need to be locked, but the page_clrtoxic()
 * function must be used, as we need an atomic operation.
 * Also note that there is what amounts to a hack to prevent recursion with
 * large pages: if we are unlocking a page and the PR_CAPTURE bit is set, we
 * will only try to capture the page if the current thread's T_CAPTURING flag
 * is not set. If the flag is set, the unlock will not try to capture the
 * page even though the PR_CAPTURE bit is set.
 *
 * Pages with PR_UE or PR_FMA flags are retired unconditionally, while pages
 * with PR_MCE are retired if the system has not retired too many of them.
 *
 * A page must be exclusively locked to be retired. Pages can be retired if
 * they are mapped, modified, or both, as long as they are not marked PR_UE,
 * since pages with uncorrectable errors cannot be relocated in memory.
 * Once a page has been successfully retired it is zeroed, attached to the
 * retired_pages vnode and, finally, PR_RETIRED is set in p_toxic. The other
 * p_toxic bits are NOT cleared. Pages are not left locked after retiring
 * them, to avoid special case code throughout the kernel; rather,
 * page_*lock() will fail to lock the page, unless SE_RETIRED is passed as
 * an argument.
 *
 * While we have your attention, go take a look at the comments at the
 * beginning of page_retire.c too.
 */
#define	PR_OK		0x00	/* no problem */
#define	PR_MCE		0x01	/* page has seen two or more CEs */
#define	PR_UE		0x02	/* page has an unhandled UE */
#define	PR_UE_SCRUBBED	0x04	/* page has seen a UE but was cleaned */
#define	PR_FMA		0x08	/* A DE wants this page retired */
#define	PR_CAPTURE	0x10	/* page is hashed on page_capture_hash[] */
#define	PR_RESV		0x20	/* Reserved for future use */
#define	PR_MSG		0x40	/* message(s) already printed for this page */
#define	PR_RETIRED	0x80	/* This page has been retired */

#define	PR_REASONS	(PR_UE | PR_MCE | PR_FMA)
#define	PR_TOXIC	(PR_UE)
#define	PR_ERRMASK	(PR_UE | PR_UE_SCRUBBED | PR_MCE | PR_FMA)
#define	PR_TOXICFLAGS	(0xCF)

#define	PP_RETIRED(pp)	((pp)->p_toxic & PR_RETIRED)
#define	PP_TOXIC(pp)	((pp)->p_toxic & PR_TOXIC)
#define	PP_PR_REQ(pp)	(((pp)->p_toxic & PR_REASONS) && !PP_RETIRED(pp))
#define	PP_PR_NOSHARE(pp)						\
	((((pp)->p_toxic & (PR_RETIRED | PR_FMA | PR_UE)) == PR_FMA) &&	\
	!PP_ISKAS(pp))

/*
 * Flags for page_unretire_pp
 */
#define	PR_UNR_FREE	0x1
#define	PR_UNR_CLEAN	0x2
#define	PR_UNR_TEMP	0x4
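/*
 * Illustrative sketch (a simplified assumption; the real flow goes through
 * FMA): an error handler requests retirement by physical address with the
 * reason code, and later code tests the outcome with the macros above:
 *
 *	(void) page_retire(pa, PR_UE);		(pa: physical address)
 *	...
 *	if (PP_RETIRED(pp))
 *		...	page now zeroed and attached to retired_pages
 *	else if (PP_PR_REQ(pp))
 *		...	retirement requested but not yet completed
 */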
/*
 * kpm large page description.
 * The virtual address range of segkpm is divided into chunks of
 * kpm_pgsz. Each chunk is controlled by a kpm_page_t. The ushort
 * is sufficient for 2^15 * PAGESIZE, so e.g. the maximum kpm_pgsz
 * is 256M for 8K pages and 2G for 64K pages. It is kept as small as
 * possible to save physical memory space.
 *
 * There are 2 segkpm mapping windows within the virtual address
 * space when we have to prevent VAC alias conflicts. The so-called
 * Alias window (mappings are always by PAGESIZE) is controlled by
 * kp_refcnta. The regular window is controlled by kp_refcnt for the
 * normal operation, which is to use the largest available pagesize.
 * When VAC alias conflicts are present within a chunk in the regular
 * window, the large page mapping is broken up into smaller PAGESIZE
 * mappings. kp_refcntc is used to control the pages that are involved
 * in the conflict and kp_refcnts holds the active mappings done
 * with the small page size. In non-VAC-conflict mode kp_refcntc is
 * also used as a "go" indication (-1) for the trap level tsbmiss
 * handler.
 */
typedef struct kpm_page {
	short kp_refcnt;	/* pages mapped large */
	short kp_refcnta;	/* pages mapped in Alias window */
	short kp_refcntc;	/* TL-tsbmiss flag; #vac alias conflict pages */
	short kp_refcnts;	/* vac alias: pages mapped small */
} kpm_page_t;

/*
 * Note: khl_lock offset changes must be reflected in sfmmu_asm.s
 */
typedef struct kpm_hlk {
	kmutex_t khl_mutex;	/* kpm_page mutex */
	uint_t   khl_lock;	/* trap level tsbmiss handling */
} kpm_hlk_t;
/*
 * kpm small page description.
 * When kpm_pgsz is equal to PAGESIZE a smaller representation is used
 * to save memory space. Alias range mappings and regular segkpm
 * mappings are done in units of PAGESIZE and can share the mapping
 * information; the mappings are always distinguishable by their
 * virtual address. Other information needed for VAC conflict prevention
 * is already available on a per page basis.
 *
 * The state about how a kpm page is mapped and whether it is ready to go
 * is indicated by the following 1 byte kpm_spage structure. This byte is
 * split into two 4-bit parts - kp_mapped and kp_mapped_go.
 *	- kp_mapped == 1	the page is mapped cacheable
 *	- kp_mapped == 2	the page is mapped non-cacheable
 *	- kp_mapped_go == 1	the mapping is ready to be dropped in
 *	- kp_mapped_go == 0	the mapping is not ready to be dropped in.
 * When kp_mapped_go == 0, we will have the C handler resolve the VAC
 * conflict. Otherwise, the assembly tsb miss handler can simply drop in
 * the mapping when a tsb miss occurs.
 */
typedef union kpm_spage {
	struct {
#ifdef	_BIG_ENDIAN
		uchar_t mapped_go: 4;	/* go or nogo flag */
		uchar_t mapped: 4;	/* page mapped small */
#else
		uchar_t mapped: 4;	/* page mapped small */
		uchar_t mapped_go: 4;	/* go or nogo flag */
#endif
	} kpm_spage_un;
	uchar_t kp_mapped_flag;
} kpm_spage_t;

#define	kp_mapped	kpm_spage_un.mapped
#define	kp_mapped_go	kpm_spage_un.mapped_go

/*
 * Note: kshl_lock offset changes must be reflected in sfmmu_asm.s
 */
typedef struct kpm_shlk {
	uint_t kshl_lock;	/* trap level tsbmiss handling */
} kpm_shlk_t;
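/*
 * Decoding sketch (illustrative): given a per-page kpm_spage_t entry
 * (see kpm_spages in struct memseg below), the one-byte state described
 * above reads as:
 *
 *	kpm_spage_t *ksp = ...;			(entry for some pfn)
 *	if (ksp->kp_mapped == 1)		mapped small, cacheable
 *	else if (ksp->kp_mapped == 2)		mapped small, non-cacheable
 *	if (ksp->kp_mapped_go == 0)		tsbmiss must go to C handler
 *
 * Reading kp_mapped_flag as a whole is a quick test for "any state set".
 */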
/*
 * Each segment of physical memory is described by a memseg struct.
 * Within a segment, memory is considered contiguous. The members
 * can be categorized as follows:
 * . Platform independent:
 *	pages, epages, pages_base, pages_end, next, lnext.
 * . 64bit only but platform independent:
 *	kpm_pbase, kpm_nkpmpgs, kpm_pages, kpm_spages.
 * . Really platform or mmu specific:
 *	pagespa, epagespa, nextpa, kpm_pagespa.
 * . Mixed:
 *	msegflags.
 */
struct memseg {
	page_t *pages, *epages;		/* [from, to] in page array */
	pfn_t pages_base, pages_end;	/* [from, to] in page numbers */
	struct memseg *next;		/* next segment in list */
	struct memseg *lnext;		/* next segment in deleted list */
#if defined(__sparc)
	uint64_t pagespa, epagespa;	/* [from, to] page array physical */
	uint64_t nextpa;		/* physical next pointer */
	pfn_t kpm_pbase;		/* start of kpm range */
	pgcnt_t kpm_nkpmpgs;		/* # of kpm_pgsz pages */
	union _mseg_un {
		kpm_page_t *kpm_lpgs;	/* ptr to kpm_page array */
		kpm_spage_t *kpm_spgs;	/* ptr to kpm_spage array */
	} mseg_un;
	uint64_t kpm_pagespa;		/* physical ptr to kpm (s)pages array */
#endif	/* __sparc */
	uint_t msegflags;		/* memseg flags */
};

/* memseg union aliases */
#define	kpm_pages	mseg_un.kpm_lpgs
#define	kpm_spages	mseg_un.kpm_spgs

/* msegflags */
#define	MEMSEG_DYNAMIC		0x1	/* DR: memory was added dynamically */
#define	MEMSEG_META_INCL	0x2	/* DR: memseg includes its metadata */
#define	MEMSEG_META_ALLOC	0x4	/* DR: memseg allocated its metadata */

/* memseg support macros */
#define	MSEG_NPAGES(SEG)	((SEG)->pages_end - (SEG)->pages_base)

/* memseg hash */
#define	MEM_HASH_SHIFT	0x9
#define	N_MEM_SLOTS	0x200	/* must be a power of 2 */
#define	MEMSEG_PFN_HASH(pfn)	(((pfn)/mhash_per_slot) & (N_MEM_SLOTS - 1))

/* memseg externals */
extern struct memseg *memsegs;		/* list of memory segments */
extern ulong_t mhash_per_slot;
extern uint64_t memsegspa;		/* memsegs as physical address */

void build_pfn_hash(void);
extern struct memseg *page_numtomemseg_nolock(pfn_t pfnum);

/*
 * page capture related info:
 * The page capture routines allow us to asynchronously capture given pages
 * for the explicit use of the requestor. New requestors can be added by
 * explicitly adding themselves to the PC_* flags below and incrementing
 * PC_NUM_CALLBACKS as necessary.
 *
 * Subsystems using page capture must register a callback before attempting
 * to capture a page. A duration of -1 indicates that we will never give
 * up while trying to capture a page and will only stop trying to capture the
 * given page once we have successfully captured it. Thus the user needs to be
 * aware of the behavior of all callers who have a duration of -1.
 *
 * For now, only /dev/physmem and page retire use the page capture interface,
 * and only a single request can be outstanding for a given page. Thus, if
 * /dev/physmem wants a page and page retire also wants the same page, only
 * the page retire request will be honored until the point in time that the
 * page is actually retired, at which point in time, subsequent requests by
 * /dev/physmem will succeed if the CAPTURE_GET_RETIRED flag was set.
 */

#define	PC_RETIRE		(0)
#define	PC_PHYSMEM		(1)
#define	PC_NUM_CALLBACKS	(2)
#define	PC_MASK			((1 << PC_NUM_CALLBACKS) - 1)

#define	CAPTURE_RETIRE		(1 << PC_RETIRE)
#define	CAPTURE_PHYSMEM		(1 << PC_PHYSMEM)

#define	CAPTURE_ASYNC		(0x0200)

#define	CAPTURE_GET_RETIRED	(0x1000)
#define	CAPTURE_GET_CAGE	(0x2000)

struct page_capture_callback {
	int cb_active;		/* 1 means active, 0 means inactive */
	clock_t duration;	/* the length in time that we'll attempt to */
				/* capture this page asynchronously. (in HZ) */
	krwlock_t cb_rwlock;
	int (*cb_func)(page_t *, void *, uint_t); /* callback function */
};

extern kcondvar_t pc_cv;

void page_capture_register_callback(uint_t index, clock_t duration,
    int (*cb_func)(page_t *, void *, uint_t));
void page_capture_unregister_callback(uint_t index);
int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap);
void page_unlock_capture(page_t *pp);
int page_capture_unretire_pp(page_t *);
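/*
 * Usage sketch (an illustrative assumption; my_physmem_cb is hypothetical):
 * a requestor registers its callback once at init, then captures pages,
 * passing its own CAPTURE_* flag. A zero return from page_trycapture()
 * is taken here to mean the page was captured and handed to the callback:
 *
 *	page_capture_register_callback(PC_PHYSMEM, 10 * hz, my_physmem_cb);
 *	...
 *	ret = page_trycapture(pp, 0, CAPTURE_PHYSMEM | CAPTURE_ASYNC, NULL);
 *
 * With CAPTURE_ASYNC, a failed attempt is queued on page_capture_hash[]
 * (marking the page PR_CAPTURE) and retried for the registered duration.
 */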
extern int memsegs_trylock(int);
extern void memsegs_lock(int);
extern void memsegs_unlock(int);
extern int memsegs_lock_held(void);
extern void memlist_read_lock(void);
extern void memlist_read_unlock(void);
extern void memlist_write_lock(void);
extern void memlist_write_unlock(void);

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_PAGE_H */