/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989  AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#ifndef	_VM_PAGE_H
#define	_VM_PAGE_H

#include <vm/seg.h>

#ifdef	__cplusplus
extern "C" {
#endif

#if defined(_KERNEL) || defined(_KMEMUSER)

/*
 * Shared/Exclusive lock.
 */

/*
 * Types of page locking supported by page_lock & friends.
 */
typedef enum {
	SE_SHARED,
	SE_EXCL			/* exclusive lock (value == -1) */
} se_t;

/*
 * For requesting that page_lock reclaim the page from the free list.
 */
typedef enum {
	P_RECLAIM,		/* reclaim page from free list */
	P_NO_RECLAIM		/* DON'T reclaim the page */
} reclaim_t;

/*
 * Callers of page_try_reclaim_lock and page_lock_es can use this flag
 * to get SE_EXCL access before reader/writers are given access.
 */
#define	SE_EXCL_WANTED	0x02

/*
 * All page_*lock() requests will be denied unless this flag is set in
 * the 'es' parameter.
 */
#define	SE_RETIRED	0x04

#endif	/* _KERNEL | _KMEMUSER */

typedef int	selock_t;

/*
 * Define VM_STATS to turn on all sorts of statistic gathering about
 * the VM layer.  By default, it is only turned on when DEBUG is
 * also defined.
 */
#ifdef DEBUG
#define	VM_STATS
#endif	/* DEBUG */

#ifdef VM_STATS
#define	VM_STAT_ADD(stat)		(stat)++
#define	VM_STAT_COND_ADD(cond, stat)	((void) (!(cond) || (stat)++))
#else
#define	VM_STAT_ADD(stat)
#define	VM_STAT_COND_ADD(cond, stat)
#endif	/* VM_STATS */

#ifdef _KERNEL

/*
 * Macros to acquire and release the page logical lock.
 */
#define	page_struct_lock(pp)	mutex_enter(&page_llock)
#define	page_struct_unlock(pp)	mutex_exit(&page_llock)

#endif	/* _KERNEL */

#include <sys/t_lock.h>

struct as;

/*
 * Each physical page has a page structure, which is used to maintain
 * these pages as a cache.  A page can be found via a hashed lookup
 * based on the [vp, offset].  If a page has an [vp, offset] identity,
 * then it is entered on a doubly linked circular list off the
 * vnode using the vpnext/vpprev pointers.  If the p_free bit
 * is on, then the page is also on a doubly linked circular free
 * list using next/prev pointers.  If the "p_selock" and "p_iolock"
 * are held, then the page is currently being read in (exclusive p_selock)
 * or written back (shared p_selock).  In this case, the next/prev pointers
 * are used to link the pages together for a consecutive i/o request.  If
 * the page is being brought in from its backing store, then other processes
 * will wait for the i/o to complete before attaching to the page since it
 * will have an "exclusive" lock.
 *
 * Each page structure has the locks described below along with
 * the fields they protect:
 *
 *	p_selock	This is a per-page shared/exclusive lock that is
 *			used to implement the logical shared/exclusive
 *			lock for each page.  The "shared" lock is normally
 *			used in most cases while the "exclusive" lock is
 *			required to destroy or retain exclusive access to
 *			a page (e.g., while reading in pages).  The
 *			appropriate lock is always held whenever there is
 *			any reference to a page structure (e.g., during i/o).
 *			(Note that with the addition of the
 *			"writer-lock-wanted" semantics (via SE_EWANTED),
 *			threads must not acquire multiple reader locks or
 *			else a deadly embrace will occur in the following
 *			situation: thread 1 obtains a reader lock; next
 *			thread 2 fails to get a writer lock but specified
 *			SE_EWANTED so it will wait by either blocking (when
 *			using page_lock_es) or spinning while retrying (when
 *			using page_try_reclaim_lock) until the reader lock
 *			is released; then thread 1 attempts to get another
 *			reader lock but is denied due to SE_EWANTED being
 *			set, and now both threads are in a deadly embrace.)
 *
 *				p_hash
 *				p_vnode
 *				p_offset
 *
 *				p_free
 *				p_age
 *
 *	p_iolock	This is a binary semaphore lock that provides
 *			exclusive access to the i/o list links in each
 *			page structure.  It is always held while the page
 *			is on an i/o list (i.e., involved in i/o).  That is,
 *			even though a page may be only `shared' locked
 *			while it is doing a write, the following fields may
 *			change anyway.  Normally, the page must be
 *			`exclusively' locked to change anything in it.
 *
 *				p_next
 *				p_prev
 *
 * The following fields are protected by the global page_llock:
 *
 *				p_lckcnt
 *				p_cowcnt
 *
 * The following lists are protected by the global page_freelock:
 *
 *				page_cachelist
 *				page_freelist
 *
 * The following, for our purposes, are protected by
 * the global freemem_lock:
 *
 *				freemem
 *				freemem_wait
 *				freemem_cv
 *
 * The following fields are protected by hat layer lock(s).  When a page
 * structure is not mapped and is not associated with a vnode (after a call
 * to page_hashout() for example) the p_nrm field may be modified without
 * holding the hat layer lock:
 *
 *				p_nrm
 *				p_mapping
 *				p_share
 *
 * The following field is file system dependent.  How it is used and
 * the locking strategies applied are up to the individual file system
 * implementation.
 *
 *				p_fsdata
 *
 * The page structure is used to represent and control the system's
 * physical pages.  There is one instance of the structure for each
 * page that is not permanently allocated.
 * For example, the pages that hold the page structures are permanently
 * held by the kernel and hence do not need page structures to track them.
 * The array of page structures is allocated early on in the kernel's life
 * and is based on the amount of available physical memory.
 *
 * Each page structure may simultaneously appear on several linked lists.
 * The lists are: hash list, free or in i/o list, and a vnode's page list.
 * Each type of list is protected by a different group of mutexes as
 * described below:
 *
 * The hash list is used to quickly find a page when the page's vnode and
 * offset within the vnode are known.  Each page that is hashed is
 * connected via the `p_hash' field.  The anchor for each hash is in the
 * array `page_hash'.  An array of mutexes, `ph_mutex', protects the
 * lists anchored by page_hash[].  To either search or modify a given hash
 * list, the appropriate mutex in the ph_mutex array must be held.
 *
 * The free list contains pages that are `free to be given away'.  For
 * efficiency reasons, pages on this list are placed in two categories:
 * pages that are still associated with a vnode, and pages that are not
 * associated with a vnode.  Free pages always have their `p_free' bit set;
 * free pages that are still associated with a vnode also have their
 * `p_age' bit set.  Pages on the free list are connected via their
 * `p_next' and `p_prev' fields.  When a page is involved in some sort
 * of i/o, it is not free and these fields may be used to link associated
 * pages together.  At the moment, the free list is protected by a
 * single mutex `page_freelock'.  The list of free pages still associated
 * with a vnode is anchored by `page_cachelist' while other free pages
 * are anchored in architecture dependent ways (to handle page coloring etc.).
 *
 * Pages associated with a given vnode appear on a list anchored in the
 * vnode by the `v_pages' field.  They are linked together with
 * `p_vpnext' and `p_vpprev'.  The field `p_offset' contains a page's
 * offset within the vnode.  The pages on this list are not kept in
 * offset order.  These lists, in a manner similar to the hash lists,
 * are protected by an array of mutexes called `vph_mutex'.  Before
 * searching or modifying this chain the appropriate mutex in the
 * vph_mutex[] array must be held.
 *
 * Again, each of the lists that a page can appear on is protected by a
 * mutex.  Before reading or writing any of the fields comprising the
 * list, the appropriate lock must be held.  These list locks should only
 * be held for very short intervals.
 *
 * In addition to the list locks, each page structure contains a
 * shared/exclusive lock that protects various fields within it.
 * To modify one of these fields, the `p_selock' must be exclusively held.
 * To read a field with a degree of certainty, the lock must be at least
 * held shared.
 *
 * Removing a page structure from one of the lists requires holding
 * the appropriate list lock and the page's p_selock.  A page may be
 * prevented from changing identity, being freed, or otherwise modified
 * by acquiring p_selock shared.
 *
 * To avoid deadlocks, a strict locking protocol must be followed.  Basically
 * there are two cases: In the first case, the page structure in question
 * is known ahead of time (e.g., when the page is to be added to or removed
 * from a list).
 * In the second case, the page structure is not known and
 * must be found by searching one of the lists.
 *
 * When adding a known page to or removing it from one of the lists, first
 * the page must be exclusively locked (since at least one of its fields
 * will be modified), second the lock protecting the list must be acquired,
 * third the page inserted or deleted, and finally the list lock dropped.
 *
 * The more interesting case occurs when the particular page structure
 * is not known ahead of time.  For example, when a call is made to
 * page_lookup(), it is not known if a page with the desired (vnode and
 * offset pair) identity exists.  So the appropriate mutex in ph_mutex is
 * acquired, the hash list searched, and if the desired page is found
 * an attempt is made to lock it.  The attempt to acquire p_selock must
 * not block while the hash list lock is held.  A deadlock could occur
 * if some other process was trying to remove the page from the list.
 * The removing process (following the above protocol) would have exclusively
 * locked the page, and be spinning waiting to acquire the lock protecting
 * the hash list.  Since the searching process holds the hash list lock
 * and is waiting to acquire the page lock, a deadlock occurs.
 *
 * The proper scheme to follow is: first, lock the appropriate list,
 * search the list, and if the desired page is found either use
 * page_trylock() (which will not block) or pass the address of the
 * list lock to page_lock().  If page_lock() cannot acquire the page's
 * lock, it will drop the list lock before going to sleep.  page_lock()
 * returns a value to indicate if the list lock was dropped, allowing the
 * calling program to react appropriately (i.e., retry the operation).
 *
 * If the list lock was dropped before the attempt at locking the page
 * was made, checks would have to be made to ensure that the page had
 * not changed identity before its lock was obtained.  This is because
 * the interval between dropping the list lock and acquiring the page
 * lock is indeterminate.
 *
 * In addition, when both a hash list lock (ph_mutex[]) and a vnode list
 * lock (vph_mutex[]) are needed, the hash list lock must be acquired first.
 * The routine page_hashin() is a good example of this sequence.
 * This sequence is ASSERTed by checking that the vph_mutex[] is not held
 * just before each acquisition of one of the mutexes in ph_mutex[].
 *
 * So, as a quick summary:
 *
 *	pse_mutex[]'s protect the p_selock and p_cv fields.
 *
 *	p_selock protects the p_free, p_age, p_vnode, p_offset and p_hash.
 *
 *	ph_mutex[]'s protect the page_hash[] array and its chains.
 *
 *	vph_mutex[]'s protect the v_pages field and the vp page chains.
 *
 * First lock the page, then the hash chain, then the vnode chain.  When
 * this is not possible `trylocks' must be used.  Sleeping while holding
 * any of these mutexes (p_selock is not a mutex) is not allowed.
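 *
 * For illustration only (a hedged sketch, not the actual page_lookup()
 * implementation; all names used are declared elsewhere in this file),
 * the trylock flavor of the protocol above looks roughly like:
 *
 *	kmutex_t *phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off));
 *	page_t *pp;
 *
 *	mutex_enter(phm);
 *	for (pp = page_hash[PAGE_HASH_FUNC(vp, off)]; pp != NULL;
 *	    pp = pp->p_hash) {
 *		if (pp->p_vnode == vp && pp->p_offset == off)
 *			break;
 *	}
 *	if (pp != NULL && !page_trylock(pp, SE_SHARED))
 *		pp = NULL;	(caller backs off and retries, or calls
 *				page_lock() with &phm as described above)
 *	mutex_exit(phm);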
 *
 *
 *	field		reading		writing		    ordering
 *	======================================================================
 *	p_vnode		p_selock(E,S)	p_selock(E)
 *	p_offset
 *	p_free
 *	p_age
 *	=====================================================================
 *	p_hash		p_selock(E,S)	p_selock(E) &&	    p_selock, ph_mutex
 *					ph_mutex[]
 *	=====================================================================
 *	p_vpnext	p_selock(E,S)	p_selock(E) &&	    p_selock, vph_mutex
 *	p_vpprev			vph_mutex[]
 *	=====================================================================
 *	When the p_free bit is set:
 *
 *	p_next		p_selock(E,S)	p_selock(E) &&	    p_selock,
 *	p_prev				page_freelock	    page_freelock
 *
 *	When the p_free bit is not set:
 *
 *	p_next		p_selock(E,S)	p_selock(E) &&	    p_selock, p_iolock
 *	p_prev				p_iolock
 *	=====================================================================
 *	p_selock	pse_mutex[]	pse_mutex[]	    can't acquire any
 *	p_cv						    other mutexes or
 *							    sleep while holding
 *							    this lock.
 *	=====================================================================
 *	p_lckcnt	p_selock(E,S)	p_selock(E) &&
 *	p_cowcnt			page_llock
 *	=====================================================================
 *	p_nrm		hat layer lock	hat layer lock
 *	p_mapping
 *	p_pagenum
 *	=====================================================================
 *
 *	where:
 *		E----> exclusive version of p_selock.
 *		S----> shared version of p_selock.
 *
 *
 *	Global data structures and variables:
 *
 *	field		reading		writing		    ordering
 *	=====================================================================
 *	page_hash[]	ph_mutex[]	ph_mutex[]	    can hold this lock
 *							    before acquiring
 *							    a vph_mutex or
 *							    pse_mutex.
 *	=====================================================================
 *	vp->v_pages	vph_mutex[]	vph_mutex[]	    can only acquire
 *							    a pse_mutex while
 *							    holding this lock.
 *	=====================================================================
 *	page_cachelist	page_freelock	page_freelock	    can't acquire any
 *	page_freelist	page_freelock	page_freelock	    other mutexes while
 *							    holding this lock.
 *	=====================================================================
 *	freemem		freemem_lock	freemem_lock	    can't acquire any
 *	freemem_wait					    other mutexes while
 *	freemem_cv					    holding this mutex.
 *	=====================================================================
 *
 * Page relocation, PG_NORELOC and P_NORELOC.
 *
 * Pages may be relocated using the page_relocate() interface.  Relocation
 * involves moving the contents and identity of a page to another, free page.
 * To relocate a page, the SE_EXCL lock must be obtained.  The way to prevent
 * a page from being relocated is to hold the SE_SHARED lock (the SE_EXCL
 * lock must not be held indefinitely).  If the page is going to be held
 * SE_SHARED indefinitely, then the PG_NORELOC hint should be passed
 * to page_create_va so that pages that are prevented from being relocated
 * can be managed differently by the platform specific layer.
 *
 * Pages locked in memory using page_pp_lock (p_lckcnt/p_cowcnt != 0)
 * are guaranteed to be held in memory, but can still be relocated
 * providing the SE_EXCL lock can be obtained.
 *
 * The P_NORELOC bit in the page_t.p_state field is provided for use by
 * the platform specific code in managing pages when the PG_NORELOC
 * hint is used.
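 *
 * For example (a hedged sketch only; error handling and the surrounding
 * list locking protocol are omitted), a subsystem that intends to hold a
 * page SE_SHARED indefinitely might allocate it as non-relocatable:
 *
 *	pp = page_create_va(vp, off, PAGESIZE, PG_WAIT | PG_NORELOC,
 *	    seg, vaddr);
 *	...initialize the page...
 *	page_downgrade(pp);	(drop from the SE_EXCL lock the page was
 *				created with to a long-term SE_SHARED
 *				hold; PP_ISNORELOC(pp) now tells the
 *				platform layer not to relocate it)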
 *
 * Memory delete and page locking.
 *
 * The set of all usable pages is managed using the global page list as
 * implemented by the memseg structure defined below.  When memory is added
 * or deleted this list changes.  Additions to this list guarantee that the
 * list is never corrupt.  In order to avoid the necessity of an additional
 * lock to protect against failed accesses to the memseg being deleted and,
 * more importantly, the page_ts, the memseg structure is never freed and the
 * page_t virtual address space is remapped to a page (or pages) of
 * zeros.  If a page_t is manipulated while it is p_selock'd, or if it is
 * locked indirectly via a hash or freelist lock, it is not possible for
 * memory delete to collect the page and so that part of the page list is
 * prevented from being deleted.  If the page is referenced outside of one
 * of these locks, it is possible for the page_t being referenced to be
 * deleted.  Examples of this are page_t pointers returned by
 * page_numtopp_nolock, page_first and page_next.  Providing the page_t
 * is re-checked after taking the p_selock (for p_vnode != NULL), the
 * remapping to the zero pages will be detected.
 *
 *
 * Page size (p_szc field) and page locking.
 *
 * The p_szc field of free pages is changed by the free list manager under
 * freelist locks and is of no concern to the rest of the VM subsystem.
 *
 * The p_szc of allocated anonymous (swapfs) pages can only be changed after
 * exclusively locking all constituent pages and calling hat_pageunload() on
 * each of them.  To prevent p_szc changes of non-free anonymous (swapfs)
 * large pages it's enough to either lock SHARED any of the constituent
 * pages or to prevent hat_pageunload() by holding the hat level lock that
 * protects mapping lists (this method is for hat code only).
 *
 * To increase (promote) the p_szc of allocated non-anonymous file system
 * pages one has to first lock exclusively all involved constituent pages
 * and call hat_pageunload() on each of them.  To prevent a p_szc promotion
 * it's enough to either lock SHARED any of the constituent pages that will
 * be needed to make a large page or to prevent hat_pageunload() by holding
 * the hat level lock that protects mapping lists (this method is for hat
 * code only).
 *
 * To decrease (demote) the p_szc of an allocated non-anonymous file system
 * large page one can either use the same method as used for changing the
 * p_szc of anonymous large pages or, if it's not possible to lock all
 * constituent pages exclusively, a different method can be used.  In the
 * second method one only has to exclusively lock one of the constituent
 * pages, but then one has to acquire further locks by calling
 * page_szc_lock() and hat_page_demote().  hat_page_demote() acquires hat
 * level locks and then demotes the page.  This mechanism relies on the
 * fact that any code that needs to prevent the p_szc of a file system
 * large page from changing either locks all constituent pages at least
 * SHARED or locks some pages at least SHARED and calls page_szc_lock() or
 * uses hat level page locks.  Demotion using this method is implemented
 * by page_demote_vp_pages().  Please see comments in front of
 * page_demote_vp_pages(), hat_page_demote() and page_szc_lock() for more
 * details.
 *
 * Lock order: p_selock, page_szc_lock, ph_mutex/vph_mutex/freelist,
 * hat level locks.
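 *
 * As a hedged illustration of that lock order (a sketch, not the actual
 * page_demote_vp_pages() code), the second demotion method acquires its
 * locks roughly as follows:
 *
 *	if (page_trylock(pp, SE_EXCL)) {	(one constituent page)
 *		kmutex_t *szcm = page_szc_lock(pp);
 *		...have the hat layer demote the large page, e.g. via
 *		...hat_page_demote(), which takes its own hat level locks
 *		if (szcm != NULL)
 *			mutex_exit(szcm);
 *		page_unlock(pp);
 *	}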
 */

typedef struct page {
	u_offset_t	p_offset;	/* offset into vnode for this page */
	struct vnode	*p_vnode;	/* vnode that this page is named by */
	selock_t	p_selock;	/* shared/exclusive lock on the page */
#if defined(_LP64)
	uint_t		p_vpmref;	/* vpm ref - index of the vpmap_t */
#endif
	struct page	*p_hash;	/* hash by [vnode, offset] */
	struct page	*p_vpnext;	/* next page in vnode list */
	struct page	*p_vpprev;	/* prev page in vnode list */
	struct page	*p_next;	/* next page in free/intrans lists */
	struct page	*p_prev;	/* prev page in free/intrans lists */
	ushort_t	p_lckcnt;	/* number of locks on page data */
	ushort_t	p_cowcnt;	/* number of copy-on-write locks */
	kcondvar_t	p_cv;		/* page struct's condition var */
	kcondvar_t	p_io_cv;	/* for iolock */
	uchar_t		p_iolock_state;	/* replaces p_iolock */
	volatile uchar_t p_szc;		/* page size code */
	uchar_t		p_fsdata;	/* file system dependent byte */
	uchar_t		p_state;	/* p_free, p_noreloc */
	uchar_t		p_nrm;		/* non-cache, ref, mod readonly bits */
#if defined(__sparc)
	uchar_t		p_vcolor;	/* virtual color */
#else
	uchar_t		p_embed;	/* x86 - changes p_mapping & p_index */
#endif
	uchar_t		p_index;	/* MPSS mapping info.  Not used on x86 */
	uchar_t		p_toxic;	/* page has an unrecoverable error */
	void		*p_mapping;	/* hat specific translation info */
	pfn_t		p_pagenum;	/* physical page number */

	uint_t		p_share;	/* number of translations */
#if defined(_LP64)
	uint_t		p_sharepad;	/* pad for growing p_share */
#endif
	uint_t		p_slckcnt;	/* number of softlocks */
#if defined(__sparc)
	uint_t		p_kpmref;	/* number of kpm mapping sharers */
	struct kpme	*p_kpmelist;	/* kpm specific mapping info */
#else
	/* index of entry in p_map when p_embed is set */
	uint_t		p_mlentry;
#endif
#if defined(_LP64)
	kmutex_t	p_ilock;	/* protects p_vpmref */
#else
	uint64_t	p_msresv_2;	/* page allocation debugging */
#endif
} page_t;


typedef	page_t	devpage_t;
#define	devpage	page

#define	PAGE_LOCK_MAXIMUM \
	((1 << (sizeof (((page_t *)0)->p_lckcnt) * NBBY)) - 1)

#define	PAGE_SLOCK_MAXIMUM UINT_MAX

/*
 * Page hash table is a power-of-two in size, externally chained
 * through the hash field.  PAGE_HASHAVELEN is the average length
 * desired for this chain, from which the size of the page_hash
 * table is derived at boot time and stored in the kernel variable
 * page_hashsz.  In the hash function it is given by PAGE_HASHSZ.
 *
 * PAGE_HASH_FUNC returns an index into the page_hash[] array.  This
 * index is also used to derive the mutex that protects the chain.
 *
 * In constructing the hash function, first we dispose of unimportant bits
 * (the page offset from "off" and the low 3 bits of "vp", which are zero
 * for struct alignment).  Then we shift and sum the remaining bits a
 * couple of times in order to get as many of the source bits from the two
 * source values into the resulting hashed value as possible.  Note that
 * this will perform quickly, since the shifting/summing are fast
 * register-to-register operations with no additional memory references.
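 *
 * For example (a hedged sketch; both macros are defined below), the hash
 * index and the mutex that protects its chain are always derived as a
 * pair from the same [vp, off] identity:
 *
 *	ulong_t index = PAGE_HASH_FUNC(vp, off);
 *	kmutex_t *phm = PAGE_HASH_MUTEX(index);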
 */
#if defined(_LP64)

#if NCPU < 4
#define	PH_TABLE_SIZE	128
#define	VP_SHIFT	7
#else
#define	PH_TABLE_SIZE	1024
#define	VP_SHIFT	9
#endif

#else	/* 32 bits */

#if NCPU < 4
#define	PH_TABLE_SIZE	16
#define	VP_SHIFT	7
#else
#define	PH_TABLE_SIZE	128
#define	VP_SHIFT	9
#endif

#endif	/* _LP64 */

/*
 * The amount to use for the successive shifts in the hash function below.
 * The actual value is LOG2(PH_TABLE_SIZE), so that as many bits as
 * possible will filter thru PAGE_HASH_FUNC() and PAGE_HASH_MUTEX().
 */
#define	PH_SHIFT_SIZE	(7)

#define	PAGE_HASHSZ	page_hashsz
#define	PAGE_HASHAVELEN	4
#define	PAGE_HASH_FUNC(vp, off) \
	((((uintptr_t)(off) >> PAGESHIFT) + \
	((uintptr_t)(off) >> (PAGESHIFT + PH_SHIFT_SIZE)) + \
	((uintptr_t)(vp) >> 3) + \
	((uintptr_t)(vp) >> (3 + PH_SHIFT_SIZE)) + \
	((uintptr_t)(vp) >> (3 + 2 * PH_SHIFT_SIZE))) & \
	(PAGE_HASHSZ - 1))
#ifdef	_KERNEL

/*
 * The page hash value is re-hashed to an index for the ph_mutex array.
 *
 * For 64 bit kernels, the mutex array is padded out to prevent false
 * sharing of cache sub-blocks (64 bytes) of adjacent mutexes.
 *
 * For 32 bit kernels, we don't want to waste kernel address space with
 * padding, so instead we rely on the hash function to introduce skew of
 * adjacent vnode/offset indexes (the left shift part of the hash function).
 * Since sizeof (kmutex_t) is 8, we shift an additional 3 to skew to a
 * different 64 byte sub-block.
 */
typedef struct pad_mutex {
	kmutex_t	pad_mutex;
#ifdef _LP64
	char		pad_pad[64 - sizeof (kmutex_t)];
#endif
} pad_mutex_t;
extern pad_mutex_t ph_mutex[];

#define	PAGE_HASH_MUTEX(x) \
	&(ph_mutex[((x) + ((x) >> VP_SHIFT) + ((x) << 3)) & \
		(PH_TABLE_SIZE - 1)].pad_mutex)

/*
 * Flags used while creating pages.
 */
#define	PG_EXCL		0x0001
#define	PG_WAIT		0x0002
#define	PG_PHYSCONTIG	0x0004		/* NOT SUPPORTED */
#define	PG_MATCH_COLOR	0x0008		/* SUPPORTED by free list routines */
#define	PG_NORELOC	0x0010		/* Non-relocatable alloc hint. */
					/* Page must be PP_ISNORELOC */
#define	PG_PANIC	0x0020		/* system will panic if alloc fails */
#define	PG_PUSHPAGE	0x0040		/* alloc may use reserve */
#define	PG_LOCAL	0x0080		/* alloc from given lgrp only */

/*
 * When p_selock has the SE_EWANTED bit set, threads waiting for SE_EXCL
 * access are given priority over all other waiting threads.
 */
#define	SE_EWANTED	0x40000000
#define	PAGE_LOCKED(pp)		(((pp)->p_selock & ~SE_EWANTED) != 0)
#define	PAGE_SHARED(pp)		(((pp)->p_selock & ~SE_EWANTED) > 0)
#define	PAGE_EXCL(pp)		((pp)->p_selock < 0)
#define	PAGE_LOCKED_SE(pp, se)	\
	((se) == SE_EXCL ? PAGE_EXCL(pp) : PAGE_SHARED(pp))

extern	long page_hashsz;
extern	page_t **page_hash;

extern	kmutex_t page_llock;		/* page logical lock mutex */
extern	kmutex_t freemem_lock;		/* freemem lock */

extern	pgcnt_t	total_pages;		/* total pages in the system */

/*
 * Variables controlling locking of physical memory.
 */
extern	pgcnt_t	pages_pp_maximum;	/* tuning: lock + claim <= max */
extern	void init_pages_pp_maximum(void);

struct lgrp;

/* page_list_{add,sub} flags */

	/* which list */
#define	PG_FREE_LIST	0x0001
#define	PG_CACHE_LIST	0x0002

	/* where on list */
#define	PG_LIST_TAIL	0x0010
#define	PG_LIST_HEAD	0x0020

	/* called from */
#define	PG_LIST_ISINIT	0x1000

/*
 * Page frame operations.
 */
page_t	*page_lookup(struct vnode *, u_offset_t, se_t);
page_t	*page_lookup_create(struct vnode *, u_offset_t, se_t, page_t *,
	spgcnt_t *, int);
page_t	*page_lookup_nowait(struct vnode *, u_offset_t, se_t);
page_t	*page_find(struct vnode *, u_offset_t);
page_t	*page_exists(struct vnode *, u_offset_t);
int	page_exists_physcontig(vnode_t *, u_offset_t, uint_t, page_t *[]);
int	page_exists_forreal(struct vnode *, u_offset_t, uint_t *);
void	page_needfree(spgcnt_t);
page_t	*page_create(struct vnode *, u_offset_t, size_t, uint_t);
int	page_alloc_pages(struct vnode *, struct seg *, caddr_t, page_t **,
	page_t **, uint_t, int, int);
page_t	*page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes,
	uint_t flags, struct seg *seg, caddr_t vaddr, void *arg);
page_t	*page_create_va(struct vnode *, u_offset_t, size_t, uint_t,
	struct seg *, caddr_t);
int	page_create_wait(pgcnt_t npages, uint_t flags);
void	page_create_putback(spgcnt_t npages);
void	page_free(page_t *, int);
void	page_free_at_startup(page_t *);
void	page_free_pages(page_t *);
void	free_vp_pages(struct vnode *, u_offset_t, size_t);
int	page_reclaim(page_t *, kmutex_t *);
int	page_reclaim_pages(page_t *, kmutex_t *, uint_t);
void	page_destroy(page_t *, int);
void	page_destroy_pages(page_t *);
void	page_destroy_free(page_t *);
void	page_rename(page_t *, struct vnode *, u_offset_t);
int	page_hashin(page_t *, struct vnode *, u_offset_t, kmutex_t *);
void	page_hashout(page_t *, kmutex_t *);
int	page_num_hashin(pfn_t, struct vnode *, u_offset_t);
void	page_add(page_t **, page_t *);
void	page_add_common(page_t **, page_t *);
void	page_sub(page_t **, page_t *);
void	page_sub_common(page_t **, page_t *);
page_t	*page_get_freelist(struct vnode *, u_offset_t, struct seg *,
	caddr_t, size_t, uint_t, struct lgrp *);

page_t	*page_get_cachelist(struct vnode *, u_offset_t, struct seg *,
	caddr_t, uint_t, struct lgrp *);
#if defined(__i386) || defined(__amd64)
int	page_chk_freelist(uint_t);
#endif
void	page_list_add(page_t *, int);
void	page_boot_demote(page_t *);
void	page_promote_size(page_t *, uint_t);
void	page_list_add_pages(page_t *, int);
void	page_list_sub(page_t *, int);
void	page_list_sub_pages(page_t *, uint_t);
void	page_list_xfer(page_t *, int, int);
void	page_list_break(page_t **, page_t **, size_t);
void	page_list_concat(page_t **, page_t **);
void	page_vpadd(page_t **, page_t *);
void	page_vpsub(page_t **, page_t *);
int	page_lock(page_t *, se_t, kmutex_t *, reclaim_t);
int	page_lock_es(page_t *, se_t, kmutex_t *, reclaim_t, int);
void	page_lock_clr_exclwanted(page_t *);
int	page_trylock(page_t *, se_t);
int	page_try_reclaim_lock(page_t *, se_t, int);
int	page_tryupgrade(page_t *);
void	page_downgrade(page_t *);
void	page_unlock(page_t *);
void	page_unlock_nocapture(page_t *);
void	page_lock_delete(page_t *);
int	page_deleted(page_t *);
int	page_pp_lock(page_t *, int, int);
void	page_pp_unlock(page_t *, int, int);
int	page_resv(pgcnt_t, uint_t);
void	page_unresv(pgcnt_t);
void	page_pp_useclaim(page_t *, page_t *, uint_t);
int	page_addclaim(page_t *);
int	page_subclaim(page_t *);
int	page_addclaim_pages(page_t **);
int	page_subclaim_pages(page_t **);
pfn_t	page_pptonum(page_t *);
page_t	*page_numtopp(pfn_t, se_t);
page_t	*page_numtopp_noreclaim(pfn_t, se_t);
page_t	*page_numtopp_nolock(pfn_t);
page_t	*page_numtopp_nowait(pfn_t, se_t);
page_t	*page_first();
page_t	*page_next(page_t *);
page_t	*page_list_next(page_t *);
page_t	*page_nextn(page_t *, ulong_t);
page_t	*page_next_scan_init(void **);
page_t	*page_next_scan_large(page_t *, ulong_t *, void **);
void	prefetch_page_r(void *);
int	ppcopy(page_t *, page_t *);
void	page_relocate_hash(page_t *, page_t *);
void	pagezero(page_t *, uint_t, uint_t);
void	pagescrub(page_t *, uint_t, uint_t);
void	page_io_lock(page_t *);
void	page_io_unlock(page_t *);
int	page_io_trylock(page_t *);
int	page_iolock_assert(page_t *);
void	page_iolock_init(page_t *);
void	page_io_wait(page_t *);
int	page_io_locked(page_t *);
pgcnt_t	page_busy(int);
void	page_lock_init(void);
ulong_t	page_share_cnt(page_t *);
int	page_isshared(page_t *);
int	page_isfree(page_t *);
int	page_isref(page_t *);
int	page_ismod(page_t *);
int	page_release(page_t *, int);
void	page_retire_init(void);
int	page_retire(uint64_t, uchar_t);
int	page_retire_check(uint64_t, uint64_t *);
int	page_unretire(uint64_t);
int	page_unretire_pp(page_t *, int);
void	page_tryretire(page_t *);
void	page_retire_mdboot();
uint64_t	page_retire_pend_count(void);
uint64_t	page_retire_pend_kas_count(void);
void	page_retire_incr_pend_count(void *);
void	page_retire_decr_pend_count(void *);
void	page_clrtoxic(page_t *, uchar_t);
void	page_settoxic(page_t *, uchar_t);

int	page_mem_avail(pgcnt_t);
int	page_reclaim_mem(pgcnt_t, pgcnt_t, int);

void	page_set_props(page_t *, uint_t);
void	page_clr_all_props(page_t *);
int	page_clear_lck_cow(page_t *, int);

kmutex_t	*page_vnode_mutex(struct vnode *);
kmutex_t	*page_se_mutex(struct page *);
kmutex_t	*page_szc_lock(struct page *);
int		page_szc_lock_assert(struct page *pp);

/*
 * Page relocation interfaces.  page_relocate() is generic.
 * page_get_replacement_page() is provided by the PSM.
 * page_free_replacement_page() is generic.
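 *
 * A hedged usage sketch (illustrative only; the argument conventions and
 * the ownership of the replacement pages are details of the
 * page_relocate() implementation, and error handling is elided):
 *
 *	spgcnt_t nrelocd;
 *	page_t *newpp = NULL;	(assumed: a NULL replacement asks
 *				page_relocate() to allocate one itself)
 *
 *	if (page_trylock(pp, SE_EXCL)) {
 *		if (page_relocate(&pp, &newpp, 0, 0, &nrelocd, NULL) != 0)
 *			page_unlock(pp);	(relocation failed; pp is
 *						still ours to unlock)
 *	}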
 */
int	group_page_trylock(page_t *, se_t);
void	group_page_unlock(page_t *);
int	page_relocate(page_t **, page_t **, int, int, spgcnt_t *,
	struct lgrp *);
int	do_page_relocate(page_t **, page_t **, int, spgcnt_t *,
	struct lgrp *);
page_t	*page_get_replacement_page(page_t *, struct lgrp *, uint_t);
void	page_free_replacement_page(page_t *);
int	page_relocate_cage(page_t **, page_t **);

int	page_try_demote_pages(page_t *);
int	page_try_demote_free_pages(page_t *);
void	page_demote_free_pages(page_t *);

struct anon_map;

void	page_mark_migrate(struct seg *, caddr_t, size_t, struct anon_map *,
	ulong_t, vnode_t *, u_offset_t, int);
void	page_migrate(struct seg *, caddr_t, page_t **, pgcnt_t);

/*
 * Tell the PIM we are adding physical memory
 */
void	add_physmem(page_t *, size_t, pfn_t);
void	add_physmem_cb(page_t *, pfn_t);	/* callback for page_t part */

/*
 * hw_page_array[] is configured with hardware supported page sizes by
 * platform specific code.
 */
typedef struct {
	size_t	hp_size;
	uint_t	hp_shift;
	uint_t	hp_colors;
	pgcnt_t	hp_pgcnt;	/* base pagesize cnt */
} hw_pagesize_t;

extern hw_pagesize_t	hw_page_array[];
extern uint_t		page_coloring_shift;
extern uint_t		page_colors_mask;
extern int		cpu_page_colors;
extern uint_t		colorequiv;
extern uchar_t		colorequivszc[];

uint_t	page_num_pagesizes(void);
uint_t	page_num_user_pagesizes(int);
size_t	page_get_pagesize(uint_t);
size_t	page_get_user_pagesize(uint_t n);
pgcnt_t	page_get_pagecnt(uint_t);
uint_t	page_get_shift(uint_t);
int	page_szc(size_t);
int	page_szc_user_filtered(size_t);

/* page_get_replacement page flags */
#define	PGR_SAMESZC	0x1	/* only look for page size same as orig */
#define	PGR_NORELOC	0x2	/* allocate a P_NORELOC page */

/*
 * macros for "masked arithmetic"
 * The purpose is to step through all combinations of a set of bits while
 * keeping some other bits fixed.  Fixed bits need not be contiguous.  The
 * variable bits need not be contiguous either, or even right aligned.  The
 * trick is to set all fixed bits to 1, then increment, then restore the
 * fixed bits.  If incrementing causes a carry from a low bit position, the
 * carry propagates thru the fixed bits, because they are temporarily set
 * to 1.
 *	v is the value
 *	i is the increment
 *	eq_mask defines the fixed bits
 *	mask limits the size of the result
 */
#define	ADD_MASKED(v, i, eq_mask, mask) \
	(((((v) | (eq_mask)) + (i)) & (mask) & ~(eq_mask)) | ((v) & (eq_mask)))

/*
 * convenience macro which increments by 1
 */
#define	INC_MASKED(v, eq_mask, mask)	ADD_MASKED(v, 1, eq_mask, mask)

#endif	/* _KERNEL */

/*
 * Constants used for the p_iolock_state
 */
#define	PAGE_IO_INUSE	0x1
#define	PAGE_IO_WANTED	0x2

/*
 * Constants used for page_release status
 */
#define	PGREL_NOTREL	0x1
#define	PGREL_CLEAN	0x2
#define	PGREL_MOD	0x3

/*
 * The p_state field holds what used to be the p_age and p_free
 * bits.  These fields are protected by p_selock (see above).
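 *
 * For example (a sketch; the state macros themselves are defined below),
 * a thread moving a page to the free list updates p_state only while
 * holding the page exclusively:
 *
 *	ASSERT(PAGE_EXCL(pp));
 *	PP_SETFREE(pp);		(sets P_FREE and also clears P_MIGRATE)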
 */
#define	P_FREE		0x80		/* Page on free list */
#define	P_NORELOC	0x40		/* Page is non-relocatable */
#define	P_MIGRATE	0x20		/* Migrate page on next touch */
#define	P_SWAP		0x10		/* belongs to vnode that is V_ISSWAP */
#define	P_BOOTPAGES	0x08		/* member of bootpages list */
#define	P_RAF		0x04		/* page retired at free */

#define	PP_ISFREE(pp)		((pp)->p_state & P_FREE)
#define	PP_ISAGED(pp)		(((pp)->p_state & P_FREE) && \
					((pp)->p_vnode == NULL))
#define	PP_ISNORELOC(pp)	((pp)->p_state & P_NORELOC)
#define	PP_ISKAS(pp)		(VN_ISKAS((pp)->p_vnode))
#define	PP_ISNORELOCKERNEL(pp)	(PP_ISNORELOC(pp) && PP_ISKAS(pp))
#define	PP_ISMIGRATE(pp)	((pp)->p_state & P_MIGRATE)
#define	PP_ISSWAP(pp)		((pp)->p_state & P_SWAP)
#define	PP_ISBOOTPAGES(pp)	((pp)->p_state & P_BOOTPAGES)
#define	PP_ISRAF(pp)		((pp)->p_state & P_RAF)

#define	PP_SETFREE(pp)		((pp)->p_state = ((pp)->p_state & ~P_MIGRATE) \
				| P_FREE)
#define	PP_SETAGED(pp)		ASSERT(PP_ISAGED(pp))
#define	PP_SETNORELOC(pp)	((pp)->p_state |= P_NORELOC)
#define	PP_SETMIGRATE(pp)	((pp)->p_state |= P_MIGRATE)
#define	PP_SETSWAP(pp)		((pp)->p_state |= P_SWAP)
#define	PP_SETBOOTPAGES(pp)	((pp)->p_state |= P_BOOTPAGES)
#define	PP_SETRAF(pp)		((pp)->p_state |= P_RAF)

#define	PP_CLRFREE(pp)		((pp)->p_state &= ~P_FREE)
#define	PP_CLRAGED(pp)		ASSERT(!PP_ISAGED(pp))
#define	PP_CLRNORELOC(pp)	((pp)->p_state &= ~P_NORELOC)
#define	PP_CLRMIGRATE(pp)	((pp)->p_state &= ~P_MIGRATE)
#define	PP_CLRSWAP(pp)		((pp)->p_state &= ~P_SWAP)
#define	PP_CLRBOOTPAGES(pp)	((pp)->p_state &= ~P_BOOTPAGES)
#define	PP_CLRRAF(pp)		((pp)->p_state &= ~P_RAF)

/*
 * Flags for page_t p_toxic, for tracking memory hardware errors.
 *
 * These flags are OR'ed into p_toxic with page_settoxic() to track which
 * error(s) have occurred on a given page.  The flags are cleared with
 * page_clrtoxic().  Both page_settoxic() and page_clrtoxic() use atomic
 * primitives to manipulate the p_toxic field, so no other locking is needed.
 *
 * When an error occurs on a page, p_toxic is set to record the error.  The
 * error could be a memory error or something else (e.g., a datapath error).
 * The Page Retire mechanism does not try to determine the exact cause of
 * the error; Page Retire rightly leaves that sort of determination to FMA's
 * Diagnostic Engine (DE).
 *
 * Note that, while p_toxic bits can be set without holding any locks, they
 * should only be cleared while holding the page exclusively locked.
 * There is one exception to this: the PR_CAPTURE bit is protected by a mutex
 * within the page capture logic, and to set or clear the bit that mutex
 * needs to be held.  The page does not need to be locked, but the
 * page_clrtoxic() function must be used, as we need an atomic operation.
 * Also note that there is what amounts to a hack to prevent recursion with
 * large pages, such that if we are unlocking a page and the PR_CAPTURE bit
 * is set, we will only try to capture the page if the current thread's
 * T_CAPTURING flag is not set.  If the flag is set, the unlock will not try
 * to capture the page even though the PR_CAPTURE bit is set.
 *
 * Pages with PR_UE or PR_FMA flags are retired unconditionally, while pages
 * with PR_MCE are retired if the system has not retired too many of them.
 *
 * A page must be exclusively locked to be retired.
 * Pages can be retired if they are mapped, modified, or both, as long as
 * they are not marked PR_UE, since pages with uncorrectable errors cannot
 * be relocated in memory.
 * Once a page has been successfully retired it is zeroed, attached to the
 * retired_pages vnode and, finally, PR_RETIRED is set in p_toxic.  The
 * other p_toxic bits are NOT cleared.  Pages are not left locked after
 * retiring them, to avoid special case code throughout the kernel; rather,
 * page_*lock() will fail to lock the page, unless SE_RETIRED is passed as
 * an argument.
 *
 * While we have your attention, go take a look at the comments at the
 * beginning of page_retire.c too.
 */
#define	PR_OK		0x00	/* no problem */
#define	PR_MCE		0x01	/* page has seen two or more CEs */
#define	PR_UE		0x02	/* page has an unhandled UE */
#define	PR_UE_SCRUBBED	0x04	/* page has seen a UE but was cleaned */
#define	PR_FMA		0x08	/* A DE wants this page retired */
#define	PR_CAPTURE	0x10	/* Generic page capture flag */
#define	PR_RESV		0x20	/* Reserved for future use */
#define	PR_MSG		0x40	/* message(s) already printed for this page */
#define	PR_RETIRED	0x80	/* This page has been retired */

#define	PR_REASONS	(PR_UE | PR_MCE | PR_FMA)
#define	PR_TOXIC	(PR_UE)
#define	PR_ERRMASK	(PR_UE | PR_UE_SCRUBBED | PR_MCE | PR_FMA)
#define	PR_TOXICFLAGS	(0xCF)

#define	PP_RETIRED(pp)	((pp)->p_toxic & PR_RETIRED)
#define	PP_TOXIC(pp)	((pp)->p_toxic & PR_TOXIC)
#define	PP_PR_REQ(pp)	(((pp)->p_toxic & PR_REASONS) && !PP_RETIRED(pp))
#define	PP_PR_NOSHARE(pp)						\
	((((pp)->p_toxic & (PR_RETIRED | PR_FMA | PR_UE)) == PR_FMA) &&	\
	!PP_ISKAS(pp))

/*
 * Flags for page_unretire_pp
 */
#define	PR_UNR_FREE	0x1
#define	PR_UNR_CLEAN	0x2
#define	PR_UNR_TEMP	0x4

/*
 * kpm large page description.
 * The virtual address range of segkpm is divided into chunks of
 * kpm_pgsz.  Each chunk is controlled by a kpm_page_t.  The ushort
 * is sufficient for 2^15 * PAGESIZE, so e.g. the maximum kpm_pgsz
 * for 8K pages is 256M, and 2G for 64K pages.  It is kept as small as
 * possible to save physical memory space.
 *
 * There are 2 segkpm mapping windows within the virtual address space
 * when we have to prevent VAC alias conflicts.  The so-called Alias
 * window (mappings are always by PAGESIZE) is controlled by kp_refcnta.
 * The regular window is controlled by kp_refcnt for the normal operation,
 * which is to use the largest available pagesize.  When VAC alias
 * conflicts are present within a chunk in the regular window, the large
 * page mapping is broken up into smaller PAGESIZE mappings.  kp_refcntc
 * is used to control the pages that are involved in the conflict and
 * kp_refcnts holds the active mappings done with the small page size.
 * In non-VAC-conflict mode, kp_refcntc is also used as a "go" indication
 * (-1) for the trap level tsbmiss handler.
 */
typedef struct kpm_page {
	short kp_refcnt;	/* pages mapped large */
	short kp_refcnta;	/* pages mapped in Alias window */
	short kp_refcntc;	/* TL-tsbmiss flag; #vac alias conflict pages */
	short kp_refcnts;	/* vac alias: pages mapped small */
} kpm_page_t;

/*
 * Note: khl_lock offset changes must be reflected in sfmmu_asm.s
 */
typedef struct kpm_hlk {
	kmutex_t khl_mutex;	/* kpm_page mutex */
	uint_t khl_lock;	/* trap level tsbmiss handling */
} kpm_hlk_t;

/*
 * kpm small page description.
 * When kpm_pgsz is equal to PAGESIZE a smaller representation is used
 * to save memory space.  Alias range mappings and regular segkpm
 * mappings are done in units of PAGESIZE and can share the mapping
 * information, and the mappings are always distinguishable by their
 * virtual address.  Other information needed for VAC conflict prevention
 * is already available on a per page basis.
 *
 * The state about how a kpm page is mapped and whether it is ready to go
 * is indicated by the following 1 byte kpm_spage structure.  This byte is
 * split into two 4-bit parts - kp_mapped and kp_mapped_go.
 *	- kp_mapped == 1	the page is mapped cacheable
 *	- kp_mapped == 2	the page is mapped non-cacheable
 *	- kp_mapped_go == 1	the mapping is ready to be dropped in
 *	- kp_mapped_go == 0	the mapping is not ready to be dropped in.
 * When kp_mapped_go == 0, we will have the C handler resolve the VAC
 * conflict.  Otherwise, the assembly tsb miss handler can simply drop in
 * the mapping when a tsb miss occurs.
 */
typedef union kpm_spage {
	struct {
#ifdef	_BIG_ENDIAN
		uchar_t mapped_go: 4;	/* go or nogo flag */
		uchar_t mapped: 4;	/* page mapped small */
#else
		uchar_t mapped: 4;	/* page mapped small */
		uchar_t mapped_go: 4;	/* go or nogo flag */
#endif
	} kpm_spage_un;
	uchar_t kp_mapped_flag;
} kpm_spage_t;

#define	kp_mapped	kpm_spage_un.mapped
#define	kp_mapped_go	kpm_spage_un.mapped_go

/*
 * Note: kshl_lock offset changes must be reflected in sfmmu_asm.s
 */
typedef struct kpm_shlk {
	uint_t kshl_lock;	/* trap level tsbmiss handling */
} kpm_shlk_t;

/*
 * Each segment of physical memory is described by a memseg struct.
 * Within a segment, memory is considered contiguous.  The members
 * can be categorized as follows:
 * . Platform independent:
 *	pages, epages, pages_base, pages_end, next, lnext.
 * . 64bit only but platform independent:
 *	kpm_pbase, kpm_nkpmpgs, kpm_pages, kpm_spages.
 * . Really platform or mmu specific:
 *	pagespa, epagespa, nextpa, kpm_pagespa.
 * . Mixed:
 *	msegflags.
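 *
 * As a hedged illustration (compare the real page_numtomemseg_nolock()
 * and the memseg hash declared below, which shortcut this search), a pfn
 * can be resolved to its page_t by walking the memseg list:
 *
 *	struct memseg *seg;
 *
 *	for (seg = memsegs; seg != NULL; seg = seg->next) {
 *		if (pfn >= seg->pages_base && pfn < seg->pages_end)
 *			return (seg->pages + (pfn - seg->pages_base));
 *	}
 *	return (NULL);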
 */
struct memseg {
	page_t *pages, *epages;		/* [from, to] in page array */
	pfn_t pages_base, pages_end;	/* [from, to] in page numbers */
	struct memseg *next;		/* next segment in list */
	struct memseg *lnext;		/* next segment in deleted list */
#if defined(__sparc)
	uint64_t pagespa, epagespa;	/* [from, to] page array physical */
	uint64_t nextpa;		/* physical next pointer */
	pfn_t kpm_pbase;		/* start of kpm range */
	pgcnt_t kpm_nkpmpgs;		/* # of kpm_pgsz pages */
	union _mseg_un {
		kpm_page_t *kpm_lpgs;	/* ptr to kpm_page array */
		kpm_spage_t *kpm_spgs;	/* ptr to kpm_spage array */
	} mseg_un;
	uint64_t kpm_pagespa;		/* physical ptr to kpm (s)pages array */
#endif	/* __sparc */
	uint_t msegflags;		/* memseg flags */
};

/* memseg union aliases */
#define	kpm_pages	mseg_un.kpm_lpgs
#define	kpm_spages	mseg_un.kpm_spgs

/* msegflags */
#define	MEMSEG_DYNAMIC		0x1	/* DR: memory was added dynamically */
#define	MEMSEG_META_INCL	0x2	/* DR: memseg includes its metadata */
#define	MEMSEG_META_ALLOC	0x4	/* DR: memseg allocated its metadata */

/* memseg support macros */
#define	MSEG_NPAGES(SEG)	((SEG)->pages_end - (SEG)->pages_base)

/* memseg hash */
#define	MEM_HASH_SHIFT	0x9
#define	N_MEM_SLOTS	0x200		/* must be a power of 2 */
#define	MEMSEG_PFN_HASH(pfn)	(((pfn)/mhash_per_slot) & (N_MEM_SLOTS - 1))

/* memseg externals */
extern struct memseg	*memsegs;	/* list of memory segments */
extern ulong_t		mhash_per_slot;
extern uint64_t		memsegspa;	/* memsegs as physical address */

void build_pfn_hash();
extern struct memseg *page_numtomemseg_nolock(pfn_t pfnum);

/*
 * page capture related info:
 * The page capture routines allow us to asynchronously capture given pages
 * for the explicit use of the requestor.  New requestors can be added by
 * explicitly adding themselves to the PC_* flags below and incrementing
 * PC_NUM_CALLBACKS as necessary.
 *
 * Subsystems using page capture must register a callback before attempting
 * to capture a page.  A duration of -1 will indicate that we will never give
 * up while trying to capture a page and will only stop trying to capture the
 * given page once we have successfully captured it.  Thus the user needs to
 * be aware of the behavior of all callers who have a duration of -1.
 *
 * For now, only /dev/physmem and page retire use the page capture interface,
 * and only a single request can be outstanding for a given page.  Thus, if
 * /dev/physmem wants a page and page retire also wants the same page, only
 * the page retire request will be honored until the point in time that the
 * page is actually retired, at which point in time, subsequent requests by
 * /dev/physmem will succeed if the CAPTURE_GET_RETIRED flag was set.
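 *
 * A hedged usage sketch (the callback body and its return convention are
 * hypothetical; see page_retire.c and the /dev/physmem driver for the
 * real consumers):
 *
 *	static int
 *	my_capture_cb(page_t *pp, void *datap, uint_t flags)
 *	{
 *		...consume the captured page...
 *		return (0);		(assumed: 0 means success)
 *	}
 *
 *	page_capture_register_callback(PC_PHYSMEM, -1, my_capture_cb);
 *	(void) page_trycapture(pp, 0, CAPTURE_PHYSMEM, NULL);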
 */

#define	PC_RETIRE		(0)
#define	PC_PHYSMEM		(1)
#define	PC_NUM_CALLBACKS	(2)
#define	PC_MASK			((1 << PC_NUM_CALLBACKS) - 1)

#define	CAPTURE_RETIRE		(1 << PC_RETIRE)
#define	CAPTURE_PHYSMEM		(1 << PC_PHYSMEM)

#define	CAPTURE_ASYNC		(0x0200)

#define	CAPTURE_GET_RETIRED	(0x1000)
#define	CAPTURE_GET_CAGE	(0x2000)

struct page_capture_callback {
	int cb_active;		/* 1 means active, 0 means inactive */
	clock_t duration;	/* the length of time that we'll attempt to */
				/* capture this page asynchronously. (in HZ) */
	krwlock_t cb_rwlock;
	int (*cb_func)(page_t *, void *, uint_t); /* callback function */
};

extern kcondvar_t pc_cv;

void page_capture_register_callback(uint_t index, clock_t duration,
    int (*cb_func)(page_t *, void *, uint_t));
void page_capture_unregister_callback(uint_t index);
int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap);
void page_unlock_capture(page_t *pp);
int page_capture_unretire_pp(page_t *);

extern int memsegs_trylock(int);
extern void memsegs_lock(int);
extern void memsegs_unlock(int);
extern int memsegs_lock_held(void);
extern void memlist_read_lock(void);
extern void memlist_read_unlock(void);
extern void memlist_write_lock(void);
extern void memlist_write_unlock(void);

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_PAGE_H */