/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#ifndef	_VM_PAGE_H
#define	_VM_PAGE_H

#include <vm/seg.h>

#ifdef	__cplusplus
extern "C" {
#endif

#if defined(_KERNEL) || defined(_KMEMUSER)

/*
 * Shared/Exclusive lock.
 */

/*
 * Types of page locking supported by page_lock & friends.
 */
typedef enum {
	SE_SHARED,
	SE_EXCL			/* exclusive lock (value == -1) */
} se_t;

/*
 * For requesting that page_lock reclaim the page from the free list.
 */
typedef enum {
	P_RECLAIM,		/* reclaim page from free list */
	P_NO_RECLAIM		/* DON'T reclaim the page */
} reclaim_t;

/*
 * Callers of page_try_reclaim_lock and page_lock_es can use this flag
 * to get SE_EXCL access before reader/writers are given access.
 */
#define	SE_EXCL_WANTED	0x02

/*
 * All page_*lock() requests will be denied unless this flag is set in
 * the 'es' parameter.
 */
#define	SE_RETIRED	0x04

#endif	/* _KERNEL | _KMEMUSER */

typedef int	selock_t;

/*
 * Define VM_STATS to turn on all sorts of statistic gathering about
 * the VM layer.  By default, it is only turned on when DEBUG is
 * also defined.
 */
#ifdef DEBUG
#define	VM_STATS
#endif	/* DEBUG */

#ifdef VM_STATS
#define	VM_STAT_ADD(stat)		(stat)++
#define	VM_STAT_COND_ADD(cond, stat)	((void) (!(cond) || (stat)++))
#else
#define	VM_STAT_ADD(stat)
#define	VM_STAT_COND_ADD(cond, stat)
#endif	/* VM_STATS */

#ifdef _KERNEL

/*
 * PAGE_LLOCK_SIZE is 2 * NCPU, but no smaller than 128.
 * PAGE_LLOCK_SHIFT is log2(PAGE_LLOCK_SIZE).
 */
#if ((2*NCPU_P2) > 128)
#define	PAGE_LLOCK_SHIFT	((unsigned)(NCPU_LOG2 + 1))
#else
#define	PAGE_LLOCK_SHIFT	7U
#endif
#define	PAGE_LLOCK_SIZE	(1 << PAGE_LLOCK_SHIFT)

/*
 * The number of low order 0 (or less variable) bits in the page_t address.
 */
#if defined(__sparc)
#define	PP_SHIFT	7
#else
#define	PP_SHIFT	6
#endif

/*
 * pp may be the root of a large page, and many low order bits will be 0.
 * Shift and XOR multiple times to capture the good bits across the range of
 * possible page sizes.
 */
#define	PAGE_LLOCK_HASH(pp)	\
	(((((uintptr_t)(pp) >> PP_SHIFT) ^ \
	((uintptr_t)(pp) >> (PAGE_LLOCK_SHIFT + PP_SHIFT))) ^ \
	((uintptr_t)(pp) >> ((PAGE_LLOCK_SHIFT * 2) + PP_SHIFT)) ^ \
	((uintptr_t)(pp) >> ((PAGE_LLOCK_SHIFT * 3) + PP_SHIFT))) & \
	(PAGE_LLOCK_SIZE - 1))

#define	page_struct_lock(pp)	\
	mutex_enter(&page_llocks[PAGE_LLOCK_HASH(PP_PAGEROOT(pp))].pad_mutex)
#define	page_struct_unlock(pp)	\
	mutex_exit(&page_llocks[PAGE_LLOCK_HASH(PP_PAGEROOT(pp))].pad_mutex)
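
/*
 * Illustrative sketch (an assumption, not a kernel code path):
 * page_struct_lock() serializes updates to the p_lckcnt/p_cowcnt fields
 * described later in this file.  A caller holding the page at least
 * SE_SHARED could bump the lock count roughly as follows; the overflow
 * policy shown is hypothetical:
 *
 *	page_struct_lock(pp);
 *	if (pp->p_lckcnt < PAGE_LOCK_MAXIMUM)
 *		pp->p_lckcnt++;
 *	page_struct_unlock(pp);
 */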

#endif	/* _KERNEL */

#include <sys/t_lock.h>

struct as;

/*
 * Each physical page has a page structure, which is used to maintain
 * these pages as a cache.  A page can be found via a hashed lookup
 * based on the [vp, offset].  If a page has a [vp, offset] identity,
 * then it is entered on a doubly linked circular list off the
 * vnode using the vpnext/vpprev pointers.  If the p_free bit
 * is on, then the page is also on a doubly linked circular free
 * list using next/prev pointers.  If the "p_selock" and "p_iolock"
 * are held, then the page is currently being read in (exclusive p_selock)
 * or written back (shared p_selock).  In this case, the next/prev pointers
 * are used to link the pages together for a consecutive i/o request.  If
 * the page is being brought in from its backing store, then other processes
 * will wait for the i/o to complete before attaching to the page since it
 * will have an "exclusive" lock.
 *
 * Each page structure has the locks described below along with
 * the fields they protect:
 *
 *	p_selock	This is a per-page shared/exclusive lock that is
 *			used to implement the logical shared/exclusive
 *			lock for each page.  The "shared" lock is used
 *			in most cases while the "exclusive" lock is
 *			required to destroy or retain exclusive access to
 *			a page (e.g., while reading in pages).  The
 *			appropriate lock is always held whenever there is
 *			any reference to a page structure (e.g., during i/o).
 *			(Note that with the addition of the
 *			"writer-lock-wanted" semantics (via SE_EWANTED),
 *			threads must not acquire multiple reader locks or
 *			else a deadly embrace will occur in the following
 *			situation: thread 1 obtains a reader lock; next
 *			thread 2 fails to get a writer lock but specified
 *			SE_EWANTED so it will wait by either blocking (when
 *			using page_lock_es) or spinning while retrying
 *			(when using page_try_reclaim_lock) until the reader
 *			lock is released; then thread 1 attempts to get
 *			another reader lock but is denied due to SE_EWANTED
 *			being set, and now both threads are in a deadly
 *			embrace.)
 *
 *				p_hash
 *				p_vnode
 *				p_offset
 *
 *				p_free
 *				p_age
 *
 *	p_iolock	This is a binary semaphore lock that provides
 *			exclusive access to the i/o list links in each
 *			page structure.  It is always held while the page
 *			is on an i/o list (i.e., involved in i/o).  That is,
 *			even though a page may be only `shared' locked
 *			while it is doing a write, the following fields may
 *			change anyway.  Normally, the page must be
 *			`exclusively' locked to change anything in it.
 *
 *				p_next
 *				p_prev
 *
 * The following fields are protected by the global page_llocks[]:
 *
 *				p_lckcnt
 *				p_cowcnt
 *
 * The following lists are protected by the global page_freelock:
 *
 *				page_cachelist
 *				page_freelist
 *
 * The following, for our purposes, are protected by
 * the global freemem_lock:
 *
 *				freemem
 *				freemem_wait
 *				freemem_cv
 *
 * The following fields are protected by hat layer lock(s).  When a page
 * structure is not mapped and is not associated with a vnode (after a call
 * to page_hashout() for example) the p_nrm field may be modified without
 * holding the hat layer lock:
 *
 *				p_nrm
 *				p_mapping
 *				p_share
 *
 * The following field is file system dependent.  How it is used and
 * the locking strategies applied are up to the individual file system
 * implementation.
 *
 *				p_fsdata
 *
 * The page structure is used to represent and control the system's
 * physical pages.  There is one instance of the structure for each
 * page that is not permanently allocated.  For example, the pages that
 * hold the page structures are permanently held by the kernel
 * and hence do not need page structures to track them.  The array
 * of page structures is allocated early on in the kernel's life and
 * is based on the amount of available physical memory.
 *
 * Each page structure may simultaneously appear on several linked lists.
 * The lists are:  hash list, free or in i/o list, and a vnode's page list.
 * Each type of list is protected by a different group of mutexes as described
 * below:
 *
 * The hash list is used to quickly find a page when the page's vnode and
 * offset within the vnode are known.  Each page that is hashed is
 * connected via the `p_hash' field.  The anchor for each hash is in the
 * array `page_hash'.  An array of mutexes, `ph_mutex', protects the
 * lists anchored by page_hash[].  To either search or modify a given hash
 * list, the appropriate mutex in the ph_mutex array must be held.
 *
 * The free list contains pages that are `free to be given away'.  For
 * efficiency reasons, pages on this list are placed in two categories:
 * pages that are still associated with a vnode, and pages that are not
 * associated with a vnode.  Free pages always have their `p_free' bit set,
 * free pages that are still associated with a vnode also have their
 * `p_age' bit set.  Pages on the free list are connected via their
 * `p_next' and `p_prev' fields.  When a page is involved in some sort
 * of i/o, it is not free and these fields may be used to link associated
 * pages together.  At the moment, the free list is protected by a
 * single mutex `page_freelock'.  The list of free pages still associated
 * with a vnode is anchored by `page_cachelist' while other free pages
 * are anchored in architecture dependent ways (to handle page coloring etc.).
 *
 * Pages associated with a given vnode appear on a list anchored in the
 * vnode by the `v_pages' field.  They are linked together with
 * `p_vpnext' and `p_vpprev'.  The field `p_offset' contains a page's
 * offset within the vnode.  The pages on this list are not kept in
 * offset order.  These lists, in a manner similar to the hash lists,
 * are protected by an array of mutexes called `vph_mutex'.  Before
 * searching or modifying this chain the appropriate mutex in the
 * vph_mutex[] array must be held.
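 *
 * Illustrative sketch (an assumption, not a real code path): walking a
 * vnode's circular page list under the appropriate mutex, obtained via
 * the page_vnode_mutex() accessor declared later in this file:
 *
 *	kmutex_t *vphm = page_vnode_mutex(vp);
 *	page_t *pp;
 *
 *	mutex_enter(vphm);
 *	if ((pp = vp->v_pages) != NULL) {
 *		do {
 *			(examine only the list fields of pp here)
 *			pp = pp->p_vpnext;
 *		} while (pp != vp->v_pages);
 *	}
 *	mutex_exit(vphm);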
 *
 * Again, each of the lists that a page can appear on is protected by a
 * mutex.  Before reading or writing any of the fields comprising the
 * list, the appropriate lock must be held.  These list locks should only
 * be held for very short intervals.
 *
 * In addition to the list locks, each page structure contains a
 * shared/exclusive lock that protects various fields within it.
 * To modify one of these fields, the `p_selock' must be exclusively held.
 * To read a field with a degree of certainty, the lock must be at least
 * held shared.
 *
 * Removing a page structure from one of the lists requires holding
 * the appropriate list lock and the page's p_selock.  A page may be
 * prevented from changing identity, being freed, or otherwise modified
 * by acquiring p_selock shared.
 *
 * To avoid deadlocks, a strict locking protocol must be followed.  Basically
 * there are two cases:  In the first case, the page structure in question
 * is known ahead of time (e.g., when the page is to be added or removed
 * from a list).  In the second case, the page structure is not known and
 * must be found by searching one of the lists.
 *
 * When adding or removing a known page to one of the lists, first the
 * page must be exclusively locked (since at least one of its fields
 * will be modified), second the lock protecting the list must be acquired,
 * third the page inserted or deleted, and finally the list lock dropped.
 *
 * The more interesting case occurs when the particular page structure
 * is not known ahead of time.  For example, when a call is made to
 * page_lookup(), it is not known if a page with the desired (vnode and
 * offset pair) identity exists.  So the appropriate mutex in ph_mutex is
 * acquired, the hash list searched, and if the desired page is found
 * an attempt is made to lock it.  The attempt to acquire p_selock must
 * not block while the hash list lock is held.  A deadlock could occur
 * if some other process was trying to remove the page from the list.
 * The removing process (following the above protocol) would have exclusively
 * locked the page, and be spinning waiting to acquire the lock protecting
 * the hash list.  Since the searching process holds the hash list lock
 * and is waiting to acquire the page lock, a deadlock occurs.
 *
 * The proper scheme to follow is: first, lock the appropriate list,
 * search the list, and if the desired page is found either use
 * page_trylock() (which will not block) or pass the address of the
 * list lock to page_lock().  If page_lock() can not acquire the page's
 * lock, it will drop the list lock before going to sleep.  page_lock()
 * returns a value to indicate if the list lock was dropped allowing the
 * calling program to react appropriately (i.e., retry the operation).
 *
 * If the list lock was dropped before the attempt at locking the page
 * was made, checks would have to be made to ensure that the page had
 * not changed identity before its lock was obtained.  This is because
 * the interval between dropping the list lock and acquiring the page
 * lock is indeterminate.
 *
 * In addition, when both a hash list lock (ph_mutex[]) and a vnode list
 * lock (vph_mutex[]) are needed, the hash list lock must be acquired first.
 * The routine page_hashin() is a good example of this sequence.
 * This sequence is ASSERTed by checking that the vph_mutex[] is not held
 * just before each acquisition of one of the mutexes in ph_mutex[].
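 *
 * Illustrative sketch of that protocol (an assumption; return conventions
 * are condensed and identity rechecking is elided):
 *
 *	kmutex_t *phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off));
 *
 *	mutex_enter(phm);
 *	pp = (search the hash chain for [vp, off]);
 *	if (pp == NULL || page_trylock(pp, se)) {
 *		mutex_exit(phm);		(fast path: never blocked)
 *	} else if (!page_lock(pp, se, phm, P_RECLAIM)) {
 *		(page_lock() dropped phm before sleeping and failed to get
 *		the page lock; the page may have changed identity, so the
 *		whole lookup must be retried)
 *	}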
 *
 * So, as a quick summary:
 *
 *	pse_mutex[]'s protect the p_selock and p_cv fields.
 *
 *	p_selock protects the p_free, p_age, p_vnode, p_offset and p_hash.
 *
 *	ph_mutex[]'s protect the page_hash[] array and its chains.
 *
 *	vph_mutex[]'s protect the v_pages field and the vp page chains.
 *
 *	First lock the page, then the hash chain, then the vnode chain.  When
 *	this is not possible `trylocks' must be used.  Sleeping while holding
 *	any of these mutexes (p_selock is not a mutex) is not allowed.
 *
 *
 *	field		reading		writing		    ordering
 *	======================================================================
 *	p_vnode		p_selock(E,S)	p_selock(E)
 *	p_offset
 *	p_free
 *	p_age
 *	=====================================================================
 *	p_hash		p_selock(E,S)	p_selock(E) &&	    p_selock, ph_mutex
 *					ph_mutex[]
 *	=====================================================================
 *	p_vpnext	p_selock(E,S)	p_selock(E) &&	    p_selock, vph_mutex
 *	p_vpprev			vph_mutex[]
 *	=====================================================================
 *	When the p_free bit is set:
 *
 *	p_next		p_selock(E,S)	p_selock(E) &&	    p_selock,
 *	p_prev				page_freelock	    page_freelock
 *
 *	When the p_free bit is not set:
 *
 *	p_next		p_selock(E,S)	p_selock(E) &&	    p_selock, p_iolock
 *	p_prev				p_iolock
 *	=====================================================================
 *	p_selock	pse_mutex[]	pse_mutex[]	    can't acquire any
 *	p_cv						    other mutexes or
 *							    sleep while holding
 *							    this lock.
 *	=====================================================================
 *	p_lckcnt	p_selock(E,S)	p_selock(E)
 *					    OR
 *					p_selock(S) &&
 *					page_llocks[]
 *	p_cowcnt
 *	=====================================================================
 *	p_nrm		hat layer lock	hat layer lock
 *	p_mapping
 *	p_pagenum
 *	=====================================================================
 *
 *	where:
 *		E----> exclusive version of p_selock.
 *		S----> shared version of p_selock.
 *
 *
 *	Global data structures and variable:
 *
 *	field		reading		writing		    ordering
 *	=====================================================================
 *	page_hash[]	ph_mutex[]	ph_mutex[]	    can hold this lock
 *							    before acquiring
 *							    a vph_mutex or
 *							    pse_mutex.
 *	=====================================================================
 *	vp->v_pages	vph_mutex[]	vph_mutex[]	    can only acquire
 *							    a pse_mutex while
 *							    holding this lock.
 *	=====================================================================
 *	page_cachelist	page_freelock	page_freelock	    can't acquire any
 *	page_freelist	page_freelock	page_freelock	    other mutexes while
 *							    holding this lock.
 *	=====================================================================
 *	freemem		freemem_lock	freemem_lock	    can't acquire any
 *	freemem_wait					    other mutexes while
 *	freemem_cv					    holding this mutex.
 *	=====================================================================
 *
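 * Illustrative sketch (an assumption): per the table above, freemem and
 * its wait channel are only touched under freemem_lock, roughly:
 *
 *	mutex_enter(&freemem_lock);
 *	freemem -= npages;		(npages is hypothetical)
 *	cv_signal(&freemem_cv);		(wake a waiter, if appropriate)
 *	mutex_exit(&freemem_lock);
 *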
 * Page relocation, PG_NORELOC and P_NORELOC.
 *
 * Pages may be relocated using the page_relocate() interface.  Relocation
 * involves moving the contents and identity of a page to another, free page.
 * To relocate a page, the SE_EXCL lock must be obtained.  The way to prevent
 * a page from being relocated is to hold the SE_SHARED lock (the SE_EXCL
 * lock must not be held indefinitely).  If the page is going to be held
 * SE_SHARED indefinitely, then the PG_NORELOC hint should be passed
 * to page_create_va so that pages that are prevented from being relocated
 * can be managed differently by the platform specific layer.
 *
 * Pages locked in memory using page_pp_lock (p_lckcnt/p_cowcnt != 0)
 * are guaranteed to be held in memory, but can still be relocated
 * providing the SE_EXCL lock can be obtained.
 *
 * The P_NORELOC bit in the page_t.p_state field is provided for use by
 * the platform specific code in managing pages when the PG_NORELOC
 * hint is used.
 *
 * Memory delete and page locking.
 *
 * The set of all usable pages is managed using the global page list as
 * implemented by the memseg structure defined below.  When memory is added
 * or deleted this list changes.  Additions to this list guarantee that the
 * list is never corrupt.  In order to avoid the necessity of an additional
 * lock to protect against failed accesses to the memseg being deleted and,
 * more importantly, the page_ts, the memseg structure is never freed and the
 * page_t virtual address space is remapped to a page (or pages) of
 * zeros.  If a page_t is manipulated while it is p_selock'd, or if it is
 * locked indirectly via a hash or freelist lock, it is not possible for
 * memory delete to collect the page and so that part of the page list is
 * prevented from being deleted.  If the page is referenced outside of one
 * of these locks, it is possible for the page_t being referenced to be
 * deleted.  Examples of this are page_t pointers returned by
 * page_numtopp_nolock, page_first and page_next.  Providing the page_t
 * is re-checked after taking the p_selock (for p_vnode != NULL), the
 * remapping to the zero pages will be detected.
 *
 *
 * Page size (p_szc field) and page locking.
 *
 * p_szc field of free pages is changed by free list manager under freelist
 * locks and is of no concern to the rest of VM subsystem.
 *
 * p_szc changes of allocated anonymous (swapfs) pages can only be done after
 * exclusively locking all constituent pages and calling hat_pageunload() on
 * each of them.  To prevent p_szc changes of non free anonymous (swapfs)
 * large pages it's enough to either lock SHARED any of constituent pages or
 * prevent hat_pageunload() by holding hat level lock that protects mapping
 * lists (this method is for hat code only).
 *
 * To increase (promote) p_szc of allocated non anonymous file system pages
 * one has to first lock exclusively all involved constituent pages and call
 * hat_pageunload() on each of them.  To prevent p_szc promote it's enough to
 * either lock SHARED any of constituent pages that will be needed to make a
 * large page or prevent hat_pageunload() by holding hat level lock that
 * protects mapping lists (this method is for hat code only).
 *
 * To decrease (demote) p_szc of an allocated non anonymous file system large
 * page one can either use the same method as used for changing p_szc of
 * anonymous large pages or if it's not possible to lock all constituent pages
 * exclusively a different method can be used.  In the second method one only
 * has to exclusively lock one of constituent pages but then one has to
 * acquire further locks by calling page_szc_lock() and
 * hat_page_demote().  hat_page_demote() acquires hat level locks and then
 * demotes the page.  This mechanism relies on the fact that any code that
 * needs to prevent p_szc of a file system large page from changing either
 * locks all constituent large pages at least SHARED or locks some pages at
 * least SHARED and calls page_szc_lock() or uses hat level page locks.
 * Demotion using this method is implemented by page_demote_vp_pages().
 * Please see comments in front of page_demote_vp_pages(), hat_page_demote()
 * and page_szc_lock() for more details.
 *
 * Lock order: p_selock, page_szc_lock, ph_mutex/vph_mutex/freelist,
 * hat level locks.
 */
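
/*
 * Illustrative sketch (an assumption, not taken from a real caller): per
 * the rules above, locking any one constituent page SE_SHARED is enough
 * to keep the p_szc of a non-free anonymous (swapfs) large page stable:
 *
 *	if (page_trylock(pp, SE_SHARED)) {
 *		uint_t szc = pp->p_szc;	   (stable while the lock is held)
 *		...
 *		page_unlock(pp);
 *	}
 */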

typedef struct page {
	u_offset_t	p_offset;	/* offset into vnode for this page */
	struct vnode	*p_vnode;	/* vnode that this page is named by */
	selock_t	p_selock;	/* shared/exclusive lock on the page */
#if defined(_LP64)
	uint_t		p_vpmref;	/* vpm ref - index of the vpmap_t */
#endif
	struct page	*p_hash;	/* hash by [vnode, offset] */
	struct page	*p_vpnext;	/* next page in vnode list */
	struct page	*p_vpprev;	/* prev page in vnode list */
	struct page	*p_next;	/* next page in free/intrans lists */
	struct page	*p_prev;	/* prev page in free/intrans lists */
	ushort_t	p_lckcnt;	/* number of locks on page data */
	ushort_t	p_cowcnt;	/* number of copy on write lock */
	kcondvar_t	p_cv;		/* page struct's condition var */
	kcondvar_t	p_io_cv;	/* for iolock */
	uchar_t		p_iolock_state;	/* replaces p_iolock */
	volatile uchar_t p_szc;		/* page size code */
	uchar_t		p_fsdata;	/* file system dependent byte */
	uchar_t		p_state;	/* p_free, p_noreloc */
	uchar_t		p_nrm;		/* non-cache, ref, mod readonly bits */
#if defined(__sparc)
	uchar_t		p_vcolor;	/* virtual color */
#else
	uchar_t		p_embed;	/* x86 - changes p_mapping & p_index */
#endif
	uchar_t		p_index;	/* MPSS mapping info. Not used on x86 */
	uchar_t		p_toxic;	/* page has an unrecoverable error */
	void		*p_mapping;	/* hat specific translation info */
	pfn_t		p_pagenum;	/* physical page number */

	uint_t		p_share;	/* number of translations */
#if defined(_LP64)
	uint_t		p_sharepad;	/* pad for growing p_share */
#endif
	uint_t		p_slckcnt;	/* number of softlocks */
#if defined(__sparc)
	uint_t		p_kpmref;	/* number of kpm mapping sharers */
	struct kpme	*p_kpmelist;	/* kpm specific mapping info */
#else
	/* index of entry in p_map when p_embed is set */
	uint_t		p_mlentry;
#endif
#if defined(_LP64)
	kmutex_t	p_ilock;	/* protects p_vpmref */
#else
	uint64_t	p_msresv_2;	/* page allocation debugging */
#endif
} page_t;


typedef	page_t	devpage_t;
#define	devpage	page

#define	PAGE_LOCK_MAXIMUM \
	((1 << (sizeof (((page_t *)0)->p_lckcnt) * NBBY)) - 1)

#define	PAGE_SLOCK_MAXIMUM UINT_MAX
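
/*
 * Worked out: p_lckcnt is a ushort_t, so with NBBY == 8 the expression
 * above is (1 << 16) - 1, i.e. PAGE_LOCK_MAXIMUM == 65535.
 */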

/*
 * Page hash table is a power-of-two in size, externally chained
 * through the hash field.  PAGE_HASHAVELEN is the average length
 * desired for this chain, from which the size of the page_hash
 * table is derived at boot time and stored in the kernel variable
 * page_hashsz.  In the hash function it is given by PAGE_HASHSZ.
 *
 * PAGE_HASH_FUNC returns an index into the page_hash[] array.  This
 * index is also used to derive the mutex that protects the chain.
 *
 * In constructing the hash function, first we dispose of unimportant bits
 * (page offset from "off" and the low 3 bits of "vp" which are zero for
 * struct alignment).  Then shift and sum the remaining bits a couple times
 * in order to get as many source bits as possible from the two source
 * values into the resulting hashed value.  Note that this will perform
 * quickly, since the shifting/summing are fast register to register
 * operations with no additional memory references.
 *
 * PH_SHIFT_SIZE is the amount to use for the successive shifts in the hash
 * function below.  The actual value is LOG2(PH_TABLE_SIZE), so that as many
 * bits as possible will filter thru PAGE_HASH_FUNC() and PAGE_HASH_MUTEX().
 */
#if defined(_LP64)

#if NCPU < 4
#define	PH_TABLE_SIZE	128
#define	PH_SHIFT_SIZE	7
#else
#define	PH_TABLE_SIZE	(2 * NCPU_P2)
#define	PH_SHIFT_SIZE	(NCPU_LOG2 + 1)
#endif

#else	/* 32 bits */

#if NCPU < 4
#define	PH_TABLE_SIZE	16
#define	PH_SHIFT_SIZE	4
#else
#define	PH_TABLE_SIZE	128
#define	PH_SHIFT_SIZE	7
#endif

#endif	/* _LP64 */

/*
 *
 * We take care to get as much randomness as possible from both the vp and
 * the offset.  Workloads can have few vnodes with many offsets, many vnodes
 * with few offsets or a moderate mix of both.  This hash should perform
 * equally well for each of these possibilities and for all types of memory
 * allocations.
 *
 * vnodes representing files are created over a long period of time and
 * have good variation in the upper vp bits, and the right shifts below
 * capture these bits.  However, swap vnodes are created quickly in a
 * narrow vp* range.  Refer to comments at swap_alloc: vnum has exactly
 * AN_VPSHIFT bits, so the kmem_alloc'd vnode addresses have approximately
 * AN_VPSHIFT bits of variation above their VNODE_ALIGN low order 0 bits.
 * Spread swap vnodes widely in the hash table by XOR'ing a term with the
 * vp bits of variation left shifted to the top of the range.
 */

#define	PAGE_HASHSZ	page_hashsz
#define	PAGE_HASHAVELEN		4
#define	PAGE_HASH_FUNC(vp, off) \
	(((((uintptr_t)(off) >> PAGESHIFT) ^ \
	((uintptr_t)(off) >> (PAGESHIFT + PH_SHIFT_SIZE))) ^ \
	(((uintptr_t)(vp) >> 3) ^ \
	((uintptr_t)(vp) >> (3 + PH_SHIFT_SIZE)) ^ \
	((uintptr_t)(vp) >> (3 + 2 * PH_SHIFT_SIZE)) ^ \
	((uintptr_t)(vp) << \
	(page_hashsz_shift - AN_VPSHIFT - VNODE_ALIGN_LOG2)))) & \
	(PAGE_HASHSZ - 1))
#ifdef _KERNEL

/*
 * The page hash value is re-hashed to an index for the ph_mutex array.
 *
 * For 64 bit kernels, the mutex array is padded out to prevent false
 * sharing of cache sub-blocks (64 bytes) of adjacent mutexes.
 *
 * For 32 bit kernels, we don't want to waste kernel address space with
 * padding, so instead we rely on the hash function to introduce skew of
 * adjacent vnode/offset indexes (the left shift part of the hash function).
 * Since sizeof (kmutex_t) is 8, we shift an additional 3 to skew to a
 * different 64 byte sub-block.
 */
extern pad_mutex_t	ph_mutex[];

#define	PAGE_HASH_MUTEX(x) \
	&(ph_mutex[((x) ^ ((x) >> PH_SHIFT_SIZE) + ((x) << 3)) & \
		(PH_TABLE_SIZE - 1)].pad_mutex)
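
/*
 * Illustrative pairing (a sketch, not a kernel routine): the same hash
 * index selects both the chain and the mutex that guards it:
 *
 *	ulong_t index = PAGE_HASH_FUNC(vp, off);
 *	kmutex_t *phm = PAGE_HASH_MUTEX(index);
 *	page_t *pp;
 *
 *	mutex_enter(phm);
 *	for (pp = page_hash[index]; pp != NULL; pp = pp->p_hash)
 *		if (pp->p_vnode == vp && pp->p_offset == off)
 *			break;
 *	mutex_exit(phm);
 */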

/*
 * Flags used while creating pages.
 */
#define	PG_EXCL		0x0001
#define	PG_WAIT		0x0002		/* Blocking memory allocations */
#define	PG_PHYSCONTIG	0x0004		/* NOT SUPPORTED */
#define	PG_MATCH_COLOR	0x0008		/* SUPPORTED by free list routines */
#define	PG_NORELOC	0x0010		/* Non-relocatable alloc hint. */
					/* Page must be PP_ISNORELOC */
#define	PG_PANIC	0x0020		/* system will panic if alloc fails */
#define	PG_PUSHPAGE	0x0040		/* alloc may use reserve */
#define	PG_LOCAL	0x0080		/* alloc from given lgrp only */
#define	PG_NORMALPRI	0x0100		/* PG_WAIT like priority, but */
					/* non-blocking */
/*
 * When p_selock has the SE_EWANTED bit set, threads waiting for SE_EXCL
 * access are given priority over all other waiting threads.
 */
#define	SE_EWANTED	0x40000000
#define	PAGE_LOCKED(pp)		(((pp)->p_selock & ~SE_EWANTED) != 0)
#define	PAGE_SHARED(pp)		(((pp)->p_selock & ~SE_EWANTED) > 0)
#define	PAGE_EXCL(pp)		((pp)->p_selock < 0)
#define	PAGE_LOCKED_SE(pp, se)	\
	((se) == SE_EXCL ? PAGE_EXCL(pp) : PAGE_SHARED(pp))

extern	long page_hashsz;
extern	unsigned int page_hashsz_shift;
extern	page_t **page_hash;

extern	pad_mutex_t page_llocks[];	/* page logical lock mutex */
extern	kmutex_t freemem_lock;		/* freemem lock */

extern	pgcnt_t	total_pages;		/* total pages in the system */

/*
 * Variables controlling locking of physical memory.
 */
extern	pgcnt_t	pages_pp_maximum;	/* tuning: lock + claim <= max */
extern	void init_pages_pp_maximum(void);

struct lgrp;

/* page_list_{add,sub} flags */

/* which list */
#define	PG_FREE_LIST	0x0001
#define	PG_CACHE_LIST	0x0002

/* where on list */
#define	PG_LIST_TAIL	0x0010
#define	PG_LIST_HEAD	0x0020

/* called from */
#define	PG_LIST_ISINIT	0x1000
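
/*
 * Illustrative sketch (an assumed call pattern, not from a real caller):
 * allocating a fresh, exclusively locked page for [vp, off] via
 * page_create_va() (declared below), blocking until memory is available;
 * `seg' and `addr' are hypothetical:
 *
 *	pp = page_create_va(vp, off, PAGESIZE, PG_WAIT | PG_EXCL, seg, addr);
 *	ASSERT(pp == NULL || PAGE_EXCL(pp));
 */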

/*
 * Page frame operations.
 */
page_t	*page_lookup(struct vnode *, u_offset_t, se_t);
page_t	*page_lookup_create(struct vnode *, u_offset_t, se_t, page_t *,
	spgcnt_t *, int);
page_t	*page_lookup_nowait(struct vnode *, u_offset_t, se_t);
page_t	*page_find(struct vnode *, u_offset_t);
page_t	*page_exists(struct vnode *, u_offset_t);
int	page_exists_physcontig(vnode_t *, u_offset_t, uint_t, page_t *[]);
int	page_exists_forreal(struct vnode *, u_offset_t, uint_t *);
void	page_needfree(spgcnt_t);
page_t	*page_create(struct vnode *, u_offset_t, size_t, uint_t);
int	page_alloc_pages(struct vnode *, struct seg *, caddr_t, page_t **,
	page_t **, uint_t, int, int);
page_t	*page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes,
	uint_t flags, struct seg *seg, caddr_t vaddr, void *arg);
page_t	*page_create_va(struct vnode *, u_offset_t, size_t, uint_t,
	struct seg *, caddr_t);
int	page_create_wait(pgcnt_t npages, uint_t flags);
void	page_create_putback(spgcnt_t npages);
void	page_free(page_t *, int);
void	page_free_at_startup(page_t *);
void	page_free_pages(page_t *);
void	free_vp_pages(struct vnode *, u_offset_t, size_t);
int	page_reclaim(page_t *, kmutex_t *);
int	page_reclaim_pages(page_t *, kmutex_t *, uint_t);
void	page_destroy(page_t *, int);
void	page_destroy_pages(page_t *);
void	page_destroy_free(page_t *);
void	page_rename(page_t *, struct vnode *, u_offset_t);
int	page_hashin(page_t *, struct vnode *, u_offset_t, kmutex_t *);
void	page_hashout(page_t *, kmutex_t *);
int	page_num_hashin(pfn_t, struct vnode *, u_offset_t);
void	page_add(page_t **, page_t *);
void	page_add_common(page_t **, page_t *);
void	page_sub(page_t **, page_t *);
void	page_sub_common(page_t **, page_t *);
page_t	*page_get_freelist(struct vnode *, u_offset_t, struct seg *,
	caddr_t, size_t, uint_t, struct lgrp *);

page_t	*page_get_cachelist(struct vnode *, u_offset_t, struct seg *,
	caddr_t, uint_t, struct lgrp *);
#if defined(__i386) || defined(__amd64)
int	page_chk_freelist(uint_t);
#endif
void	page_list_add(page_t *, int);
void	page_boot_demote(page_t *);
void	page_promote_size(page_t *, uint_t);
void	page_list_add_pages(page_t *, int);
void	page_list_sub(page_t *, int);
void	page_list_sub_pages(page_t *, uint_t);
void	page_list_xfer(page_t *, int, int);
void	page_list_break(page_t **, page_t **, size_t);
void	page_list_concat(page_t **, page_t **);
void	page_vpadd(page_t **, page_t *);
void	page_vpsub(page_t **, page_t *);
int	page_lock(page_t *, se_t, kmutex_t *, reclaim_t);
int	page_lock_es(page_t *, se_t, kmutex_t *, reclaim_t, int);
void	page_lock_clr_exclwanted(page_t *);
int	page_trylock(page_t *, se_t);
int	page_try_reclaim_lock(page_t *, se_t, int);
int	page_tryupgrade(page_t *);
void	page_downgrade(page_t *);
void	page_unlock(page_t *);
void	page_unlock_nocapture(page_t *);
void	page_lock_delete(page_t *);
int	page_deleted(page_t *);
int	page_pp_lock(page_t *, int, int);
void	page_pp_unlock(page_t *, int, int);
int	page_resv(pgcnt_t, uint_t);
void	page_unresv(pgcnt_t);
void	page_pp_useclaim(page_t *, page_t *, uint_t);
int	page_addclaim(page_t *);
int	page_subclaim(page_t *);
int	page_addclaim_pages(page_t **);
int	page_subclaim_pages(page_t **);
pfn_t	page_pptonum(page_t *);
page_t	*page_numtopp(pfn_t, se_t);
page_t	*page_numtopp_noreclaim(pfn_t, se_t);
page_t	*page_numtopp_nolock(pfn_t);
page_t	*page_numtopp_nowait(pfn_t, se_t);
page_t	*page_first();
page_t	*page_next(page_t *);
page_t	*page_list_next(page_t *);
page_t	*page_nextn(page_t *, ulong_t);
page_t	*page_next_scan_init(void **);
page_t	*page_next_scan_large(page_t *, ulong_t *, void **);
void	prefetch_page_r(void *);
int	ppcopy(page_t *, page_t *);
void	page_relocate_hash(page_t *, page_t *);
void	pagezero(page_t *, uint_t, uint_t);
void	pagescrub(page_t *, uint_t, uint_t);
void	page_io_lock(page_t *);
void	page_io_unlock(page_t *);
int	page_io_trylock(page_t *);
int	page_iolock_assert(page_t *);
void	page_iolock_init(page_t *);
void	page_io_wait(page_t *);
int	page_io_locked(page_t *);
pgcnt_t	page_busy(int);
void	page_lock_init(void);
ulong_t	page_share_cnt(page_t *);
int	page_isshared(page_t *);
int	page_isfree(page_t *);
int	page_isref(page_t *);
int	page_ismod(page_t *);
int	page_release(page_t *, int);
void	page_retire_init(void);
int	page_retire(uint64_t, uchar_t);
int	page_retire_check(uint64_t, uint64_t *);
int	page_unretire(uint64_t);
int	page_unretire_pp(page_t *, int);
void	page_tryretire(page_t *);
void	page_retire_mdboot();
uint64_t	page_retire_pend_count(void);
uint64_t	page_retire_pend_kas_count(void);
void	page_retire_incr_pend_count(void *);
void	page_retire_decr_pend_count(void *);
void	page_clrtoxic(page_t *, uchar_t);
void	page_settoxic(page_t *, uchar_t);

int	page_mem_avail(pgcnt_t);
int	page_reclaim_mem(pgcnt_t, pgcnt_t, int);

void page_set_props(page_t *, uint_t);
void page_clr_all_props(page_t *);
int page_clear_lck_cow(page_t *, int);

kmutex_t	*page_vnode_mutex(struct vnode *);
kmutex_t	*page_se_mutex(struct page *);
kmutex_t	*page_szc_lock(struct page *);
int		page_szc_lock_assert(struct page *pp);

/*
 * Page relocation interfaces. page_relocate() is generic.
 * page_get_replacement_page() is provided by the PSM.
 * page_free_replacement_page() is generic.
 */
int group_page_trylock(page_t *, se_t);
void group_page_unlock(page_t *);
int page_relocate(page_t **, page_t **, int, int, spgcnt_t *, struct lgrp *);
int do_page_relocate(page_t **, page_t **, int, spgcnt_t *, struct lgrp *);
page_t *page_get_replacement_page(page_t *, struct lgrp *, uint_t);
void page_free_replacement_page(page_t *);
int page_relocate_cage(page_t **, page_t **);
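
/*
 * Illustrative sketch (an assumption, simplified; see the real callers of
 * page_relocate() for the full protocol): relocation starts from a page
 * held SE_EXCL, and on success the caller ends up holding the locked
 * replacement instead:
 *
 *	spgcnt_t nreloc;
 *	page_t *newpp = NULL;
 *
 *	if (page_trylock(pp, SE_EXCL)) {
 *		if (page_relocate(&pp, &newpp, 1, 1, &nreloc, NULL) == 0)
 *			page_unlock(newpp);	(replacement came back locked)
 *		else
 *			page_unlock(pp);	(relocation failed)
 *	}
 */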

int page_try_demote_pages(page_t *);
int page_try_demote_free_pages(page_t *);
void page_demote_free_pages(page_t *);

struct anon_map;

void page_mark_migrate(struct seg *, caddr_t, size_t, struct anon_map *,
    ulong_t, vnode_t *, u_offset_t, int);
void page_migrate(struct seg *, caddr_t, page_t **, pgcnt_t);

/*
 * Tell the PIM we are adding physical memory
 */
void add_physmem(page_t *, size_t, pfn_t);
void add_physmem_cb(page_t *, pfn_t);	/* callback for page_t part */

/*
 * hw_page_array[] is configured with hardware supported page sizes by
 * platform specific code.
 */
typedef struct {
	size_t	hp_size;
	uint_t	hp_shift;
	uint_t	hp_colors;
	pgcnt_t	hp_pgcnt;	/* base pagesize cnt */
} hw_pagesize_t;

extern hw_pagesize_t	hw_page_array[];
extern uint_t		page_coloring_shift;
extern uint_t		page_colors_mask;
extern int		cpu_page_colors;
extern uint_t		colorequiv;
extern uchar_t		colorequivszc[];

uint_t	page_num_pagesizes(void);
uint_t	page_num_user_pagesizes(int);
size_t	page_get_pagesize(uint_t);
size_t	page_get_user_pagesize(uint_t n);
pgcnt_t	page_get_pagecnt(uint_t);
uint_t	page_get_shift(uint_t);
int	page_szc(size_t);
int	page_szc_user_filtered(size_t);

/* page_get_replacement page flags */
#define	PGR_SAMESZC	0x1	/* only look for page size same as orig */
#define	PGR_NORELOC	0x2	/* allocate a P_NORELOC page */

/*
 * macros for "masked arithmetic"
 * The purpose is to step through all combinations of a set of bits while
 * keeping some other bits fixed. Fixed bits need not be contiguous. The
 * variable bits need not be contiguous either, or even right aligned. The
 * trick is to set all fixed bits to 1, then increment, then restore the
 * fixed bits. If incrementing causes a carry from a low bit position, the
 * carry propagates thru the fixed bits, because they are temporarily set to 1.
 *	v is the value
 *	i is the increment
 *	eq_mask defines the fixed bits
 *	mask limits the size of the result
 */
#define	ADD_MASKED(v, i, eq_mask, mask) \
	(((((v) | (eq_mask)) + (i)) & (mask) & ~(eq_mask)) | ((v) & (eq_mask)))

/*
 * convenience macro which increments by 1
 */
#define	INC_MASKED(v, eq_mask, mask)	ADD_MASKED(v, 1, eq_mask, mask)
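
/*
 * Worked example (values chosen for illustration): with eq_mask = 0x5 and
 * mask = 0xF, bits 0 and 2 are fixed and bits 1 and 3 vary.  Starting from
 * v = 0x5, repeated INC_MASKED steps yield
 *
 *	0x5 -> 0x7 -> 0xD -> 0xF -> 0x5 (wraps)
 *
 * i.e. the variable bit pair counts 00, 01, 10, 11 while the fixed bits
 * keep their original values throughout.
 */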

#endif	/* _KERNEL */

/*
 * Constants used for the p_iolock_state
 */
#define	PAGE_IO_INUSE	0x1
#define	PAGE_IO_WANTED	0x2

/*
 * Constants used for page_release status
 */
#define	PGREL_NOTREL	0x1
#define	PGREL_CLEAN	0x2
#define	PGREL_MOD	0x3

/*
 * The p_state field holds what used to be the p_age and p_free
 * bits.  These fields are protected by p_selock (see above).
 */
#define	P_FREE		0x80		/* Page on free list */
#define	P_NORELOC	0x40		/* Page is non-relocatable */
#define	P_MIGRATE	0x20		/* Migrate page on next touch */
#define	P_SWAP		0x10		/* belongs to vnode that is V_ISSWAP */
#define	P_BOOTPAGES	0x08		/* member of bootpages list */
#define	P_RAF		0x04		/* page retired at free */

#define	PP_ISFREE(pp)		((pp)->p_state & P_FREE)
#define	PP_ISAGED(pp)		(((pp)->p_state & P_FREE) && \
					((pp)->p_vnode == NULL))
#define	PP_ISNORELOC(pp)	((pp)->p_state & P_NORELOC)
#define	PP_ISKAS(pp)		(VN_ISKAS((pp)->p_vnode))
#define	PP_ISNORELOCKERNEL(pp)	(PP_ISNORELOC(pp) && PP_ISKAS(pp))
#define	PP_ISMIGRATE(pp)	((pp)->p_state & P_MIGRATE)
#define	PP_ISSWAP(pp)		((pp)->p_state & P_SWAP)
#define	PP_ISBOOTPAGES(pp)	((pp)->p_state & P_BOOTPAGES)
#define	PP_ISRAF(pp)		((pp)->p_state & P_RAF)

#define	PP_SETFREE(pp)		((pp)->p_state = ((pp)->p_state & ~P_MIGRATE) \
				| P_FREE)
#define	PP_SETAGED(pp)		ASSERT(PP_ISAGED(pp))
#define	PP_SETNORELOC(pp)	((pp)->p_state |= P_NORELOC)
#define	PP_SETMIGRATE(pp)	((pp)->p_state |= P_MIGRATE)
#define	PP_SETSWAP(pp)		((pp)->p_state |= P_SWAP)
#define	PP_SETBOOTPAGES(pp)	((pp)->p_state |= P_BOOTPAGES)
#define	PP_SETRAF(pp)		((pp)->p_state |= P_RAF)

#define	PP_CLRFREE(pp)		((pp)->p_state &= ~P_FREE)
#define	PP_CLRAGED(pp)		ASSERT(!PP_ISAGED(pp))
#define	PP_CLRNORELOC(pp)	((pp)->p_state &= ~P_NORELOC)
#define	PP_CLRMIGRATE(pp)	((pp)->p_state &= ~P_MIGRATE)
#define	PP_CLRSWAP(pp)		((pp)->p_state &= ~P_SWAP)
#define	PP_CLRBOOTPAGES(pp)	((pp)->p_state &= ~P_BOOTPAGES)
#define	PP_CLRRAF(pp)		((pp)->p_state &= ~P_RAF)
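
/*
 * Illustrative sketch (an assumption, not a real code path): since p_state
 * is written under an exclusively held p_selock (see the table above), a
 * state transition is typically guarded like this:
 *
 *	ASSERT(PAGE_EXCL(pp));
 *	PP_SETFREE(pp);
 */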

/*
 * Flags for page_t p_toxic, for tracking memory hardware errors.
 *
 * These flags are OR'ed into p_toxic with page_settoxic() to track which
 * error(s) have occurred on a given page. The flags are cleared with
 * page_clrtoxic(). Both page_settoxic() and page_clrtoxic() use atomic
 * primitives to manipulate the p_toxic field so no other locking is needed.
 *
 * When an error occurs on a page, p_toxic is set to record the error. The
 * error could be a memory error or something else (e.g., a datapath error).
 * The Page Retire mechanism does not try to determine the exact cause of the
 * error; Page Retire rightly leaves that sort of determination to FMA's
 * Diagnostic Engine (DE).
 *
 * Note that, while p_toxic bits can be set without holding any locks, they
 * should only be cleared while holding the page exclusively locked.
 * There is one exception to this, the PR_CAPTURE bit is protected by a mutex
 * within the page capture logic and thus to set or clear the bit, that mutex
 * needs to be held.  The page does not need to be locked but the
 * page_clrtoxic function must be used as we need an atomic operation.
 * Also note that there is what amounts to a hack to prevent recursion with
 * large pages such that if we are unlocking a page and the PR_CAPTURE bit is
 * set, we will only try to capture the page if the current thread's
 * T_CAPTURING flag is not set.  If the flag is set, the unlock will not try
 * to capture the page even though the PR_CAPTURE bit is set.
 *
 * Pages with PR_UE or PR_FMA flags are retired unconditionally, while pages
 * with PR_MCE are retired if the system has not retired too many of them.
 *
 * A page must be exclusively locked to be retired.  Pages can be retired if
 * they are mapped, modified, or both, as long as they are not marked PR_UE,
 * since pages with uncorrectable errors cannot be relocated in memory.
 * Once a page has been successfully retired it is zeroed, attached to the
 * retired_pages vnode and, finally, PR_RETIRED is set in p_toxic.  The other
 * p_toxic bits are NOT cleared.  Pages are not left locked after retiring
 * them to avoid special case code throughout the kernel; rather,
 * page_*lock() will fail to lock the page, unless SE_RETIRED is passed as
 * an argument.
 *
 * While we have your attention, go take a look at the comments at the
 * beginning of page_retire.c too.
 */
#define	PR_OK		0x00	/* no problem */
#define	PR_MCE		0x01	/* page has seen two or more CEs */
#define	PR_UE		0x02	/* page has an unhandled UE */
#define	PR_UE_SCRUBBED	0x04	/* page has seen a UE but was cleaned */
#define	PR_FMA		0x08	/* A DE wants this page retired */
#define	PR_CAPTURE	0x10	/* page is hashed on page_capture_hash[] */
#define	PR_RESV		0x20	/* Reserved for future use */
#define	PR_MSG		0x40	/* message(s) already printed for this page */
#define	PR_RETIRED	0x80	/* This page has been retired */

#define	PR_REASONS	(PR_UE | PR_MCE | PR_FMA)
#define	PR_TOXIC	(PR_UE)
#define	PR_ERRMASK	(PR_UE | PR_UE_SCRUBBED | PR_MCE | PR_FMA)
#define	PR_TOXICFLAGS	(0xCF)

#define	PP_RETIRED(pp)	((pp)->p_toxic & PR_RETIRED)
#define	PP_TOXIC(pp)	((pp)->p_toxic & PR_TOXIC)
#define	PP_PR_REQ(pp)	(((pp)->p_toxic & PR_REASONS) && !PP_RETIRED(pp))
#define	PP_PR_NOSHARE(pp)						\
	((((pp)->p_toxic & (PR_RETIRED | PR_FMA | PR_UE)) == PR_FMA) &&	\
	!PP_ISKAS(pp))

/*
 * Flags for page_unretire_pp
 */
#define	PR_UNR_FREE	0x1
#define	PR_UNR_CLEAN	0x2
#define	PR_UNR_TEMP	0x4
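
/*
 * Illustrative sketch (an assumption, simplified): a DE asking for a page
 * to be retired by physical address would go through page_retire(),
 * declared earlier in this file; `pa' is hypothetical:
 *
 *	uint64_t pa = ptob((uint64_t)pp->p_pagenum);
 *
 *	if (page_retire(pa, PR_FMA) == 0)
 *		(the page is now on the retired_pages vnode)
 */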

/*
 * kpm large page description.
 * The virtual address range of segkpm is divided into chunks of
 * kpm_pgsz. Each chunk is controlled by a kpm_page_t. The ushort
 * is sufficient for 2^15 * PAGESIZE, so e.g. the maximum kpm_pgsz
 * for 8K pages is 256M and 2G for 64K pages. It is kept as small as
 * possible to save physical memory space.
 *
 * There are 2 segkpm mapping windows within the virtual address
 * space when we have to prevent VAC alias conflicts. The so called
 * Alias window (mappings are always by PAGESIZE) is controlled by
 * kp_refcnta. The regular window is controlled by kp_refcnt for the
 * normal operation, which is to use the largest available pagesize.
 * When VAC alias conflicts are present within a chunk in the regular
 * window the large page mapping is broken up into smaller PAGESIZE
 * mappings. kp_refcntc is used to control the pages that are involved
 * in the conflict and kp_refcnts holds the active mappings done
 * with the small page size. In non vac conflict mode kp_refcntc is
 * also used as "go" indication (-1) for the trap level tsbmiss
 * handler.
 */
typedef struct kpm_page {
	short kp_refcnt;	/* pages mapped large */
	short kp_refcnta;	/* pages mapped in Alias window */
	short kp_refcntc;	/* TL-tsbmiss flag; #vac alias conflict pages */
	short kp_refcnts;	/* vac alias: pages mapped small */
} kpm_page_t;

/*
 * Note: khl_lock offset changes must be reflected in sfmmu_asm.s
 */
typedef struct kpm_hlk {
	kmutex_t khl_mutex;	/* kpm_page mutex */
	uint_t   khl_lock;	/* trap level tsbmiss handling */
} kpm_hlk_t;

/*
 * kpm small page description.
 * When kpm_pgsz is equal to PAGESIZE a smaller representation is used
 * to save memory space. Alias range mappings and regular segkpm
 * mappings are done in units of PAGESIZE and can share the mapping
 * information and the mappings are always distinguishable by their
 * virtual address. Other information needed for VAC conflict prevention
 * is already available on a per page basis.
 *
 * The state about how a kpm page is mapped and whether it is ready to go
 * is indicated by the following 1 byte kpm_spage structure. This byte is
 * split into two 4-bit parts - kp_mapped and kp_mapped_go.
 *	- kp_mapped == 1	the page is mapped cacheable
 *	- kp_mapped == 2	the page is mapped non-cacheable
 *	- kp_mapped_go == 1	the mapping is ready to be dropped in
 *	- kp_mapped_go == 0	the mapping is not ready to be dropped in.
 * When kp_mapped_go == 0, we will have the C handler resolve the VAC
 * conflict.  Otherwise, the assembly tsb miss handler can simply drop in
 * the mapping when a tsb miss occurs.
 */
typedef union kpm_spage {
	struct {
#ifdef  _BIG_ENDIAN
		uchar_t mapped_go: 4;	/* go or nogo flag */
		uchar_t mapped: 4;	/* page mapped small */
#else
		uchar_t mapped: 4;	/* page mapped small */
		uchar_t mapped_go: 4;	/* go or nogo flag */
#endif
	} kpm_spage_un;
	uchar_t	kp_mapped_flag;
} kpm_spage_t;

#define	kp_mapped	kpm_spage_un.mapped
#define	kp_mapped_go	kpm_spage_un.mapped_go

/*
 * Note: kshl_lock offset changes must be reflected in sfmmu_asm.s
 */
typedef struct kpm_shlk {
	uint_t kshl_lock;	/* trap level tsbmiss handling */
} kpm_shlk_t;

/*
 * Each segment of physical memory is described by a memseg struct.
 * Within a segment, memory is considered contiguous. The members
 * can be categorized as follows:
 * . Platform independent:
 *	pages, epages, pages_base, pages_end, next, lnext.
 * . 64bit only but platform independent:
 *	kpm_pbase, kpm_nkpmpgs, kpm_pages, kpm_spages.
 * . Really platform or mmu specific:
 *	pagespa, epagespa, nextpa, kpm_pagespa.
 * . Mixed:
 *	msegflags.
 */
struct memseg {
	page_t *pages, *epages;		/* [from, to] in page array */
	pfn_t pages_base, pages_end;	/* [from, to] in page numbers */
	struct memseg *next;		/* next segment in list */
	struct memseg *lnext;		/* next segment in deleted list */
#if defined(__sparc)
	uint64_t pagespa, epagespa;	/* [from, to] page array physical */
	uint64_t nextpa;		/* physical next pointer */
	pfn_t	kpm_pbase;		/* start of kpm range */
	pgcnt_t kpm_nkpmpgs;		/* # of kpm_pgsz pages */
	union _mseg_un {
		kpm_page_t  *kpm_lpgs;	/* ptr to kpm_page array */
		kpm_spage_t *kpm_spgs;	/* ptr to kpm_spage array */
	} mseg_un;
	uint64_t kpm_pagespa;		/* physical ptr to kpm (s)pages array */
#endif	/* __sparc */
	uint_t msegflags;		/* memseg flags */
};

/* memseg union aliases */
#define	kpm_pages	mseg_un.kpm_lpgs
#define	kpm_spages	mseg_un.kpm_spgs

/* msegflags */
#define	MEMSEG_DYNAMIC		0x1	/* DR: memory was added dynamically */
#define	MEMSEG_META_INCL	0x2	/* DR: memseg includes its metadata */
#define	MEMSEG_META_ALLOC	0x4	/* DR: memseg allocated its metadata */

/* memseg support macros */
#define	MSEG_NPAGES(SEG)	((SEG)->pages_end - (SEG)->pages_base)

/* memseg hash */
#define	MEM_HASH_SHIFT	0x9
#define	N_MEM_SLOTS	0x200		/* must be a power of 2 */
#define	MEMSEG_PFN_HASH(pfn)	(((pfn)/mhash_per_slot) & (N_MEM_SLOTS - 1))

/* memseg externals */
extern struct memseg	*memsegs;	/* list of memory segments */
extern ulong_t		mhash_per_slot;
extern uint64_t		memsegspa;	/* memsegs as physical address */

void build_pfn_hash();
extern struct memseg *page_numtomemseg_nolock(pfn_t pfnum);
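
/*
 * Illustrative sketch (an assumption; the real lookup is accelerated by
 * the MEMSEG_PFN_HASH above): conceptually, page_numtomemseg_nolock()
 * resolves a pfn by finding the memseg whose [pages_base, pages_end)
 * range contains it and indexing into that segment's page array:
 *
 *	struct memseg *seg;
 *
 *	for (seg = memsegs; seg != NULL; seg = seg->next)
 *		if (pfn >= seg->pages_base && pfn < seg->pages_end)
 *			return (seg->pages + (pfn - seg->pages_base));
 *	return (NULL);
 */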

/*
 * page capture related info:
 * The page capture routines allow us to asynchronously capture given pages
 * for the explicit use of the requestor.  New requestors can be added by
 * explicitly adding themselves to the PC_* flags below and incrementing
 * PC_NUM_CALLBACKS as necessary.
 *
 * Subsystems using page capture must register a callback before attempting
 * to capture a page.  A duration of -1 will indicate that we will never give
 * up while trying to capture a page and will only stop trying to capture the
 * given page once we have successfully captured it.  Thus the user needs to
 * be aware of the behavior of all callers who have a duration of -1.
 *
 * For now, only /dev/physmem and page retire use the page capture interface
 * and only a single request can be outstanding for a given page.  Thus, if
 * /dev/physmem wants a page and page retire also wants the same page, only
 * the page retire request will be honored until the point in time that the
 * page is actually retired, at which point in time, subsequent requests by
 * /dev/physmem will succeed if the CAPTURE_GET_RETIRED flag was set.
 */

#define	PC_RETIRE		(0)
#define	PC_PHYSMEM		(1)
#define	PC_NUM_CALLBACKS	(2)
#define	PC_MASK			((1 << PC_NUM_CALLBACKS) - 1)

#define	CAPTURE_RETIRE		(1 << PC_RETIRE)
#define	CAPTURE_PHYSMEM		(1 << PC_PHYSMEM)

#define	CAPTURE_ASYNC		(0x0200)

#define	CAPTURE_GET_RETIRED	(0x1000)
#define	CAPTURE_GET_CAGE	(0x2000)

struct page_capture_callback {
	int cb_active;		/* 1 means active, 0 means inactive */
	clock_t duration;	/* the length in time that we'll attempt to */
				/* capture this page asynchronously. (in HZ) */
	krwlock_t cb_rwlock;
	int (*cb_func)(page_t *, void *, uint_t); /* callback function */
};

extern kcondvar_t pc_cv;

void page_capture_register_callback(uint_t index, clock_t duration,
    int (*cb_func)(page_t *, void *, uint_t));
void page_capture_unregister_callback(uint_t index);
int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap);
void page_unlock_capture(page_t *pp);
int page_capture_unretire_pp(page_t *);

extern int memsegs_trylock(int);
extern void memsegs_lock(int);
extern void memsegs_unlock(int);
extern int memsegs_lock_held(void);
extern void memlist_read_lock(void);
extern void memlist_read_unlock(void);
extern void memlist_write_lock(void);
extern void memlist_write_unlock(void);

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_PAGE_H */