Lines Matching defs:pp

556 	page_t	*pp,
566 "add_physmem:pp %p num %lu", pp, num);
584 for (; num; pp++, pnum++, num--) {
590 add_physmem_cb(pp, pnum);
592 pp->p_lckcnt = 0;
593 pp->p_cowcnt = 0;
594 pp->p_slckcnt = 0;
600 pp->p_selock = 0;
605 page_iolock_init(pp);
610 PP_SETFREE(pp);
611 page_clr_all_props(pp);
612 PP_SETAGED(pp);
613 pp->p_offset = (u_offset_t)-1;
614 pp->p_next = pp;
615 pp->p_prev = pp;
621 pp->p_szc = 0;
622 page_free_at_startup(pp);
637 pp->p_szc = 0;
638 page_free_at_startup(pp);
645 pp->p_szc = szc;
646 page_list_concat(&root, &pp);
673 pp->p_szc = 0;
674 page_free_at_startup(pp);
681 pp->p_szc = szc;
683 root = pp;
724 page_t *pp;
744 pp = page_hash_search(index, vp, off);
745 if (pp != NULL) {
751 if (!page_try_reclaim_lock(pp, se, es)) {
767 if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) {
774 * Since `pp' is locked it can not change identity now.
788 if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
789 ((volatile u_offset_t)(pp->p_offset) != off)) {
793 (void *)pp);
796 page_unlock(pp);
804 * If page_trylock() was called, then pp may still be on
816 if ((!hash_locked) && (PP_ISFREE(pp))) {
817 ASSERT(PP_ISAGED(pp) == 0);
825 if (!page_reclaim(pp, NULL)) {
837 page_downgrade(pp);
845 if (newpp != NULL && pp->p_szc < newpp->p_szc &&
846 PAGE_EXCL(pp) && nrelocp != NULL) {
848 (void) page_relocate(&pp, &newpp, 1, 1, nrelocp,
855 pp = newpp;
859 page_downgrade(pp);
864 if (PAGE_EXCL(pp) && se == SE_SHARED) {
865 page_downgrade(pp);
867 VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc,
869 VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc,
871 VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc,
873 } else if (newpp != NULL && PAGE_EXCL(pp)) {
910 pp = newpp;
917 ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);
919 ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1);
921 return (pp);
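
The lookup matches at 744-921 above show the core discipline: search the hash under the hash mutex, take the page's shared/exclusive lock, and then re-verify that p_vnode and p_offset still name the requested page, since the page can change identity between the hash search and the lock grab (see the comment at 774). Below is a minimal, self-contained userland sketch of that lock-then-revalidate idiom; the struct fields and the pthread mutex are stand-ins for page_t and its selock, not the kernel types, and error handling is omitted.

#include <stdio.h>
#include <pthread.h>

/* Hypothetical stand-ins for vnode_t and page_t; illustration only. */
struct vnode { int v_id; };
struct page {
        pthread_mutex_t p_selock;       /* plays the role of the page lock */
        struct vnode    *p_vnode;
        long long       p_offset;
};

/*
 * Lock-then-revalidate: after winning the per-page lock, confirm the page
 * still belongs to (vp, off); it may have changed identity while we waited.
 */
static struct page *
lookup_and_lock(struct page *pp, struct vnode *vp, long long off)
{
        if (pp == NULL || pthread_mutex_trylock(&pp->p_selock) != 0)
                return (NULL);
        if (pp->p_vnode != vp || pp->p_offset != off) {
                pthread_mutex_unlock(&pp->p_selock);
                return (NULL);          /* caller re-searches the hash */
        }
        return (pp);                    /* locked, identity confirmed */
}

int
main(void)
{
        struct vnode vn = { 1 };
        struct page pg = { PTHREAD_MUTEX_INITIALIZER, &vn, 4096 };
        struct page *pp;

        pp = lookup_and_lock(&pg, &vn, 4096);
        printf("match: %s\n", pp != NULL ? "locked" : "miss");
        if (pp != NULL)
                pthread_mutex_unlock(&pp->p_selock);
        pp = lookup_and_lock(&pg, &vn, 8192);   /* stale offset */
        printf("stale: %s\n", pp != NULL ? "locked" : "miss");
        return (0);
}
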
933 page_t *pp;
942 pp = page_hash_search(index, vp, off);
944 if (pp == NULL) {
950 pp = page_hash_search(index, vp, off);
953 if (pp == NULL || PP_ISFREE(pp)) {
955 pp = NULL;
957 if (!page_trylock(pp, se)) {
959 pp = NULL;
965 if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
966 ((u_offset_t)(pp->p_offset) != off)) {
970 (void *)pp);
973 page_unlock(pp);
976 if (PP_ISFREE(pp)) {
978 page_unlock(pp);
979 pp = NULL;
988 ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);
990 return (pp);
1001 page_t *pp;
1012 pp = page_hash_search(index, vp, off);
1015 ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
1016 return (pp);
1065 page_t *pp;
1084 pp = page_hash_search(index, vp, off);
1089 if (pp == NULL) {
1095 rootpp = pp;
1098 if ((pszc = pp->p_szc) >= szc && ppa != NULL) {
1100 if (!page_trylock(pp, SE_SHARED)) {
1107 if (pp->p_szc != pszc || pp->p_vnode != vp ||
1108 pp->p_offset != off || pp->p_pagenum != pfn) {
1110 page_unlock(pp);
1118 ASSERT(!PP_ISFREE(pp));
1120 page_unlock(pp);
1123 ppa[0] = pp;
1124 pp++;
1127 for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
1128 if (!page_trylock(pp, SE_SHARED)) {
1130 pp--;
1132 page_unlock(pp);
1133 pp--;
1138 if (pp->p_szc != pszc) {
1140 page_unlock(pp);
1141 pp--;
1143 page_unlock(pp);
1144 pp--;
1156 ASSERT(!PP_ISFREE(pp));
1157 if (pp->p_vnode != vp ||
1158 pp->p_offset != off) {
1162 ppa[i] = pp;
1163 ASSERT(pp->p_pagenum == pfn);
1194 for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
1195 if (!page_trylock(pp, SE_EXCL)) {
1202 if (pp->p_pagenum != pfn) {
1203 page_unlock(pp);
1206 if (pp->p_vnode != vp ||
1207 pp->p_offset != off) {
1209 page_unlock(pp);
1212 if (pp->p_szc >= szc) {
1214 page_unlock(pp);
1222 --pp;
1224 page_unlock(pp);
1225 --pp;
1230 pp = rootpp;
1231 for (i = 0; i < pages; i++, pp++) {
1232 if (PP_ISFREE(pp)) {
1234 ASSERT(!PP_ISAGED(pp));
1235 ASSERT(pp->p_szc == 0);
1236 if (!page_reclaim(pp, NULL)) {
1240 ASSERT(pp->p_szc < szc);
1242 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
1252 pp = rootpp;
1253 for (j = 0; j < pages; j++, pp++) {
1255 page_unlock(pp);
1262 pp = rootpp;
1263 for (i = 0; i < pages; i++, pp++, off += PAGESIZE) {
1264 ASSERT(PAGE_EXCL(pp));
1265 ASSERT(!PP_ISFREE(pp));
1266 ASSERT(!hat_page_is_mapped(pp));
1267 ASSERT(pp->p_vnode == vp);
1268 ASSERT(pp->p_offset == off);
1269 pp->p_szc = szc;
1271 pp = rootpp;
1272 for (i = 0; i < pages; i++, pp++) {
1274 page_unlock(pp);
1276 ppa[i] = pp;
1298 page_t *pp;
1311 pp = page_hash_search(index, vp, off);
1312 if (pp != NULL) {
1313 *szc = pp->p_szc;
1757 page_t *pp;
1849 pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
1851 if (pp == NULL) {
1852 pp = page_get_cachelist(vp, off, seg, vaddr,
1855 if (pp == NULL) {
1914 return (pp);
1990 page_t *pplist = NULL, *pp;
2038 pp = page_get_freelist(vp, 0, seg, addr, pgsz,
2040 if (pp == NULL) {
2041 pp = page_get_freelist(vp, 0, seg, addr, pgsz,
2045 pp = page_get_freelist(vp, 0, seg, addr, pgsz,
2048 if (pp != NULL) {
2050 page_list_concat(&pplist, &pp);
2077 pp = pplist;
2085 ASSERT(PP_ISFREE(pp));
2086 ASSERT(PP_ISAGED(pp));
2090 PP_CLRFREE(pp);
2091 PP_CLRAGED(pp);
2092 page_sub(&pplist, pp);
2093 *ppa++ = pp;
2097 ASSERT(pp->p_szc != 0);
2098 curnpgs = page_get_pagecnt(pp->p_szc);
2099 page_list_break(&pp, &pplist, curnpgs);
2100 page_list_add_pages(pp, 0);
2105 pp = pplist;
2108 PP_CLRFREE(pp);
2109 PP_CLRAGED(pp);
2110 pp = pp->p_next;
2131 page_t *pp;
2232 pp = rootpp;
2234 ASSERT(PAGE_EXCL(pp));
2235 ASSERT(pp->p_vnode == NULL);
2236 ASSERT(!hat_page_is_mapped(pp));
2237 PP_CLRFREE(pp);
2238 PP_CLRAGED(pp);
2239 if (!page_hashin(pp, vp, off, NULL))
2241 (void *)pp);
2242 page_io_lock(pp);
2244 pp = pp->p_next;
2370 page_t *pp;
2439 pp = page_hash_search(index, vp, off);
2440 if (pp == NULL) {
2442 pp = npp;
2444 if (!page_hashin(pp, vp, off, phm)) {
2457 (void *)pp, (void *)vp, off, (void *)phm);
2475 page_set_props(pp, P_REF);
2488 pp = plist;
2489 page_sub(&plist, pp);
2490 page_io_unlock(pp);
2492 ASSERT(pp->p_szc == 0);
2494 VN_DISPOSE(pp, B_INVAL, 0, kcred);
2500 if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) {
2513 if (PP_ISFREE(pp)) {
2514 ASSERT(PP_ISAGED(pp) == 0);
2516 page_list_sub(pp, PG_CACHE_LIST);
2517 PP_CLRFREE(pp);
2527 page_io_lock(pp);
2528 page_add(&plist, pp);
2620 page_free(page_t *pp, int dontneed)
2625 ASSERT((PAGE_EXCL(pp) &&
2626 !page_iolock_assert(pp)) || panicstr);
2628 if (PP_ISFREE(pp)) {
2629 panic("page_free: page %p is free", (void *)pp);
2632 if (pp->p_szc != 0) {
2633 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) ||
2634 PP_ISKAS(pp)) {
2636 "or no vnode large page %p", (void *)pp);
2638 page_demote_vp_pages(pp);
2639 ASSERT(pp->p_szc == 0);
2646 if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
2647 pp->p_slckcnt != 0) {
2648 panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d "
2649 "slckcnt = %d", (void *)pp, page_pptonum(pp), pp->p_lckcnt,
2650 pp->p_cowcnt, pp->p_slckcnt);
2654 ASSERT(!hat_page_getshare(pp));
2656 PP_SETFREE(pp);
2657 ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) ||
2658 !hat_ismod(pp));
2659 page_clr_all_props(pp);
2660 ASSERT(!hat_page_getshare(pp));
2668 if (pp->p_vnode == NULL) {
2672 PP_SETAGED(pp);
2673 pp->p_offset = (u_offset_t)-1;
2674 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
2677 "page_free_free:pp %p", pp);
2679 PP_CLRAGED(pp);
2683 page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL);
2687 "page_free_cache_tail:pp %p", pp);
2689 page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD);
2693 "page_free_cache_head:pp %p", pp);
2696 page_unlock(pp);
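
The page_free() matches above (2620-2696) show the disposition decision: a page with no vnode identity (p_vnode == NULL) is marked aged, has its offset cleared, and goes to the tail of the free list, while a page that keeps its identity goes onto the cache list, at the tail (2683) or the head (2689). The branch condition itself is not in the listing; presumably it is the dontneed argument visible at 2620, with dontneed pages placed at the head so they are reused first, but treat that mapping as an assumption. A tiny sketch of the same decision:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative only; the real flags are PG_FREE_LIST, PG_CACHE_LIST, etc. */
enum free_target { FREE_LIST_TAIL, CACHE_LIST_TAIL, CACHE_LIST_HEAD };

static enum free_target
choose_list(bool has_identity, bool dontneed)
{
        if (!has_identity)
                return (FREE_LIST_TAIL);        /* anonymous: plain free list */
        /* identity kept: cache list; head placement for dontneed (assumption) */
        return (dontneed ? CACHE_LIST_HEAD : CACHE_LIST_TAIL);
}

int
main(void)
{
        printf("%d %d %d\n",
            choose_list(false, false),
            choose_list(true, false),
            choose_list(true, true));
        return (0);
}
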
2742 page_free_at_startup(page_t *pp)
2747 page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT);
2765 page_free_pages(page_t *pp)
2768 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc);
2770 uint_t szc = pp->p_szc;
2774 "page_free_free:pp %p", pp);
2776 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes());
2777 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) {
2778 panic("page_free_pages: not root page %p", (void *)pp);
2782 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
2807 ASSERT(rootpp == pp);
2828 page_t *pp;
2845 pp = page_exists(vp, off);
2852 if (pp == NULL ||
2853 PP_ISFREE(pp) ||
2854 page_share_cnt(pp) > 0 ||
2855 !page_trylock(pp, SE_EXCL))
2859 * Once we have locked pp, verify that it's still the
2862 ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL));
2863 if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) {
2864 page_unlock(pp);
2871 (void) page_release(pp, 1);
2877 * If pp is part of a large page, only the given constituent page is reclaimed
2892 page_reclaim(page_t *pp, kmutex_t *lock)
2900 ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp));
2923 /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */
2960 * We really can't have page `pp'.
2973 page_unlock(pp);
3014 * page_list_sub will handle the case where pp is a large page.
3017 if (PP_ISAGED(pp)) {
3018 page_list_sub(pp, PG_FREE_LIST);
3020 "page_reclaim_free:pp %p", pp);
3022 page_list_sub(pp, PG_CACHE_LIST);
3024 "page_reclaim_cache:pp %p", pp);
3034 PP_CLRFREE(pp);
3035 PP_CLRAGED(pp);
3036 page_set_props(pp, P_REF);
3043 ASSERT(pp->p_szc == 0);
3054 page_destroy(page_t *pp, int dontfree)
3056 ASSERT((PAGE_EXCL(pp) &&
3057 !page_iolock_assert(pp)) || panicstr);
3058 ASSERT(pp->p_slckcnt == 0 || panicstr);
3060 if (pp->p_szc != 0) {
3061 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) ||
3062 PP_ISKAS(pp)) {
3064 "large page %p", (void *)pp);
3066 page_demote_vp_pages(pp);
3067 ASSERT(pp->p_szc == 0);
3070 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp);
3076 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
3077 page_hashout(pp, NULL);
3086 if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) {
3088 if (pp->p_lckcnt != 0) {
3091 pp->p_lckcnt = 0;
3093 if (pp->p_cowcnt != 0) {
3094 availrmem += pp->p_cowcnt;
3095 pages_locked -= pp->p_cowcnt;
3096 pp->p_cowcnt = 0;
3103 page_free(pp, 0);
3108 page_destroy_pages(page_t *pp)
3112 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc);
3114 uint_t szc = pp->p_szc;
3116 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes());
3120 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp);
3122 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) {
3123 panic("page_destroy_pages: not root page %p", (void *)pp);
3127 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
3153 ASSERT(rootpp == pp);
3173 page_destroy_free(page_t *pp)
3175 ASSERT(PAGE_EXCL(pp));
3176 ASSERT(PP_ISFREE(pp));
3177 ASSERT(pp->p_vnode);
3178 ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0);
3179 ASSERT(!hat_page_is_mapped(pp));
3180 ASSERT(PP_ISAGED(pp) == 0);
3181 ASSERT(pp->p_szc == 0);
3184 page_list_sub(pp, PG_CACHE_LIST);
3186 page_hashout(pp, NULL);
3187 ASSERT(pp->p_vnode == NULL);
3188 ASSERT(pp->p_offset == (u_offset_t)-1);
3189 ASSERT(pp->p_hash == NULL);
3191 PP_SETAGED(pp);
3192 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
3193 page_unlock(pp);
3218 page_t *pp;
3231 "page rename:pp %p vp %p off %llx", opp, vp, off);
3269 pp = page_hash_search(index, vp, off);
3270 if (pp != NULL) {
3281 if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) {
3299 if (hat_page_is_mapped(pp)) {
3308 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
3309 if (pp->p_szc != 0) {
3312 page_demote_vp_pages(pp);
3313 ASSERT(pp->p_szc == 0);
3316 } else if (pp->p_szc != 0) {
3320 page_demote_vp_pages(pp);
3321 ASSERT(pp->p_szc == 0);
3324 page_hashout(pp, phm);
3336 panic("page_rename: Can't hash in page: %p", (void *)pp);
3345 * with pp.
3347 if (pp != NULL) {
3348 ASSERT(!hat_page_is_mapped(pp));
3350 ASSERT(pp->p_szc == 0);
3357 olckcnt = pp->p_lckcnt;
3358 ocowcnt = pp->p_cowcnt;
3359 pp->p_lckcnt = pp->p_cowcnt = 0;
3366 VN_DISPOSE(pp, B_FREE, 0, kcred);
3379 * low level routine to add page `pp' to the hash and vp chains for [vp, offset]
3388 page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset)
3394 ASSERT(PAGE_EXCL(pp));
3404 pp->p_vnode = vp;
3405 pp->p_offset = offset;
3411 PP_SETSWAP(pp);
3422 pp->p_vnode = NULL;
3423 pp->p_offset = (u_offset_t)(-1);
3427 pp->p_hash = *listp;
3428 *listp = pp;
3433 if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp))
3438 page_vpadd(listp, pp);
3444 * Add page `pp' to both the hash and vp chains for [vp, offset].
3450 page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold)
3457 ASSERT(pp->p_fsdata == 0 || panicstr);
3460 "page_hashin:pp %p vp %p offset %llx",
3461 pp, vp, offset);
3475 rc = page_do_hashin(pp, vp, offset);
3485 * Remove page ``pp'' from the hash and vp chains and remove vp association.
3489 page_do_hashout(page_t *pp)
3493 vnode_t *vp = pp->p_vnode;
3499 * First, take pp off of its hash chain.
3501 hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)];
3505 if (hp == pp)
3513 *hpp = pp->p_hash;
3519 page_vpsub(&vp->v_pages, pp);
3521 pp->p_hash = NULL;
3522 page_clr_all_props(pp);
3523 PP_CLRSWAP(pp);
3524 pp->p_vnode = NULL;
3525 pp->p_offset = (u_offset_t)-1;
3526 pp->p_fsdata = 0;
3530 * Remove page ``pp'' from the hash and vp chains and remove vp association.
3533 * hash list pp is on. It is not dropped.
3536 page_hashout(page_t *pp, kmutex_t *phm)
3545 ASSERT(pp->p_vnode != NULL);
3546 ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr);
3547 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode)));
3549 vp = pp->p_vnode;
3552 "page_hashout:pp %p vp %p", pp, vp);
3557 tnf_offset, offset, pp->p_offset);
3563 index = PAGE_HASH_FUNC(vp, pp->p_offset);
3578 page_do_hashout(pp);
3589 sep = page_se_mutex(pp);
3591 pp->p_selock &= ~SE_EWANTED;
3592 if (CV_HAS_WAITERS(&pp->p_cv))
3593 cv_broadcast(&pp->p_cv);
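
The page_do_hashin()/page_do_hashout() matches above show the two chains a page lives on: a singly linked hash chain through p_hash (pushed at the bucket head at 3427-3428, unlinked by walking a pointer-to-pointer at 3501-3513) and the per-vnode list handled by page_vpadd()/page_vpsub(). The pointer-to-pointer unlink needs no special case for the head of the chain; here is a minimal self-contained sketch of that idiom with a stand-in struct, not the kernel's page_hash table (it assumes pp really is on the chain):

#include <stdio.h>
#include <stddef.h>

struct page {
        int             p_pagenum;
        struct page     *p_hash;        /* next page on the hash chain */
};

/* Push pp at the head of the bucket, as page_do_hashin does at 3427-3428. */
static void
hash_in(struct page **bucket, struct page *pp)
{
        pp->p_hash = *bucket;
        *bucket = pp;
}

/*
 * Unlink pp by walking a pointer to whichever "next" slot currently points
 * at it, then redirecting that slot past pp.
 */
static void
hash_out(struct page **bucket, struct page *pp)
{
        struct page **hpp = bucket;

        while (*hpp != pp)
                hpp = &(*hpp)->p_hash;
        *hpp = pp->p_hash;
        pp->p_hash = NULL;
}

int
main(void)
{
        struct page pgs[3] = { { 1 }, { 2 }, { 3 } };
        struct page *bucket = NULL;

        for (int i = 0; i < 3; i++)
                hash_in(&bucket, &pgs[i]);
        hash_out(&bucket, &pgs[1]);
        for (struct page *pp = bucket; pp != NULL; pp = pp->p_hash)
                printf("%d\n", pp->p_pagenum);
        return (0);
}
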
3603 page_add(page_t **ppp, page_t *pp)
3605 ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));
3607 page_add_common(ppp, pp);
3616 page_add_common(page_t **ppp, page_t *pp)
3619 pp->p_next = pp->p_prev = pp;
3621 pp->p_next = *ppp;
3622 pp->p_prev = (*ppp)->p_prev;
3623 (*ppp)->p_prev = pp;
3624 pp->p_prev->p_next = pp;
3626 *ppp = pp;
3637 page_sub(page_t **ppp, page_t *pp)
3639 ASSERT((PP_ISFREE(pp)) ? 1 :
3640 (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));
3642 if (*ppp == NULL || pp == NULL) {
3643 panic("page_sub: bad arg(s): pp %p, *ppp %p",
3644 (void *)pp, (void *)(*ppp));
3648 page_sub_common(ppp, pp);
3656 page_sub_common(page_t **ppp, page_t *pp)
3658 if (*ppp == pp)
3659 *ppp = pp->p_next; /* go to next page */
3661 if (*ppp == pp)
3664 pp->p_prev->p_next = pp->p_next;
3665 pp->p_next->p_prev = pp->p_prev;
3667 pp->p_prev = pp->p_next = pp; /* make pp a list of one */
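
page_add_common() and page_sub_common() above (3616-3667) keep pages on a circular, doubly linked list threaded through p_next/p_prev, with *ppp pointing at the head: an empty list is NULL and a freshly removed page is left as a list of one. The same linkage, with stand-in types and a small driver, looks like the sketch below; this is an illustration, not the kernel code. The vnode-side list at 3755-3792 has the identical shape through p_vpnext/p_vpprev.

#include <stdio.h>
#include <stddef.h>

struct page {
        int             p_pagenum;
        struct page     *p_next;
        struct page     *p_prev;
};

/* Insert pp at the head of the circular list rooted at *ppp. */
static void
list_add(struct page **ppp, struct page *pp)
{
        if (*ppp == NULL) {
                pp->p_next = pp->p_prev = pp;
        } else {
                pp->p_next = *ppp;
                pp->p_prev = (*ppp)->p_prev;
                (*ppp)->p_prev = pp;
                pp->p_prev->p_next = pp;
        }
        *ppp = pp;
}

/* Remove pp, leaving it as a list of one. */
static void
list_sub(struct page **ppp, struct page *pp)
{
        if (*ppp == pp)
                *ppp = pp->p_next;      /* advance the head */
        if (*ppp == pp)
                *ppp = NULL;            /* pp was the only page */
        pp->p_prev->p_next = pp->p_next;
        pp->p_next->p_prev = pp->p_prev;
        pp->p_prev = pp->p_next = pp;
}

int
main(void)
{
        struct page pgs[3] = { { 1 }, { 2 }, { 3 } };
        struct page *list = NULL, *pp;

        for (int i = 0; i < 3; i++)
                list_add(&list, &pgs[i]);
        list_sub(&list, &pgs[1]);
        pp = list;
        do {
                printf("%d\n", pp->p_pagenum);
        } while ((pp = pp->p_next) != list);
        return (0);
}
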
3742 page_list_next(page_t *pp)
3744 return (pp->p_next);
3755 page_vpadd(page_t **ppp, page_t *pp)
3758 pp->p_vpnext = pp->p_vpprev = pp;
3760 pp->p_vpnext = *ppp;
3761 pp->p_vpprev = (*ppp)->p_vpprev;
3762 (*ppp)->p_vpprev = pp;
3763 pp->p_vpprev->p_vpnext = pp;
3765 *ppp = pp;
3775 page_vpsub(page_t **ppp, page_t *pp)
3777 if (*ppp == NULL || pp == NULL) {
3778 panic("page_vpsub: bad arg(s): pp %p, *ppp %p",
3779 (void *)pp, (void *)(*ppp));
3783 if (*ppp == pp)
3784 *ppp = pp->p_vpnext; /* go to next page */
3786 if (*ppp == pp)
3789 pp->p_vpprev->p_vpnext = pp->p_vpnext;
3790 pp->p_vpnext->p_vpprev = pp->p_vpprev;
3792 pp->p_vpprev = pp->p_vpnext = pp; /* make pp a list of one */
3803 page_t *pp, /* page to be locked */
3809 ASSERT(PAGE_LOCKED(pp));
3811 page_struct_lock(pp);
3818 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
3823 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
3826 page_pptonum(pp));
3831 if (pp->p_lckcnt) {
3832 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
3834 if (++pp->p_lckcnt ==
3838 page_pptonum(pp));
3844 ++pp->p_lckcnt;
3851 ++pp->p_lckcnt;
3858 page_struct_unlock(pp);
3868 page_t *pp, /* page to be unlocked */
3872 ASSERT(PAGE_LOCKED(pp));
3874 page_struct_lock(pp);
3884 if (pp->p_cowcnt) {
3886 pp->p_cowcnt--;
3892 if (pp->p_lckcnt && --pp->p_lckcnt == 0) {
3901 page_struct_unlock(pp);
4040 page_addclaim(page_t *pp)
4044 ASSERT(PAGE_LOCKED(pp));
4046 page_struct_lock(pp);
4047 ASSERT(pp->p_lckcnt != 0);
4049 if (pp->p_lckcnt == 1) {
4050 if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
4051 --pp->p_lckcnt;
4053 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
4056 page_pptonum(pp));
4062 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
4066 --pp->p_lckcnt;
4068 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
4071 page_pptonum(pp));
4076 page_struct_unlock(pp);
4081 page_subclaim(page_t *pp)
4085 ASSERT(PAGE_LOCKED(pp));
4087 page_struct_lock(pp);
4088 ASSERT(pp->p_cowcnt != 0);
4090 if (pp->p_lckcnt) {
4091 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
4101 pp->p_cowcnt--;
4103 if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
4106 page_pptonum(pp));
4111 pp->p_cowcnt--;
4112 pp->p_lckcnt++;
4114 page_struct_unlock(pp);
4212 page_t *pp;
4215 pp = page_numtopp_nolock(pfnum);
4216 if (pp == NULL) {
4223 while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) {
4224 if (page_pptonum(pp) != pfnum)
4229 if (page_pptonum(pp) != pfnum) {
4230 page_unlock(pp);
4234 return (pp);
4240 page_t *pp;
4243 pp = page_numtopp_nolock(pfnum);
4244 if (pp == NULL) {
4251 while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) {
4252 if (page_pptonum(pp) != pfnum)
4257 if (page_pptonum(pp) != pfnum) {
4258 page_unlock(pp);
4262 return (pp);
4272 page_t *pp;
4275 pp = page_numtopp_nolock(pfnum);
4276 if (pp == NULL) {
4283 if (PP_ISFREE(pp))
4284 pp = NULL;
4286 if (!page_trylock(pp, se))
4287 pp = NULL;
4289 if (page_pptonum(pp) != pfnum) {
4290 page_unlock(pp);
4293 if (PP_ISFREE(pp)) {
4294 page_unlock(pp);
4295 pp = NULL;
4299 return (pp);
4312 page_t *pp = page0;
4318 vnode_t *vp = pp->p_vnode;
4342 if (!PP_ISFREE(pp) && vp != NULL && !VN_ISKAS(vp) &&
4343 hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL &&
4349 if (!page_trylock(pp, SE_EXCL))
4352 if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) ||
4353 pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
4354 !(hat_pagesync(pp,
4356 page_unlock(pp);
4359 off = pp->p_offset;
4361 page_unlock(pp);
4366 } while ((pp = page_next(pp)) != page0);
4395 page_t *pp;
4406 pp = page0 = page_first();
4416 if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp))
4422 if (PP_ISFREE(pp) && PP_ISAGED(pp))
4434 if (!page_trylock(pp, SE_EXCL)) {
4437 } else if (PP_ISFREE(pp)) {
4438 if (!PP_ISAGED(pp)) {
4439 page_destroy_free(pp);
4441 page_unlock(pp);
4452 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
4453 page_unlock(pp);
4462 if (!page_try_demote_pages(pp)) {
4463 page_unlock(pp);
4471 mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD)
4474 offset = pp->p_offset;
4481 page_unlock(pp);
4492 VN_DISPOSE(pp, B_INVAL, 0, kcred);
4494 } while ((pp = page_next(pp)) != page0);
4646 * pp. If pp is a large page, then it succeeds in locking all the
4653 * pages of a large page pp belongs to can't change. To achieve this we
4654 * recheck szc of pp after locking all constituent pages and retry if szc
4658 * outside of this large page (i.e. pp belonged to a larger large page) is
4659 * already done with all constituent pages of pp since the root's p_szc is
4661 * locked a constituent page outside of pp's current large page.
4668 group_page_trylock(page_t *pp, se_t se)
4672 uint_t pszc = pp->p_szc;
4680 if (pp != PP_GROUPLEADER(pp, pszc)) {
4685 ASSERT(PAGE_LOCKED_SE(pp, se));
4686 ASSERT(!PP_ISFREE(pp));
4691 tpp = pp + 1;
4694 tpp = pp + 1;
4701 if (pp->p_szc != pszc) {
4702 ASSERT(pp->p_szc < pszc);
4703 ASSERT(pp->p_vnode != NULL && !PP_ISKAS(pp) &&
4704 !IS_SWAPFSVP(pp->p_vnode));
4705 tpp = pp + 1;
4709 pszc = pp->p_szc;
4716 group_page_unlock(page_t *pp)
4721 ASSERT(PAGE_LOCKED(pp));
4722 ASSERT(!PP_ISFREE(pp));
4723 ASSERT(pp == PP_PAGEROOT(pp));
4724 npgs = page_get_pagecnt(pp->p_szc);
4725 for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) {
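
The comment block at 4646-4661 and the group_page_trylock() matches that follow describe the retry discipline for locking a whole large page: with the root held, p_szc can only shrink, so the routine locks every constituent page, rechecks p_szc, and if it changed drops the constituent locks and retries with the smaller page count. Below is a self-contained sketch of that shape only; the szc field, the lock array, and the 1 << szc page count are stand-ins, and the real routine uses page_trylock() and bails out on contention rather than blocking.

#include <stdio.h>
#include <pthread.h>

#define MAX_PAGES       8

static pthread_mutex_t page_lock[MAX_PAGES];
static int root_szc = 3;                /* group spans 1 << root_szc pages */

/*
 * Lock constituents 1..npgs-1 of the group whose root (slot 0) is assumed
 * to be held already, then recheck the size code.  If it shrank underneath
 * us, release the constituent locks and retry with the smaller group.
 */
static int
group_lock(void)
{
        int szc = root_szc;

        for (;;) {
                int npgs = 1 << szc;
                int i;

                for (i = 1; i < npgs; i++)
                        pthread_mutex_lock(&page_lock[i]);
                if (root_szc == szc)
                        return (npgs);          /* whole group locked */
                for (i = 1; i < npgs; i++)
                        pthread_mutex_unlock(&page_lock[i]);
                szc = root_szc;                 /* can only have shrunk */
        }
}

int
main(void)
{
        for (int i = 0; i < MAX_PAGES; i++)
                pthread_mutex_init(&page_lock[i], NULL);
        printf("group size %d\n", group_lock());
        return (0);
}
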
5012 page_t *pp;
5018 pp = pplist;
5019 if (pp->p_szc == 0) {
5020 page_sub(&pplist, pp);
5021 page_clr_all_props(pp);
5022 PP_SETFREE(pp);
5023 PP_SETAGED(pp);
5024 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
5025 page_unlock(pp);
5028 spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc);
5030 page_list_break(&pp, &pplist, curnpgs);
5031 tpp = pp;
5038 } while ((tpp = tpp->p_next) != pp);
5039 page_list_add_pages(pp, 0);
5090 page_release(page_t *pp, int checkmod)
5094 ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) &&
5095 (pp->p_vnode != NULL));
5097 if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) &&
5098 ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) &&
5099 pp->p_lckcnt == 0 && pp->p_cowcnt == 0 &&
5100 !hat_page_is_mapped(pp)) {
5110 if (checkmod && hat_ismod(pp)) {
5111 page_unlock(pp);
5115 VN_DISPOSE(pp, B_FREE, 0, kcred);
5119 page_unlock(pp);
5132 page_try_demote_free_pages(page_t *pp)
5134 page_t *rootpp = pp;
5135 pfn_t pfn = page_pptonum(pp);
5137 uint_t szc = pp->p_szc;
5139 ASSERT(PP_ISFREE(pp));
5140 ASSERT(PAGE_EXCL(pp));
5143 * Adjust rootpp and lock it, if `pp' is not the base
5146 npgs = page_get_pagecnt(pp->p_szc);
5156 if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) {
5161 if (pp != rootpp)
5168 if (pp != rootpp)
5171 ASSERT(PP_ISFREE(pp));
5172 ASSERT(PAGE_EXCL(pp));
5183 page_try_demote_pages(page_t *pp)
5185 page_t *tpp, *rootpp = pp;
5186 pfn_t pfn = page_pptonum(pp);
5188 uint_t szc = pp->p_szc;
5189 vnode_t *vp = pp->p_vnode;
5191 ASSERT(PAGE_EXCL(pp));
5195 if (pp->p_szc == 0) {
5202 page_demote_vp_pages(pp);
5203 ASSERT(pp->p_szc == 0);
5211 npgs = page_get_pagecnt(pp->p_szc);
5236 if (tpp != pp && !page_trylock(tpp, SE_EXCL))
5249 if (tpp != pp)
5269 if (tpp != pp)
5325 * hat_page_demote() removes all large mappings that map pp and then decreases
5333 * We are guaranteed that all constituent pages of pp's large page belong to
5337 * large mappings to pp even though we don't lock any constituent page except
5338 * pp (i.e. we won't unload e.g. kernel locked page).
5341 page_demote_vp_pages(page_t *pp)
5345 ASSERT(PAGE_EXCL(pp));
5346 ASSERT(!PP_ISFREE(pp));
5347 ASSERT(pp->p_vnode != NULL);
5348 ASSERT(!IS_SWAPFSVP(pp->p_vnode));
5349 ASSERT(!PP_ISKAS(pp));
5353 mtx = page_szc_lock(pp);
5355 hat_page_demote(pp);
5358 ASSERT(pp->p_szc == 0);
5379 page_t *pp0, *pp;
5426 pp = NULL;
5428 pp = page_lookup(curvp, off, SE_SHARED);
5434 if (pp == NULL) {
5442 pfn = page_pptonum(pp);
5449 pszc = pp->p_szc;
5456 page_unlock(pp);
5467 if (!page_tryupgrade(pp)) {
5468 page_unlock(pp);
5475 pp0 = pp++;
5487 if (!page_trylock(pp, SE_EXCL)) {
5490 if (PP_ISFREE(pp) ||
5491 pp->p_szc != pszc) {
5496 page_unlock(pp);
5499 pp++;
5508 while (pp0 < pp) {
5531 while (pp0 < pp) {
5555 page_t *pp;
5565 pp = *ppa;
5566 pszc = pp->p_szc;
5588 if (!PP_ISMIGRATE(pp))
5594 pfn = page_pptonum(pp);
5642 newpp = page_get_replacement_page(pp, to, PGR_SAMESZC);
5656 PP_CLRMIGRATE(pp);
5657 if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) {
5672 for (i = 0; i < page_cnt; ++i, ++pp) {
5676 pp = newpp;
5677 page_sub(&newpp, pp);
5678 page_downgrade(pp);
5742 * Some data structures for pfn to pp lookup.
5751 page_t *pp;
5775 pp = seg->pages + (pfnum - seg->pages_base);
5776 if (pp->p_pagenum == pfnum) {
5778 return ((page_t *)pp);
5787 pp = seg->pages + (pfnum - seg->pages_base);
5788 if (pp->p_pagenum == pfnum) {
5790 return ((page_t *)pp);
5798 pp = seg->pages + (pfnum - seg->pages_base);
5799 if (pp->p_pagenum == pfnum) {
5801 return ((page_t *)pp);
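
The matches at 5742-5801 above are the pfn-to-page_t lookup: each memory segment keeps a contiguous page_t array (seg->pages) covering the pfn range starting at seg->pages_base, so once the right segment is found the translation is pointer arithmetic, double-checked against p_pagenum. A minimal sketch with a stand-in segment table follows; the real code also consults a last-hit pointer and a hash before falling back to a walk, which the sketch omits.

#include <stdio.h>
#include <stddef.h>

typedef unsigned long pfn_t;

struct page { pfn_t p_pagenum; };

struct memseg {
        struct page     *pages;         /* page_t array for this segment */
        pfn_t           pages_base;     /* first pfn covered */
        pfn_t           pages_end;      /* one past the last pfn covered */
};

/* Translate a pfn to its page_t by arithmetic within the owning segment. */
static struct page *
pfn_to_page(struct memseg *segs, int nsegs, pfn_t pfnum)
{
        for (int i = 0; i < nsegs; i++) {
                struct memseg *seg = &segs[i];

                if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
                        struct page *pp = seg->pages +
                            (pfnum - seg->pages_base);

                        if (pp->p_pagenum == pfnum)
                                return (pp);
                }
        }
        return (NULL);
}

int
main(void)
{
        struct page pgs[4] = { { 100 }, { 101 }, { 102 }, { 103 } };
        struct memseg seg = { pgs, 100, 104 };

        printf("found pfn 102: %d\n", pfn_to_page(&seg, 1, 102) != NULL);
        printf("found pfn 999: %d\n", pfn_to_page(&seg, 1, 999) != NULL);
        return (0);
}
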
5816 page_t *pp;
5827 pp = seg->pages + (pfnum - seg->pages_base);
5828 if (pp->p_pagenum == pfnum) {
5837 pp = seg->pages + (pfnum - seg->pages_base);
5838 if (pp->p_pagenum == pfnum) {
5857 page_nextn(page_t *pp, ulong_t n)
5881 !(pp >= seg->pages && pp < seg->epages)) {
5884 if (pp >= seg->pages && pp < seg->epages)
5892 pp = seg->pages;
5897 while ((ppn = (pp + n)) >= seg->epages || ppn < pp) {
5898 n -= seg->epages - pp;
5902 pp = seg->pages;
5928 page_t *pp,
5941 ASSERT(pp != NULL);
5942 if (pp->p_szc == 0) {
5945 pfn = page_pptonum(pp);
5946 cnt = page_get_pagecnt(pp->p_szc);
5950 new_pp = pp + cnt;
5979 page_next(page_t *pp)
5981 return (page_nextn(pp, 1));
6042 * Return the pagenum for the given pp
6045 page_pptonum(page_t *pp)
6047 return (pp->p_pagenum);
6056 page_set_props(page_t *pp, uint_t flags)
6059 pp->p_nrm |= (uchar_t)flags;
6063 page_clr_all_props(page_t *pp)
6065 pp->p_nrm = 0;
6072 page_clear_lck_cow(page_t *pp, int adjust)
6076 ASSERT(PAGE_EXCL(pp));
6083 if (pp->p_lckcnt) {
6085 pp->p_lckcnt = 0;
6087 if (pp->p_cowcnt) {
6088 f_amount += pp->p_cowcnt;
6089 pp->p_cowcnt = 0;
6106 page_share_cnt(page_t *pp)
6108 return (hat_page_getshare(pp));
6112 page_isshared(page_t *pp)
6114 return (hat_page_checkshare(pp, 1));
6118 page_isfree(page_t *pp)
6120 return (PP_ISFREE(pp));
6124 page_isref(page_t *pp)
6126 return (hat_page_getattr(pp, P_REF));
6130 page_ismod(page_t *pp)
6132 return (hat_page_getattr(pp, P_MOD));
6177 page_t *pp;
6191 #define PAGE_CAPTURE_PRIO(pp) (PP_ISRAF(pp) ? PC_PRI_LO : PC_PRI_HI)
6227 #define PAGE_CAPTURE_HASH(pp) \
6228 ((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1)))
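
PAGE_CAPTURE_HASH() above buckets a page by its page_t address: the low-order bits are shifted off (presumably because neighbouring page_t structures are closely spaced, so those bits carry little information) and the remainder is masked with the power-of-two bucket count. A quick standalone check of that shape; the bucket count and the 128-byte spacing here are demo values, not the kernel's.

#include <stdio.h>
#include <stdint.h>

#define NUM_BUCKETS     64      /* demo value; must be a power of two */
#define CAPTURE_HASH(pp) \
        ((int)(((uintptr_t)(pp) >> 7) & (NUM_BUCKETS - 1)))

int
main(void)
{
        /* Pretend these are page_t structures 128 bytes apart. */
        static char arena[8 * 128];

        for (int i = 0; i < 8; i++)
                printf("pp %p -> bucket %d\n",
                    (void *)&arena[i * 128], CAPTURE_HASH(&arena[i * 128]));
        return (0);
}
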
6232 int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap);
6289 page_clrtoxic(head->pp, PR_CAPTURE);
6309 * Find pp in the active list and move it to the walked list if it
6311 * Note that most often pp should be at the front of the active list
6317 page_capture_move_to_walked(page_t *pp)
6322 index = PAGE_CAPTURE_HASH(pp);
6327 if (bp->pp == pp) {
6348 bp->pri = PAGE_CAPTURE_PRIO(pp);
6367 page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap)
6384 bp1->pp = pp;
6412 index = PAGE_CAPTURE_HASH(pp);
6423 if (!(pp->p_toxic & PR_CAPTURE)) {
6429 if (tp1->pp == pp) {
6430 panic("page pp 0x%p already on hash "
6432 (void *)pp, (void *)tp1);
6439 page_settoxic(pp, PR_CAPTURE);
6440 pri = PAGE_CAPTURE_PRIO(pp);
6471 if (bp2->pp == pp) {
6502 panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n",
6503 (void *)pp);
6514 page_capture_clean_page(page_t *pp)
6523 ASSERT(PAGE_EXCL(pp));
6524 ASSERT(!PP_RETIRED(pp));
6527 if (PP_ISFREE(pp)) {
6528 if (!page_reclaim(pp, NULL)) {
6533 ASSERT(pp->p_szc == 0);
6534 if (pp->p_vnode != NULL) {
6540 page_hashout(pp, NULL);
6550 if (pp->p_toxic & PR_UE) {
6558 if (pp->p_vnode == NULL) {
6569 if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) {
6570 if (page_try_demote_pages(pp) == 0) {
6575 ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL);
6590 if (pp->p_szc > 0) {
6592 extra = page_get_pagecnt(pp->p_szc) - 1;
6594 tpp = pp->p_next;
6595 page_sub(&pp, tpp);
6601 ASSERT(pp->p_next == pp && pp->p_prev == pp);
6602 pp->p_szc = 0;
6619 if (pp->p_szc > 0) {
6620 if (page_try_demote_pages(pp) == 0) {
6626 ASSERT(pp->p_szc == 0);
6628 if (hat_ismod(pp)) {
6632 if (PP_ISKAS(pp)) {
6636 if (pp->p_lckcnt || pp->p_cowcnt) {
6641 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
6642 ASSERT(!hat_page_is_mapped(pp));
6644 if (hat_ismod(pp)) {
6652 if (pp->p_vnode != NULL) {
6653 page_hashout(pp, NULL);
6664 page_unlock(pp);
6667 ASSERT(pp->p_szc == 0);
6668 ASSERT(PAGE_EXCL(pp));
6670 pp->p_next = pp;
6671 pp->p_prev = pp;
6688 page_capture_pre_checks(page_t *pp, uint_t flags)
6690 ASSERT(pp != NULL);
6693 if (pp->p_vnode == &promvp) {
6697 if (PP_ISNORELOC(pp) && !(flags & CAPTURE_GET_CAGE) &&
6702 if (PP_ISNORELOCKERNEL(pp)) {
6706 if (PP_ISKAS(pp)) {
6732 * Failure cases must release the SE_EXCL lock on pp (usually via page_free).
6735 page_capture_take_action(page_t *pp, uint_t flags, void *datap)
6745 ASSERT(PAGE_EXCL(pp));
6765 index = PAGE_CAPTURE_HASH(pp);
6771 if (bp1->pp == pp) {
6776 page_clrtoxic(pp, PR_CAPTURE);
6789 page_free(pp, 1);
6808 ret = pc_cb[cb_index].cb_func(pp, datap, flags);
6850 if (!(pp->p_toxic & PR_CAPTURE)) {
6852 page_settoxic(pp, PR_CAPTURE);
6856 bp1->pri = PAGE_CAPTURE_PRIO(pp);
6871 if (bp2->pp == pp) {
6889 bp2->pri = PAGE_CAPTURE_PRIO(pp);
6899 panic("PR_CAPTURE set but not on hash for pp 0x%p\n", (void *)pp);
6925 page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
6931 ASSERT(PAGE_EXCL(pp));
6936 ret = page_capture_pre_checks(pp, flags);
6941 if (!page_trylock(pp, SE_EXCL)) {
6950 if (PP_RETIRED(pp)) {
6952 if (!page_unretire_pp(pp, PR_UNR_TEMP)) {
6958 page_capture_add_hash(pp, szc,
6967 page_capture_add_hash(pp, szc, flags, datap);
6972 ASSERT(PAGE_EXCL(pp));
6978 page_unlock(pp);
6982 ret = page_capture_clean_page(pp);
6987 page_capture_add_hash(pp, szc, flags, datap);
6993 ASSERT(PAGE_EXCL(pp));
6994 ASSERT(pp->p_szc == 0);
6997 ret = page_capture_take_action(pp, flags, datap);
7008 page_capture_add_hash(pp, szc, flags, datap);
7014 page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
7019 ret = page_itrycapture(pp, szc, flags, datap);
7029 page_unlock_capture(page_t *pp)
7052 page_unlock_nocapture(pp);
7055 if (pp->p_vnode != NULL && mutex_owned(page_vnode_mutex(pp->p_vnode))) {
7056 page_unlock_nocapture(pp);
7060 index = PAGE_CAPTURE_HASH(pp);
7067 if (bp->pp == pp) {
7072 (void) page_trycapture(pp, szc, flags, datap);
7080 page_clrtoxic(pp, PR_CAPTURE);
7081 page_unlock(pp);
7117 page_t *pp;
7137 pp = bp->pp;
7138 if (PP_TOXIC(pp)) {
7139 if (page_trylock(pp, SE_EXCL)) {
7140 PP_CLRFREE(pp);
7141 pagescrub(pp, 0, PAGESIZE);
7142 page_unlock(pp);
7159 page_t *pp;
7197 page_deleted(bp1->pp)) {
7210 page_clrtoxic(bp1->pp, PR_CAPTURE);
7217 pp = bp1->pp;
7222 if (page_trylock(pp, SE_EXCL)) {
7223 ret = page_trycapture(pp, szc,
7231 (void) page_capture_move_to_walked(pp);