Lines Matching refs:ppa (cross-reference hits for the page_t **ppa arrays used by the segvn fault and claim paths in vm/seg_vn.c)

3230 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc)  in segvn_full_szcpages()  argument
3248 pp = ppa[i]; in segvn_full_szcpages()
3293 if (pfn - 1 != page_pptonum(ppa[i - 1])) { in segvn_full_szcpages()
3304 ASSERT(ppa[i]->p_szc < szc); in segvn_full_szcpages()
3305 if (!page_tryupgrade(ppa[i])) { in segvn_full_szcpages()
3307 page_downgrade(ppa[j]); in segvn_full_szcpages()
3309 *pszc = ppa[i]->p_szc; in segvn_full_szcpages()
3327 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD); in segvn_full_szcpages()
3330 ppa[i]->p_szc = szc; in segvn_full_szcpages()
3333 ASSERT(PAGE_EXCL(ppa[i])); in segvn_full_szcpages()
3334 page_downgrade(ppa[i]); in segvn_full_szcpages()
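
The segvn_full_szcpages() hits above all belong to one lock-promotion walk over ppa[]: each constituent page's shared lock is upgraded to exclusive, existing HAT translations are unloaded, p_szc is rewritten to the new size code, and the locks are downgraded back to shared. A minimal sketch of that shape follows, assuming the usual seg_vn.c kernel context; the "pages" count parameter and the simplified back-out path are stand-ins, not the real routine.

    /*
     * Sketch only: the upgrade/retag/downgrade walk suggested by the
     * segvn_full_szcpages() references.  "pages" (the number of base
     * pages covered by the szc-sized region) is an assumed parameter.
     */
    static int
    szc_retag_sketch(page_t **ppa, pgcnt_t pages, uint_t szc,
        int *upgrdfail, uint_t *pszc)
    {
        pgcnt_t i, j;

        for (i = 0; i < pages; i++) {
            ASSERT(ppa[i]->p_szc < szc);
            if (!page_tryupgrade(ppa[i])) {
                /* Back out: downgrade the pages already upgraded. */
                for (j = 0; j < i; j++)
                    page_downgrade(ppa[j]);
                *pszc = ppa[i]->p_szc;
                *upgrdfail = 1;
                return (0);
            }
        }

        for (i = 0; i < pages; i++) {
            /* Drop translations before changing the size code. */
            (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD);
            ppa[i]->p_szc = szc;
        }

        for (i = 0; i < pages; i++) {
            ASSERT(PAGE_EXCL(ppa[i]));
            page_downgrade(ppa[i]);
        }
        return (1);
    }
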
3358 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc, in segvn_fill_vp_pages() argument
3520 ppa[pgidx] = pp; in segvn_fill_vp_pages()
3585 ppa[pgidx] = pp; in segvn_fill_vp_pages()
3629 ppa[pgidx++] = pp; in segvn_fill_vp_pages()
3637 ASSERT(ppa[i] != NULL); in segvn_fill_vp_pages()
3638 ASSERT(PAGE_EXCL(ppa[i])); in segvn_fill_vp_pages()
3639 ASSERT(ppa[i]->p_vnode == vp); in segvn_fill_vp_pages()
3640 ASSERT(ppa[i]->p_offset == in segvn_fill_vp_pages()
3642 page_downgrade(ppa[i]); in segvn_fill_vp_pages()
3644 ppa[pages] = NULL; in segvn_fill_vp_pages()
3653 ASSERT(ppa[i] != NULL); in segvn_fill_vp_pages()
3654 ASSERT(PAGE_EXCL(ppa[i])); in segvn_fill_vp_pages()
3655 ASSERT(ppa[i]->p_vnode == vp); in segvn_fill_vp_pages()
3656 ASSERT(ppa[i]->p_offset == in segvn_fill_vp_pages()
3658 page_unlock(ppa[i]); in segvn_fill_vp_pages()
3660 ppa[0] = NULL; in segvn_fill_vp_pages()
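
segvn_fill_vp_pages() populates ppa[] with vnode pages and hands the array back in one of two states, which is what the two ASSERT runs above reflect: on success every page is downgraded to a shared lock and the array is NULL-terminated; on failure every lock is dropped and ppa[0] is cleared so the caller sees an empty array. A condensed sketch of both epilogues, with the vp and pages locals assumed from context:

    /* Success: return a NULL-terminated array of shared-locked pages. */
    for (i = 0; i < pages; i++) {
        ASSERT(ppa[i] != NULL);
        ASSERT(PAGE_EXCL(ppa[i]));
        ASSERT(ppa[i]->p_vnode == vp);
        page_downgrade(ppa[i]);
    }
    ppa[pages] = NULL;

    /* Failure: drop every lock taken so far and report an empty array. */
    for (i = 0; i < pages; i++) {
        ASSERT(ppa[i] != NULL);
        ASSERT(ppa[i]->p_vnode == vp);
        page_unlock(ppa[i]);
    }
    ppa[0] = NULL;
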
3791 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \ argument
3792 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \
3795 ASSERT((ppa)[i]->p_vnode == \
3796 (ppa)[0]->p_vnode); \
3797 hat_setmod((ppa)[i]); \
3802 ASSERT((ppa)[i]->p_vnode == \
3803 (ppa)[0]->p_vnode); \
3804 if (!hat_ismod((ppa)[i])) { \
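
The fragmentary hits above come from the SEGVN_UPDATE_MODBITS() macro, which keeps HAT mod bits consistent for VMODSORT vnodes before a large-page load. The reconstruction below (shown without the line-continuation backslashes) is hedged: the outer IS_VMODSORT() test, the hat_setmod() loop, and the hat_ismod() check are taken from the fragments, while the exact rw and PROT_WRITE conditions are assumptions.

    /*
     * Reconstruction, not verbatim: for a VMODSORT vnode a write fault
     * marks every constituent page modified; otherwise, if write access
     * would be granted, it is withheld unless every page is already
     * dirty, so the first store still faults and sets the mod bit.
     */
    if (IS_VMODSORT((ppa)[0]->p_vnode)) {
        if (rw == S_WRITE) {
            for (i = 0; i < (pages); i++) {
                ASSERT((ppa)[i]->p_vnode == (ppa)[0]->p_vnode);
                hat_setmod((ppa)[i]);
            }
        } else if ((prot) & (vpprot) & PROT_WRITE) {
            for (i = 0; i < (pages); i++) {
                ASSERT((ppa)[i]->p_vnode == (ppa)[0]->p_vnode);
                if (!hat_ismod((ppa)[i])) {
                    prot &= ~PROT_WRITE;
                    break;
                }
            }
        }
    }
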
3844 page_t **ppa; in segvn_fault_vnodepages() local
3920 ppa = kmem_alloc(ppasize, KM_SLEEP); in segvn_fault_vnodepages()
4001 ppa[0] = NULL; in segvn_fault_vnodepages()
4004 segtype == MAP_PRIVATE ? ppa : NULL)) { in segvn_fault_vnodepages()
4019 vp, off, szc, ppa, &pplist, in segvn_fault_vnodepages()
4031 ppa[0] == NULL); in segvn_fault_vnodepages()
4032 if (physcontig && ppa[0] == NULL) { in segvn_fault_vnodepages()
4036 } else if (!brkcow && !tron && szc && ppa[0] != NULL) { in segvn_fault_vnodepages()
4044 ppa[0] = NULL; in segvn_fault_vnodepages()
4046 &vpprot, ppa, pgsz, seg, a, arw, in segvn_fault_vnodepages()
4051 ASSERT(PAGE_LOCKED(ppa[i])); in segvn_fault_vnodepages()
4052 ASSERT(!PP_ISFREE(ppa[i])); in segvn_fault_vnodepages()
4053 ASSERT(ppa[i]->p_vnode == vp); in segvn_fault_vnodepages()
4054 ASSERT(ppa[i]->p_offset == in segvn_fault_vnodepages()
4125 page_unlock(ppa[i]); in segvn_fault_vnodepages()
4174 seg, a, prot, ppa, vpage, segvn_anypgsz, in segvn_fault_vnodepages()
4185 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); in segvn_fault_vnodepages()
4192 hat_memload_array(hat, a, pgsz, ppa, prot, in segvn_fault_vnodepages()
4198 page_unlock(ppa[i]); in segvn_fault_vnodepages()
4209 pfn = page_pptonum(ppa[0]); in segvn_fault_vnodepages()
4235 if ((pszc = ppa[0]->p_szc) == szc && in segvn_fault_vnodepages()
4241 ASSERT(PAGE_LOCKED(ppa[i])); in segvn_fault_vnodepages()
4242 ASSERT(!PP_ISFREE(ppa[i])); in segvn_fault_vnodepages()
4243 ASSERT(page_pptonum(ppa[i]) == in segvn_fault_vnodepages()
4245 ASSERT(ppa[i]->p_szc == szc); in segvn_fault_vnodepages()
4246 ASSERT(ppa[i]->p_vnode == vp); in segvn_fault_vnodepages()
4247 ASSERT(ppa[i]->p_offset == in segvn_fault_vnodepages()
4263 if (PP_ISMIGRATE(ppa[0])) { in segvn_fault_vnodepages()
4264 page_migrate(seg, a, ppa, pages); in segvn_fault_vnodepages()
4266 SEGVN_UPDATE_MODBITS(ppa, pages, rw, in segvn_fault_vnodepages()
4270 ppa, prot & vpprot, hat_flag, in segvn_fault_vnodepages()
4283 ppa[i], prot & vpprot, in segvn_fault_vnodepages()
4290 page_unlock(ppa[i]); in segvn_fault_vnodepages()
4324 page_unlock(ppa[i]); in segvn_fault_vnodepages()
4344 !segvn_full_szcpages(ppa, szc, &upgrdfail, in segvn_fault_vnodepages()
4363 page_unlock(ppa[i]); in segvn_fault_vnodepages()
4380 SEGVN_UPDATE_MODBITS(ppa, pages, rw, in segvn_fault_vnodepages()
4385 ppa, prot & vpprot, hat_flag, in segvn_fault_vnodepages()
4391 ppa[i], prot & vpprot, in segvn_fault_vnodepages()
4397 page_unlock(ppa[i]); in segvn_fault_vnodepages()
4411 ASSERT(pszc == ppa[0]->p_szc); in segvn_fault_vnodepages()
4427 szcmtx = page_szc_lock(ppa[0]); in segvn_fault_vnodepages()
4428 pszc = ppa[0]->p_szc; in segvn_fault_vnodepages()
4430 ASSERT(ppa[0]->p_szc <= pszc); in segvn_fault_vnodepages()
4448 SEGVN_UPDATE_MODBITS(ppa, pages, rw, in segvn_fault_vnodepages()
4450 hat_memload_array_region(hat, a, pgsz, ppa, in segvn_fault_vnodepages()
4455 page_unlock(ppa[i]); in segvn_fault_vnodepages()
4475 page_unlock(ppa[i]); in segvn_fault_vnodepages()
4489 segvn_relocate_pages(ppa, pplist); in segvn_fault_vnodepages()
4497 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot); in segvn_fault_vnodepages()
4502 ASSERT(ppa[i]->p_szc < szc); in segvn_fault_vnodepages()
4505 ppa[i], prot & vpprot, hat_flag, in segvn_fault_vnodepages()
4510 hat_memload_array_region(hat, a, pgsz, ppa, in segvn_fault_vnodepages()
4515 ASSERT(PAGE_SHARED(ppa[i])); in segvn_fault_vnodepages()
4516 page_unlock(ppa[i]); in segvn_fault_vnodepages()
4607 kmem_free(ppa, ppasize); in segvn_fault_vnodepages()
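
The long run of segvn_fault_vnodepages() hits follows one lifecycle: ppa[] is kmem_alloc()ed up front, filled by the anon layer, by segvn_fill_vp_pages(), or by the getpage path, mapped into the HAT as a single large page where the size codes allow it (with per-page hat_memload() as the fallback), and finally kmem_free()d. A sketch of the common mapping tail, assuming the locals visible in the fragments (seg, a, pgsz, pages, hat, hat_flag, rw, prot, vpprot):

    /*
     * Sketch of the happy-path tail: migrate if the constituent pages
     * are tagged for lgroup migration, fix up the mod bits, map the
     * whole large page, then drop the shared page locks.
     */
    if (PP_ISMIGRATE(ppa[0]))
        page_migrate(seg, a, ppa, pages);
    SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
    hat_memload_array(hat, a, pgsz, ppa, prot & vpprot, hat_flag);
    for (i = 0; i < pages; i++)
        page_unlock(ppa[i]);

The hat_memload_array_region() hits are the same pattern for segments backed by a shared HAT region, and the kmem_free(ppa, ppasize) hit releases the array before the function returns.
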
4670 page_t **ppa; in segvn_fault_anonpages() local
4721 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP); in segvn_fault_anonpages()
4753 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow, in segvn_fault_anonpages()
4771 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); in segvn_fault_anonpages()
4774 ppa[0]->p_szc <= szc); in segvn_fault_anonpages()
4776 ppa[0]->p_szc >= szc); in segvn_fault_anonpages()
4782 page_migrate(seg, a, ppa, pages); in segvn_fault_anonpages()
4790 hat_memload_array(hat, a, pgsz, ppa, in segvn_fault_anonpages()
4798 page_unlock(ppa[i]); in segvn_fault_anonpages()
4906 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); in segvn_fault_anonpages()
4911 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); in segvn_fault_anonpages()
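
segvn_fault_anonpages() runs the same mapping tail for anonymous pages, but its ppa[] comes from a per-size-code kmem cache rather than kmem_alloc(), so both exits above return the array to segvn_szc_cache[ppaszc]. A sketch of that pairing, with the fill and load steps elided:

    ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP);

    /* ... the anon layer fills ppa[], hat_memload_array() maps it ... */

    for (i = 0; i < pages; i++)
        page_unlock(ppa[i]);
    kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
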
6496 page_t **ppa; in segvn_claim_pages() local
6522 ppa = kmem_alloc(ppasize, KM_SLEEP); in segvn_claim_pages()
6532 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) { in segvn_claim_pages()
6548 ppa[pg_idx++] = pp; in segvn_claim_pages()
6552 if (ppa[0] == NULL) { in segvn_claim_pages()
6553 kmem_free(ppa, ppasize); in segvn_claim_pages()
6558 ppa[pg_idx] = NULL; in segvn_claim_pages()
6564 if (ppa[0]->p_szc == seg->s_szc) { in segvn_claim_pages()
6566 err = page_addclaim_pages(ppa); in segvn_claim_pages()
6568 err = page_subclaim_pages(ppa); in segvn_claim_pages()
6570 for (i = 0; ppa[i]; i += pgcnt) { in segvn_claim_pages()
6571 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt)); in segvn_claim_pages()
6573 err = page_addclaim_pages(&ppa[i]); in segvn_claim_pages()
6575 err = page_subclaim_pages(&ppa[i]); in segvn_claim_pages()
6582 ASSERT(ppa[i] != NULL); in segvn_claim_pages()
6583 page_unlock(ppa[i]); in segvn_claim_pages()
6586 kmem_free(ppa, ppasize); in segvn_claim_pages()
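
segvn_claim_pages() gathers the locked pages for the protection change into a NULL-terminated ppa[], then adds or drops the claim on them. When the first page's size code matches the segment's, a single call covers the whole array; otherwise the array is walked in pgcnt-sized, pfn-aligned runs. A sketch of that decision, where "claim" stands in for the prot-derived direction and the break-on-failure is an assumption:

    if (ppa[0]->p_szc == seg->s_szc) {
        /* One full large page: claim or subclaim the array at once. */
        err = claim ? page_addclaim_pages(ppa) :
            page_subclaim_pages(ppa);
    } else {
        /* Mixed size codes: walk aligned pgcnt-sized runs. */
        for (i = 0; ppa[i] != NULL; i += pgcnt) {
            ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt));
            err = claim ? page_addclaim_pages(&ppa[i]) :
                page_subclaim_pages(&ppa[i]);
            if (err == 0)   /* assumed: stop on the first failure */
                break;
        }
    }

    for (i = 0; i < pg_idx; i++) {
        ASSERT(ppa[i] != NULL);
        page_unlock(ppa[i]);
    }
    kmem_free(ppa, ppasize);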