Lines Matching defs:pp

74  * PAGE_SE_MUTEX(pp) returns the address of the appropriate mutex
91 #define PAGE_IO_MUTEX(pp) \
92 &pio_mutex[(((uintptr_t)pp) >> PIO_SHIFT) & (PIO_TABLE_SIZE - 1)]
98 extern pad_mutex_t *pse_mutex; /* Locks protecting pp->p_selock */
101 #define PAGE_SE_MUTEX(pp) &pse_mutex[ \
102 ((((uintptr_t)(pp) >> pse_shift) ^ ((uintptr_t)(pp))) >> 7) & \
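
Both PAGE_IO_MUTEX and PAGE_SE_MUTEX choose a lock by hashing the page_t pointer into a fixed, power-of-two-sized table of padded mutexes rather than embedding a lock in every page; PAGE_SE_MUTEX additionally XORs a pse_shift-shifted copy of the pointer into the hash before the final shift and mask. A minimal user-level sketch of the same technique, using pthreads, a plainer shift-and-mask hash, and hypothetical names (lock_table, LOCK_TABLE_SIZE, addr_to_lock):

#include <pthread.h>
#include <stdint.h>

#define	LOCK_TABLE_SIZE	128	/* must be a power of two for the mask */

/* One mutex per cache line, in the spirit of pad_mutex_t. */
typedef struct {
	_Alignas(64) pthread_mutex_t lock;
} padded_lock_t;

static padded_lock_t lock_table[LOCK_TABLE_SIZE];

static void
lock_table_init(void)
{
	for (int i = 0; i < LOCK_TABLE_SIZE; i++)
		(void) pthread_mutex_init(&lock_table[i].lock, NULL);
}

/*
 * Map an object's address to one of the table's mutexes.  Shifting
 * before masking discards low-order bits that are identical for
 * similarly aligned objects, so neighbors spread across the table.
 */
static pthread_mutex_t *
addr_to_lock(const void *addr)
{
	uintptr_t a = (uintptr_t)addr;

	return (&lock_table[(a >> 7) & (LOCK_TABLE_SIZE - 1)].lock);
}

Unrelated objects that hash to the same slot simply share a mutex, which is acceptable when the lock is only held briefly, as it is here around examining or changing p_selock.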
244 page_lock(page_t *pp, se_t se, kmutex_t *lock, reclaim_t reclaim)
246 return (page_lock_es(pp, se, lock, reclaim, 0));
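
page_lock() is the blocking entry point, a thin wrapper that calls page_lock_es() with es set to 0. It may sleep on p_cv while waiting and reports success or failure through its return value, which the caller must check. A hedged caller sketch, not taken from the kernel (do_io() is a made-up helper; NULL is passed for the kmutex_t argument, which exists for callers that enter holding a related mutex):

/*
 * Sketch only: request the exclusive lock, reclaiming the page from
 * the free list if it is found free (P_RECLAIM), and check the return
 * value, since the request can still fail (e.g. for a retired page).
 */
if (page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_RECLAIM)) {
	do_io(pp);		/* hypothetical work under the exclusive lock */
	page_unlock(pp);
}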
295 page_lock_es(page_t *pp, se_t se, kmutex_t *lock, reclaim_t reclaim, int es)
298 kmutex_t *pse = PAGE_SE_MUTEX(pp);
314 if (PP_RETIRED(pp) && !(es & SE_RETIRED)) {
320 if (se == SE_SHARED && es == 1 && pp->p_selock == 0) {
324 if ((reclaim == P_RECLAIM) && (PP_ISFREE(pp))) {
359 if (!(es & SE_EXCL_WANTED) && (pp->p_selock & SE_EWANTED)) {
366 } else if ((pp->p_selock & ~SE_EWANTED) == 0) {
369 pp->p_selock = SE_WRITER;
375 pp->p_selock |= SE_EWANTED;
381 if (pp->p_selock >= 0) {
382 if ((pp->p_selock & SE_EWANTED) == 0) {
383 pp->p_selock += SE_READER;
390 if ((pp->p_selock & ~SE_EWANTED) == SE_DELETED) {
411 cv_wait(&pp->p_cv, pse);
416 * blocked. If we are willing to depend on "pp"
440 ((PP_ISFREE(pp)) && PAGE_EXCL(pp)) : 1);
455 if (!page_reclaim(pp, lock)) {
461 page_downgrade(pp);
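
Taken together, these fragments show the encoding behind p_selock: a writer stores the negative SE_WRITER sentinel, each shared holder adds SE_READER, and the SE_EWANTED bit tells arriving readers that an exclusive locker is waiting so they back off, all under the hashed pse mutex with p_cv for sleeping. A compressed user-level illustration of that state machine, with hypothetical names and constants and pthreads in place of the kernel's mutex/condvar (no retire, reclaim, or delete handling):

#include <limits.h>
#include <pthread.h>

#define	SEL_EWANTED	0x40000000	/* an exclusive locker is waiting */
#define	SEL_WRITER	INT_MIN		/* negative sentinel: held exclusively */
#define	SEL_READER	1		/* each shared holder adds one */

typedef struct {
	pthread_mutex_t	mtx;	/* plays the role of PAGE_SE_MUTEX(pp) */
	pthread_cond_t	cv;	/* plays the role of pp->p_cv */
	int		selock;	/* plays the role of pp->p_selock */
} sel_t;

#define	SEL_INIT	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 }

/* Shared acquire: wait while a writer holds it or a writer is waiting. */
static void
sel_enter_shared(sel_t *s)
{
	pthread_mutex_lock(&s->mtx);
	while (s->selock < 0 || (s->selock & SEL_EWANTED))
		pthread_cond_wait(&s->cv, &s->mtx);
	s->selock += SEL_READER;
	pthread_mutex_unlock(&s->mtx);
}

/*
 * Exclusive acquire: announce intent via SEL_EWANTED so new readers
 * stand aside, then wait for the holder count to drain to zero.
 */
static void
sel_enter_excl(sel_t *s)
{
	pthread_mutex_lock(&s->mtx);
	while ((s->selock & ~SEL_EWANTED) != 0) {
		s->selock |= SEL_EWANTED;
		pthread_cond_wait(&s->cv, &s->mtx);
	}
	s->selock = SEL_WRITER;		/* also clears SEL_EWANTED */
	pthread_mutex_unlock(&s->mtx);
}

/* Release either mode and wake anyone sleeping on the condvar. */
static void
sel_exit(sel_t *s)
{
	pthread_mutex_lock(&s->mtx);
	if (s->selock < 0)
		s->selock &= SEL_EWANTED;	/* drop writer, keep the flag */
	else
		s->selock -= SEL_READER;	/* drop one shared holder */
	pthread_cond_broadcast(&s->cv);
	pthread_mutex_unlock(&s->mtx);
}

The real page_lock_es() layers more on top of this core, as the fragments show: it refuses retired pages unless SE_RETIRED is passed, upgrades a shared request to exclusive when a free page must be reclaimed, bails out on SE_DELETED pages, and may call page_reclaim() and page_downgrade() on the way out.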
478 page_lock_clr_exclwanted(page_t *pp)
480 kmutex_t *pse = PAGE_SE_MUTEX(pp);
483 pp->p_selock &= ~SE_EWANTED;
484 if (CV_HAS_WAITERS(&pp->p_cv))
485 cv_broadcast(&pp->p_cv);
503 page_try_reclaim_lock(page_t *pp, se_t se, int es)
505 kmutex_t *pse = PAGE_SE_MUTEX(pp);
510 old = pp->p_selock;
515 if (PP_RETIRED(pp) && !(es & SE_RETIRED)) {
526 if (!PP_ISFREE(pp)) {
532 pp->p_selock = old + SE_READER;
555 pp->p_selock = SE_WRITER;
562 pp->p_selock |= SE_EWANTED;
573 page_trylock(page_t *pp, se_t se)
575 kmutex_t *pse = PAGE_SE_MUTEX(pp);
578 if (pp->p_selock & SE_EWANTED || PP_RETIRED(pp) ||
579 (se == SE_SHARED && PP_PR_NOSHARE(pp))) {
591 if (pp->p_selock == 0) {
592 pp->p_selock = SE_WRITER;
597 if (pp->p_selock >= 0) {
598 pp->p_selock += SE_READER;
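
page_trylock() is the non-blocking variant: it refuses immediately if an exclusive waiter has set SE_EWANTED, if the page is retired, or (for shared requests) if PP_PR_NOSHARE applies. A hedged caller sketch (process_page() is a made-up callback, not part of this file):

/*
 * Sketch only: take the shared lock if it is free right now,
 * otherwise skip the page rather than sleep.
 */
static void
scan_one_page(page_t *pp)
{
	if (!page_trylock(pp, SE_SHARED))
		return;			/* busy or retired: move on */

	process_page(pp);		/* hypothetical work under the lock */

	page_unlock(pp);
}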
614 page_unlock_nocapture(page_t *pp)
616 kmutex_t *pse = PAGE_SE_MUTEX(pp);
621 old = pp->p_selock;
623 pp->p_selock = old & ~SE_READER;
624 if (CV_HAS_WAITERS(&pp->p_cv))
625 cv_broadcast(&pp->p_cv);
627 panic("page_unlock_nocapture: page %p is deleted", (void *)pp);
629 pp->p_selock &= SE_EWANTED;
630 if (CV_HAS_WAITERS(&pp->p_cv))
631 cv_broadcast(&pp->p_cv);
633 pp->p_selock = old - SE_READER;
636 (void *)pp);
647 page_unlock(page_t *pp)
649 kmutex_t *pse = PAGE_SE_MUTEX(pp);
654 old = pp->p_selock;
656 pp->p_selock = old & ~SE_READER;
657 if (CV_HAS_WAITERS(&pp->p_cv))
658 cv_broadcast(&pp->p_cv);
660 panic("page_unlock: page %p is deleted", (void *)pp);
662 pp->p_selock &= SE_EWANTED;
663 if (CV_HAS_WAITERS(&pp->p_cv))
664 cv_broadcast(&pp->p_cv);
666 pp->p_selock = old - SE_READER;
668 panic("page_unlock: page %p is not locked", (void *)pp);
671 if (pp->p_selock == 0) {
678 if ((pp->p_toxic & PR_CAPTURE) &&
680 !PP_RETIRED(pp)) {
681 pp->p_selock = SE_WRITER;
683 page_unlock_capture(pp);
702 page_tryupgrade(page_t *pp)
704 kmutex_t *pse = PAGE_SE_MUTEX(pp);
707 if (!(pp->p_selock & SE_EWANTED)) {
709 if (pp->p_selock == SE_READER) {
711 pp->p_selock = SE_WRITER;
725 page_downgrade(page_t *pp)
727 kmutex_t *pse = PAGE_SE_MUTEX(pp);
730 ASSERT((pp->p_selock & ~SE_EWANTED) != SE_DELETED);
731 ASSERT(PAGE_EXCL(pp));
734 excl_waiting = pp->p_selock & SE_EWANTED;
735 pp->p_selock = SE_READER | excl_waiting;
736 if (CV_HAS_WAITERS(&pp->p_cv))
737 cv_broadcast(&pp->p_cv);
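
page_tryupgrade() promotes the sole reader to SE_WRITER only when no other readers are present and SE_EWANTED is clear; page_downgrade() goes the other way, preserving any SE_EWANTED bit and waking waiters. A hedged sketch of the calling pattern (modify_page() and read_page() are made-up helpers):

/*
 * Sketch only: start shared, upgrade in place when exclusive access
 * turns out to be needed, then drop back to shared.  If the upgrade
 * fails, a real caller would drop the lock and reacquire it SE_EXCL.
 */
static void
refresh_page(page_t *pp)
{
	if (!page_trylock(pp, SE_SHARED))
		return;

	if (page_tryupgrade(pp)) {
		modify_page(pp);	/* hypothetical exclusive-only work */
		page_downgrade(pp);	/* back to shared; waiters are woken */
	}

	read_page(pp);			/* hypothetical shared work */
	page_unlock(pp);
}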
742 page_lock_delete(page_t *pp)
744 kmutex_t *pse = PAGE_SE_MUTEX(pp);
746 ASSERT(PAGE_EXCL(pp));
747 ASSERT(pp->p_vnode == NULL);
748 ASSERT(pp->p_offset == (u_offset_t)-1);
749 ASSERT(!PP_ISFREE(pp));
752 pp->p_selock = SE_DELETED;
753 if (CV_HAS_WAITERS(&pp->p_cv))
754 cv_broadcast(&pp->p_cv);
759 page_deleted(page_t *pp)
761 return (pp->p_selock == SE_DELETED);
768 page_iolock_init(page_t *pp)
770 pp->p_iolock_state = 0;
771 cv_init(&pp->p_io_cv, NULL, CV_DEFAULT, NULL);
778 page_io_lock(page_t *pp)
782 pio = PAGE_IO_MUTEX(pp);
784 while (pp->p_iolock_state & PAGE_IO_INUSE) {
785 cv_wait(&(pp->p_io_cv), pio);
787 pp->p_iolock_state |= PAGE_IO_INUSE;
795 page_io_unlock(page_t *pp)
799 pio = PAGE_IO_MUTEX(pp);
801 cv_broadcast(&pp->p_io_cv);
802 pp->p_iolock_state &= ~PAGE_IO_INUSE;
811 page_io_trylock(page_t *pp)
815 if (pp->p_iolock_state & PAGE_IO_INUSE)
818 pio = PAGE_IO_MUTEX(pp);
821 if (pp->p_iolock_state & PAGE_IO_INUSE) {
825 pp->p_iolock_state |= PAGE_IO_INUSE;
835 page_io_wait(page_t *pp)
839 pio = PAGE_IO_MUTEX(pp);
841 while (pp->p_iolock_state & PAGE_IO_INUSE) {
842 cv_wait(&(pp->p_io_cv), pio);
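
Unlike p_selock, the i/o lock is a single PAGE_IO_INUSE busy bit in p_iolock_state, serialized by the hashed PAGE_IO_MUTEX and signalled through p_io_cv: page_io_lock() sleeps until the bit clears and then sets it, page_io_trylock() gives up instead of sleeping, page_io_unlock() clears it and broadcasts, and page_io_wait() waits for the bit without taking it. A minimal pthread rendition of the same busy-bit protocol, with hypothetical names:

#include <pthread.h>

#define	IO_INUSE	0x1		/* stand-in for PAGE_IO_INUSE */

typedef struct {
	pthread_mutex_t	mtx;		/* stand-in for PAGE_IO_MUTEX(pp) */
	pthread_cond_t	cv;		/* stand-in for pp->p_io_cv */
	unsigned	state;		/* stand-in for pp->p_iolock_state */
} iolock_t;

#define	IOLOCK_INIT	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 }

static void
io_lock(iolock_t *io)
{
	pthread_mutex_lock(&io->mtx);
	while (io->state & IO_INUSE)	/* sleep until the bit is free */
		pthread_cond_wait(&io->cv, &io->mtx);
	io->state |= IO_INUSE;
	pthread_mutex_unlock(&io->mtx);
}

static int
io_trylock(iolock_t *io)
{
	int got = 0;

	pthread_mutex_lock(&io->mtx);
	if (!(io->state & IO_INUSE)) {
		io->state |= IO_INUSE;
		got = 1;
	}
	pthread_mutex_unlock(&io->mtx);
	return (got);
}

static void
io_unlock(iolock_t *io)
{
	pthread_mutex_lock(&io->mtx);
	io->state &= ~IO_INUSE;
	pthread_cond_broadcast(&io->cv);	/* wake lockers and waiters */
	pthread_mutex_unlock(&io->mtx);
}

/* Wait until the bit is clear without claiming it, like page_io_wait(). */
static void
io_wait(iolock_t *io)
{
	pthread_mutex_lock(&io->mtx);
	while (io->state & IO_INUSE)
		pthread_cond_wait(&io->cv, &io->mtx);
	pthread_mutex_unlock(&io->mtx);
}

The kernel's page_io_trylock() also peeks at the bit before taking the mutex as an opportunistic fast path, which the sketch omits.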
851 page_io_locked(page_t *pp)
853 return (pp->p_iolock_state & PAGE_IO_INUSE);
861 page_iolock_assert(page_t *pp)
863 return (page_io_locked(pp));
897 page_se_mutex(page_t *pp)
899 return (PAGE_SE_MUTEX(pp));
923 * If NULL is returned, pp's p_szc is guaranteed to be 0. If non-NULL is
924 * returned, pp's p_szc may be any value.
927 page_szc_lock(page_t *pp)
933 uint_t pszc = pp->p_szc;
935 ASSERT(pp != NULL);
936 ASSERT(PAGE_LOCKED(pp));
937 ASSERT(!PP_ISFREE(pp));
938 ASSERT(pp->p_vnode != NULL);
939 ASSERT(!IS_SWAPFSVP(pp->p_vnode));
940 ASSERT(!PP_ISKAS(pp));
950 rootpp = PP_GROUPLEADER(pp, pszc);
955 * since p_szc can only decrease if pp == rootpp
958 * If the location of pp's root didn't change after we took
961 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc) {
971 szc = pp->p_szc;
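
page_szc_lock() has to pick a lock based on p_szc while p_szc itself can change underneath it, so it locks optimistically and then revalidates: compute the group leader for the size code it read, take that leader's lock, and retry if the leader or its size code moved in the meantime. Stripped of the page-specific details, and with hypothetical names, the revalidate-and-retry shape looks like this (a generic sketch, not the kernel code):

#include <pthread.h>
#include <stdatomic.h>

struct group {
	pthread_mutex_t lock;		/* assumed initialized elsewhere */
};

struct obj {
	_Atomic(struct group *) group;	/* may be changed by the lock holder */
};

static struct group *
group_lock(struct obj *o)
{
	for (;;) {
		struct group *g = atomic_load(&o->group);  /* unlocked snapshot */

		pthread_mutex_lock(&g->lock);
		if (g == atomic_load(&o->group))	/* still the right group? */
			return (g);			/* yes: return it locked */
		pthread_mutex_unlock(&g->lock);		/* no: raced, try again */
	}
}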
994 page_szc_lock_assert(page_t *pp)
996 page_t *rootpp = PP_PAGEROOT(pp);