Lines Matching refs:sdp

313 #define	ID(sdp)		((sdp)->tag)
661 SegProc *sdp;
666 sdp = seglock_allocclient(lp);
672 if (sdp->lockseg == NULL) {
673 sdp->lockseg = dhp;
674 } else if (sdp->unlockseg == NULL) {
675 sdp->unlockseg = dhp;
682 *pvtp = sdp;
706 SegProc *sdp = (SegProc *)oldpvt;
708 SegLock *lp = sdp->lp;
711 ASSERT((dhp == sdp->lockseg) || (dhp == sdp->unlockseg));
717 * parent sdp itself!
725 ASSERT(ndp != sdp);
727 if (sdp->lockseg == dhp) {
731 ASSERT(sdp->unlockseg == dhp);
734 if (sdp->flag & TRASHPAGE) {
750 SegProc *sdp = (SegProc *)pvtp;
751 SegLock *lp = sdp->lp;
760 ASSERT((dhp == sdp->lockseg) || (dhp == sdp->unlockseg));
762 if (sdp == lp->owner) {
770 ASSERT(sdp != lp->owner);
771 if (sdp->lockseg == dhp) {
772 sdp->lockseg = NULL;
774 ASSERT(sdp->unlockseg == dhp);
775 sdp->unlockseg = NULL;
776 sdp->flag &= ~TRASHPAGE; /* clear flag if set */
779 garbage_collect_lock(lp, sdp);
787 SegProc *sdp = (SegProc *)pvt;
788 SegLock *lp = sdp->lp;
796 ASSERT((dhp == sdp->lockseg) || (dhp == sdp->unlockseg));
799 ASSERT(ID(sdp) == CURPROC_ID);
806 if ((sdp->lockseg == NULL) || (sdp->unlockseg == NULL)) {
809 err = seglock_lockfault(dhp, sdp, lp, rw);
1000 SegProc *sdp;
1004 for (sdp = lp->clients; sdp != NULL; sdp = sdp->next) {
1005 if (ID(sdp) == tag)
1008 return (sdp);
1016 SegProc *sdp;
1022 sdp = seglock_find_specific(lp, tag);
1023 if (sdp != NULL)
1024 return (sdp);
1030 sdp = kmem_zalloc(sizeof (SegProc), KM_SLEEP);
1031 sdp->next = lp->clients;
1032 lp->clients = sdp;
1033 sdp->lp = lp;
1034 ID(sdp) = tag;
1035 return (sdp);
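
The matched lines 1000-1035 above show how clients are tracked: each SegLock keeps a singly linked list of SegProc records keyed by a per-process tag read through the ID() macro (line 313), and seglock_allocclient is a find-or-allocate on that list. Below is a minimal user-space sketch of that pattern built from the field names visible in the matches; the struct layouts, calloc() standing in for kmem_zalloc(KM_SLEEP), and the main() driver are illustrative assumptions, not the driver's actual definitions.

#include <stdio.h>
#include <stdlib.h>

typedef void *proc_tag_t;		/* stand-in for the driver's process tag */

typedef struct segproc {
	struct segproc *next;		/* next client of the same lock */
	struct seglock *lp;		/* back pointer to the owning lock */
	proc_tag_t tag;			/* process identity, read via ID() */
	void *lockseg;			/* lock-page mapping handle, if any */
	void *unlockseg;		/* unlock-page mapping handle, if any */
	unsigned int flag;
} SegProc;

typedef struct seglock {
	SegProc *clients;		/* head of the client list */
	SegProc *owner;			/* client currently holding the lock */
} SegLock;

#define	ID(sdp)		((sdp)->tag)

/* Return the client with this tag, or NULL if it is not on the list. */
static SegProc *
seglock_find_specific(SegLock *lp, proc_tag_t tag)
{
	SegProc *sdp;

	for (sdp = lp->clients; sdp != NULL; sdp = sdp->next) {
		if (ID(sdp) == tag)
			break;
	}
	return (sdp);
}

/* Find-or-allocate: reuse an existing client or push a new one onto the list. */
static SegProc *
seglock_allocclient(SegLock *lp, proc_tag_t tag)
{
	SegProc *sdp = seglock_find_specific(lp, tag);

	if (sdp != NULL)
		return (sdp);
	if ((sdp = calloc(1, sizeof (SegProc))) == NULL)
		return (NULL);			/* KM_SLEEP never fails; calloc can */
	sdp->next = lp->clients;		/* push onto the head of the list */
	lp->clients = sdp;
	sdp->lp = lp;
	ID(sdp) = tag;
	return (sdp);
}

int
main(void)
{
	SegLock lock = { NULL, NULL };
	int a, b;				/* addresses double as fake tags */
	SegProc *first = seglock_allocclient(&lock, &a);
	SegProc *second = seglock_allocclient(&lock, &b);

	/* A second lookup with the same tag returns the same client. */
	printf("reused: %d\n", seglock_allocclient(&lock, &a) == first);
	printf("distinct: %d\n", first != second);
	return (0);
}
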
1043 seglock_deleteclient(SegLock *lp, SegProc *sdp)
1046 ASSERT(lp->owner != sdp); /* Not current owner of lock */
1047 ASSERT(sdp->lockseg == NULL); /* Mappings torn down */
1048 ASSERT(sdp->unlockseg == NULL);
1052 if (lp->clients == sdp) {
1053 lp->clients = sdp->next;
1056 while (tmp->next != sdp) {
1060 tmp->next = sdp->next;
1062 kmem_free(sdp, sizeof (SegProc));
1069 * Can be called with sdp == NULL if want to verify only the lock state
1074 garbage_collect_lock(SegLock *lp, SegProc *sdp)
1078 if ((sdp != NULL) && (sdp->lockseg == NULL) && (sdp->unlockseg == NULL))
1079 seglock_deleteclient(lp, sdp);
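
The matched lines 1043-1079 above show the other half of the client lifecycle: seglock_deleteclient unlinks a SegProc from the lock's singly linked client list (head case first, then a walk to the predecessor) and frees it, while garbage_collect_lock reclaims a client only once both of its segment mappings are gone. A minimal sketch of that teardown path, continuing the user-space sketch above (same SegProc/SegLock types, free() standing in for kmem_free, and with the real driver's mutexes and ASSERTs omitted):

/* Unlink sdp from lp->clients and free it (sketch only). */
static void
seglock_deleteclient(SegLock *lp, SegProc *sdp)
{
	if (lp->clients == sdp) {
		lp->clients = sdp->next;		/* unlink the list head */
	} else {
		SegProc *tmp = lp->clients;

		while (tmp->next != sdp)		/* find the predecessor */
			tmp = tmp->next;
		tmp->next = sdp->next;			/* unlink from the middle */
	}
	free(sdp);					/* kmem_free in the driver */
}

/*
 * Reclaim the client once both mappings are torn down; may be called
 * with sdp == NULL when only the lock state needs checking.
 */
static void
garbage_collect_lock(SegLock *lp, SegProc *sdp)
{
	if (sdp != NULL && sdp->lockseg == NULL && sdp->unlockseg == NULL)
		seglock_deleteclient(lp, sdp);
}
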
1217 SegProc *sdp;
1241 sdp = seglock_allocclient(lp);
1242 sdp->flag = sdp->flag & KFLAGS | wlt.sy_flags & UFLAGS;
1243 } else if ((sdp = seglock_findclient(lp)) != NULL) {
1244 sdp->flag = sdp->flag & KFLAGS;
1246 garbage_collect_lock(lp, sdp);
1257 SegProc *sdp;
1272 if ((sdp = seglock_findclient(lp)) != NULL) {
1273 wlt.sy_flags = sdp->flag & UFLAGS;
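
Lines 1241-1244 and 1272-1273 show how the set and get flag ioctls split sdp->flag into kernel-private bits (masked by KFLAGS) and user-settable bits (masked by UFLAGS): the set path keeps the kernel bits and takes only the user bits from the caller's sy_flags, and the get path returns only the user bits. A small, self-contained illustration of that masking idiom; the real KFLAGS/UFLAGS values do not appear in the listing, so the masks below are placeholders.

#include <stdio.h>

#define	UFLAGS	0x00ffu		/* user-settable bits (placeholder value) */
#define	KFLAGS	0xff00u		/* kernel-private bits (placeholder value) */

int
main(void)
{
	unsigned int flag = 0x4203;	/* current sdp->flag (example value) */
	unsigned int sy_flags = 0x0071;	/* flags passed in by the user */

	/* & binds tighter than |, so the source line needs no parentheses. */
	flag = flag & KFLAGS | sy_flags & UFLAGS;
	printf("merged flag = 0x%04x\n", flag);		/* prints 0x4271 */
	printf("user view  = 0x%04x\n", flag & UFLAGS);	/* get path, line 1273 */
	return (0);
}
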
1293 seglock_lockfault(devmap_cookie_t dhp, SegProc *sdp, SegLock *lp, uint_t rw)
1300 "seglock_lockfault: hdl=%p, sdp=%p, lp=%p owner=%p\n",
1301 (void *)dhp, (void *)sdp, (void *)lp, (void *)owner));
1303 /* lockfault is always called with sdp in current process context */
1304 ASSERT(ID(sdp) == CURPROC_ID);
1309 return (give_mapping(lp, sdp, rw));
1312 if (owner == sdp) {
1318 if ((sdp->lockseg == dhp) || (lp->sleepers == 0)) {
1320 return (give_mapping(lp, sdp, rw));
1329 ASSERT((dhp == sdp->unlockseg) && (lp->sleepers != 0));
1335 ASSERT(owner != sdp);
1344 if ((sdp->unlockseg == dhp) && (sdp->flag & TRASHPAGE)) {
1346 return (devmap_load(sdp->unlockseg, lp->cookie, PAGESIZE,
1373 return (give_mapping(lp, sdp, rw));
1434 return (give_mapping(lp, sdp, rw));
1443 give_mapping(SegLock *lp, SegProc *sdp, uint_t rw)
1449 /* give_mapping is always called with sdp in current process context */
1450 ASSERT(ID(sdp) == CURPROC_ID);
1453 if (sdp->flag & TRASHPAGE) {
1455 ASSERT(sdp != lp->owner);
1460 if ((err = devmap_umem_remap(sdp->unlockseg, winlock_dip,
1470 sdp->flag &= ~TRASHPAGE;
1474 lp->owner = sdp;
1476 if ((err = devmap_load(sdp->lockseg, lp->cookie, PAGESIZE,
1487 err = devmap_unload(sdp->unlockseg, lp->cookie, PAGESIZE);
1494 err = devmap_load(sdp->unlockseg, lp->cookie, PAGESIZE,
1641 SegProc *sdp;
1655 for (sdp = lp->clients; sdp != NULL; sdp = sdp->next) {
1658 (void *)sdp, sdp == lp->owner ? " (owner)" : "",
1659 (void *)sdp->lp, sdp->flag, (void *)ID(sdp),
1660 (void *)sdp->lockseg, (void *)sdp->unlockseg);