Lines Matching defs:lp
306 struct seglock *lp; /* associated lock context */
334 #define LOCK(lp) (*((lp)->lockptr))
375 * All fields in list of SegProc (lp->clients)
385 * But before this pointer is used to access any fields (other than the 'lp')
386 * lp->mutex must be held.
405 #define seglock_findclient(lp) seglock_find_specific((lp), CURPROC_ID)
406 #define seglock_allocclient(lp) seglock_alloc_specific((lp), CURPROC_ID)
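Taken together, the struct field at 306, the LOCK() macro at 334, the locking rules quoted from the comments at 375-386, and the per-process convenience macros at 405-406 describe the data layout: every SegLock carries a list of per-process SegProc client records plus a pointer to a shared lock word, and all of it except the sdp->lp back pointer is protected by lp->mutex. Below is a minimal user-space sketch of that layout; the field types, the 'tag' identity field, and the pthread mutex/condvar standing in for the kernel kmutex_t/kcondvar_t are assumptions for illustration, not the driver's declarations.

    /* User-space sketch of the layout the excerpts imply; types are assumed. */
    #include <pthread.h>

    typedef struct segproc SegProc;
    typedef struct seglock SegLock;

    struct seglock {
        unsigned int    key;        /* user-supplied key, 0 if none */
        unsigned int    cookie;     /* page-aligned id handed back to clients */
        int             *lockptr;   /* address of the shared lock word */
        int             alloccount; /* outstanding allocations of this lock */
        int             sleepers;   /* processes blocked waiting for the lock */
        int             timeout;    /* how long to wait before stealing it */
        SegProc         *clients;   /* per-process records; needs 'mutex' held */
        SegProc         *owner;     /* current owner of the lock page, or NULL */
        pthread_mutex_t mutex;      /* stands in for lp->mutex */
        pthread_cond_t  locksleep;  /* stands in for lp->locksleep */
        SegLock         *next;      /* next lock on the global (or free) list */
    };

    struct segproc {
        SegLock         *lp;        /* back pointer; readable without lp->mutex */
        SegProc         *next;      /* all other fields require lp->mutex */
        void            *tag;       /* assumed stand-in for the driver's
                                       process identity (CURPROC_ID/ID(sdp)) */
    };

    /* LOCK(lp) simply dereferences the shared lock word (the macro at 334). */
    #define LOCK(lp)    (*((lp)->lockptr))

Everything else in this listing manipulates these two structures under the discipline stated at 385-386.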
587 SegLock *lp;
597 if ((lp = seglock_findlock((uint_t)off)) == NULL) {
606 ((off ^ (uintptr_t)(lp->lockptr)) & (offset_t)PAGEOFFSET) != 0) {
609 off, (void *)lp->lockptr));
610 mutex_exit(&lp->mutex); /* mutex held by seglock_findlock */
616 mutex_exit(&lp->mutex); /* mutex held by seglock_findlock */
634 lp->umem_cookie, 0, PAGESIZE, WINLOCK_PROT,
636 mutex_exit(&lp->mutex); /* held by seglock_findlock */
645 mutex_exit(&lp->mutex); /* mutex held by seglock_findlock */
660 SegLock *lp = seglock_findlock((uint_t)off); /* returns w/ mutex held */
666 sdp = seglock_allocclient(lp);
678 mutex_exit(&lp->mutex); /* mutex held by seglock_findlock */
683 mutex_exit(&lp->mutex); /* mutex held by seglock_findlock */
690 * lp->alloccount is NOT incremented, so child should not do a free().
708 SegLock *lp = sdp->lp;
710 mutex_enter(&lp->mutex);
723 ndp = seglock_alloc_specific(lp,
738 mutex_exit(&lp->mutex);
751 SegLock *lp = sdp->lp;
759 mutex_enter(&lp->mutex);
762 if (sdp == lp->owner) {
767 (void) lock_giveup(lp, 0);
770 ASSERT(sdp != lp->owner);
779 garbage_collect_lock(lp, sdp);
788 SegLock *lp = sdp->lp;
795 mutex_enter(&lp->mutex);
809 err = seglock_lockfault(dhp, sdp, lp, rw);
811 mutex_exit(&lp->mutex);
833 SegLock *lp;
837 for (lp = lock_list; lp != NULL; lp = lp->next) {
838 mutex_enter(&lp->mutex);
839 if (cookie == lp->cookie) {
840 break; /* return with lp->mutex held */
842 mutex_exit(&lp->mutex);
845 return (lp);
855 SegLock *lp;
861 for (lp = lock_list; lp != NULL; lp = lp->next) {
862 mutex_enter(&lp->mutex);
863 if (key == lp->key)
865 mutex_exit(&lp->mutex);
867 return (lp);
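Both lookup routines excerpted here, seglock_findlock (833-845) and seglock_findkey (855-867), use the same hand-over discipline: walk the global list, take each candidate's mutex, and on a match return with that mutex still held so the caller can operate on the lock and then release it. A user-space analogue of the pattern, reusing the SegLock sketch above and a hypothetical lock_list global:

    /* Analogue of seglock_findkey(); builds on the SegLock sketch above.
     * Returns the matching lock with its mutex held, or NULL. */
    #include <pthread.h>
    #include <stddef.h>

    extern SegLock *lock_list;              /* global list head (assumed) */

    SegLock *
    sketch_findkey(unsigned int key)
    {
        SegLock *lp;

        for (lp = lock_list; lp != NULL; lp = lp->next) {
            pthread_mutex_lock(&lp->mutex);
            if (key == lp->key)
                break;                      /* return with lp->mutex held */
            pthread_mutex_unlock(&lp->mutex);
        }
        return (lp);
    }

That convention is why so many of the excerpts elsewhere in this listing are bare mutex_exit(&lp->mutex) calls annotated "mutex held by seglock_findlock".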
878 SegLock *lp;
885 lp = lock_free_list;
886 lock_free_list = lp->next;
890 lp = kmem_zalloc(sizeof (SegLock), KM_SLEEP);
891 lp->cookie = (next_lock + 1) * (uint_t)PAGESIZE;
892 mutex_init(&lp->mutex, NULL, MUTEX_DEFAULT, NULL);
893 cv_init(&lp->locksleep, NULL, CV_DEFAULT, NULL);
897 mutex_enter(&lp->mutex);
898 ASSERT((lp->cookie/PAGESIZE) <= next_lock);
901 lp->lockptr = (int *)ddi_umem_alloc(PAGESIZE,
902 DDI_UMEM_SLEEP, &(lp->umem_cookie));
904 lp->lockptr = ((int *)lockpage) + ((lp->cookie/PAGESIZE) - 1);
905 lp->umem_cookie = lockpage_cookie;
908 ASSERT(lp->lockptr != NULL);
909 lp->style = style;
910 lp->sleepers = 0;
911 lp->alloccount = 1;
912 lp->timeout = LOCKTIME*hz;
913 lp->clients = NULL;
914 lp->owner = NULL;
915 LOCK(lp) = 0;
916 lp->next = lock_list;
917 lock_list = lp;
918 return (lp);
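seglock_createlock (878-918) either pops a recycled entry off the free list or allocates a fresh one with the next page-aligned cookie, initializes it, and links it at the head of lock_list, returning with the new lock's mutex held. A condensed analogue follows, building on the sketch above; SKETCH_PAGESIZE, the two global lists, and the next_lock counter (including its increment) are assumptions, and the lock-word setup at 901-905 (ddi_umem_alloc versus a slot in the shared lockpage) has no user-space equivalent here.

    /* Analogue of seglock_createlock(); builds on the sketch above. */
    #include <pthread.h>
    #include <stdlib.h>
    #include <stddef.h>

    #define SKETCH_PAGESIZE 4096            /* assumed page size */

    extern SegLock *lock_list, *lock_free_list;
    extern unsigned int next_lock;          /* cookies handed out so far (assumed) */

    SegLock *
    sketch_createlock(void)
    {
        SegLock *lp;

        if (lock_free_list != NULL) {
            /* Recycle a destroyed lock; it keeps its old cookie and mutex. */
            lp = lock_free_list;
            lock_free_list = lp->next;
        } else {
            lp = calloc(1, sizeof (*lp));
            if (lp == NULL)
                return (NULL);              /* driver uses KM_SLEEP, cannot fail */
            lp->cookie = (next_lock + 1) * SKETCH_PAGESIZE;
            pthread_mutex_init(&lp->mutex, NULL);
            pthread_cond_init(&lp->locksleep, NULL);
            next_lock++;
        }

        pthread_mutex_lock(&lp->mutex);     /* returned to the caller held */
        lp->alloccount = 1;                 /* the creator holds one allocation */
        lp->clients = NULL;
        lp->owner = NULL;
        lp->next = lock_list;               /* link at the head of the active list */
        lock_list = lp;
        return (lp);
    }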
923 * This routine is called while holding the lp->mutex but not the
928 seglock_destroylock(SegLock *lp)
930 ASSERT(MUTEX_HELD(&lp->mutex));
934 lp->cookie, lp->key));
936 ASSERT(lp->alloccount == 0);
937 ASSERT(lp->clients == NULL);
938 ASSERT(lp->owner == NULL);
939 ASSERT(lp->sleepers == 0);
941 /* clean up/release fields in lp */
942 if (lp->style == OLDSTYLE_LOCK) {
943 ddi_umem_free(lp->umem_cookie);
945 lp->umem_cookie = NULL;
946 lp->lockptr = NULL;
947 lp->key = 0;
955 lp->cookie--;
958 mutex_exit(&lp->mutex);
960 mutex_enter(&lp->mutex);
963 lp->cookie++;
964 ASSERT((lp->cookie & PAGEOFFSET) == 0);
965 ASSERT(lp->alloccount == 0);
966 ASSERT(lp->clients == NULL);
967 ASSERT(lp->owner == NULL);
968 ASSERT(lp->sleepers == 0);
970 /* Remove lp from lock_list */
971 if (lock_list == lp) {
972 lock_list = lp->next;
975 while (tmp->next != lp) {
979 tmp->next = lp->next;
983 lp->next = lock_free_list;
984 lock_free_list = lp;
985 mutex_exit(&lp->mutex);
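The destroy path (923-985) asserts that nothing references the lock, releases the OLDSTYLE umem page, briefly perturbs lp->cookie while dropping and retaking the mutex (955-963, presumably so no lookup can match the cookie during that window), and finally unlinks the lock from lock_list onto lock_free_list. A sketch of just that final unlink-and-recycle step, building on the structures above:

    /* Analogue of the unlink step at the end of seglock_destroylock();
     * lp->mutex is held on entry and released on return, as in the excerpts.
     * lock_list/lock_free_list are the assumed active and free lists. */
    #include <assert.h>
    #include <pthread.h>
    #include <stddef.h>

    extern SegLock *lock_list, *lock_free_list;

    void
    sketch_destroylock(SegLock *lp)
    {
        assert(lp->clients == NULL && lp->owner == NULL &&
            lp->alloccount == 0 && lp->sleepers == 0);

        /* Remove lp from lock_list. */
        if (lock_list == lp) {
            lock_list = lp->next;
        } else {
            SegLock *tmp = lock_list;
            while (tmp->next != lp)
                tmp = tmp->next;
            tmp->next = lp->next;
        }

        /* Recycle the node on the free list rather than freeing it. */
        lp->next = lock_free_list;
        lock_free_list = lp;
        pthread_mutex_unlock(&lp->mutex);
    }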
998 seglock_find_specific(SegLock *lp, void *tag)
1002 ASSERT(MUTEX_HELD(&lp->mutex));
1004 for (sdp = lp->clients; sdp != NULL; sdp = sdp->next) {
1014 seglock_alloc_specific(SegLock *lp, void *tag)
1018 ASSERT(MUTEX_HELD(&lp->mutex));
1022 sdp = seglock_find_specific(lp, tag);
1027 tag, lp->cookie));
1031 sdp->next = lp->clients;
1032 lp->clients = sdp;
1033 sdp->lp = lp;
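seglock_find_specific (998-1004) and seglock_alloc_specific (1014-1033) give each process its own client record on a lock: a linear search of lp->clients under lp->mutex, with a new record pushed on the head and its back pointer set when none exists. A user-space analogue, using the hypothetical 'tag' field from the sketch above in place of CURPROC_ID:

    /* Analogue of seglock_alloc_specific(); called with lp->mutex held. */
    #include <stdlib.h>
    #include <stddef.h>

    SegProc *
    sketch_alloc_specific(SegLock *lp, void *tag)
    {
        SegProc *sdp;

        /* seglock_find_specific(): linear search of lp->clients by identity. */
        for (sdp = lp->clients; sdp != NULL; sdp = sdp->next)
            if (sdp->tag == tag)
                return (sdp);

        /* Not found: allocate, link at the head, set the back pointer. */
        sdp = calloc(1, sizeof (*sdp));
        if (sdp == NULL)
            return (NULL);              /* driver uses KM_SLEEP and cannot fail */
        sdp->tag = tag;
        sdp->next = lp->clients;
        lp->clients = sdp;
        sdp->lp = lp;
        return (sdp);
    }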
1043 seglock_deleteclient(SegLock *lp, SegProc *sdp)
1045 ASSERT(MUTEX_HELD(&lp->mutex));
1046 ASSERT(lp->owner != sdp); /* Not current owner of lock */
1051 ddi_get_pid(), lp->cookie));
1052 if (lp->clients == sdp) {
1053 lp->clients = sdp->next;
1055 SegProc *tmp = lp->clients;
1070 * caller should hold the lp->mutex
1074 garbage_collect_lock(SegLock *lp, SegProc *sdp)
1076 ASSERT(MUTEX_HELD(&lp->mutex));
1079 seglock_deleteclient(lp, sdp);
1082 if ((lp->clients == NULL) && (lp->alloccount == 0)) {
1083 seglock_destroylock(lp);
1085 mutex_exit(&lp->mutex);
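garbage_collect_lock (1070-1085) is the common teardown funnel: with lp->mutex held it optionally removes the caller's client record, destroys the lock only when both the client list and alloccount have drained, and otherwise just drops the mutex. A sketch of that decision, with sketch_deleteclient and sketch_destroylock standing in for the driver's seglock_deleteclient()/seglock_destroylock():

    /* Analogue of garbage_collect_lock(); builds on the sketches above.
     * Runs with lp->mutex held; the mutex is always released on return. */
    #include <pthread.h>
    #include <stddef.h>

    extern void sketch_deleteclient(SegLock *lp, SegProc *sdp);
    extern void sketch_destroylock(SegLock *lp);    /* drops lp->mutex */

    void
    sketch_garbage_collect(SegLock *lp, SegProc *sdp)
    {
        if (sdp != NULL)
            sketch_deleteclient(lp, sdp);

        if (lp->clients == NULL && lp->alloccount == 0)
            sketch_destroylock(lp);         /* also releases lp->mutex */
        else
            pthread_mutex_unlock(&lp->mutex);
    }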
1106 struct seglock *lp;
1137 if ((lp = seglock_findkey(key)) != NULL) {
1139 key, lp->cookie));
1140 ++lp->alloccount;
1141 } else if ((lp = seglock_createlock(style)) != NULL) {
1143 key, lp->cookie));
1144 lp->key = key;
1150 ASSERT((lp != NULL) && MUTEX_HELD(&lp->mutex));
1155 err = ddi_copyout((caddr_t)&lp->cookie, (caddr_t)arg,
1156 sizeof (lp->cookie), mode);
1158 wla.sy_ident = lp->cookie +
1159 (uint_t)((uintptr_t)(lp->lockptr) & PAGEOFFSET);
1166 lp->alloccount--;
1169 garbage_collect_lock(lp, NULL);
1173 mutex_exit(&lp->mutex);
1180 struct seglock *lp;
1189 if ((lp = seglock_findlock(offset)) == NULL) {
1194 lp->key, lp->cookie, lp->alloccount));
1196 if (lp->alloccount > 0)
1197 lp->alloccount--;
1200 garbage_collect_lock(lp, NULL);
1216 SegLock *lp;
1224 if ((lp = seglock_findlock(wlt.sy_ident)) == NULL)
1227 lp->timeout = MSEC_TO_TICK_ROUNDUP(wlt.sy_timeout);
1229 if (lp->sleepers > 0) {
1230 cv_broadcast(&lp->locksleep);
1241 sdp = seglock_allocclient(lp);
1243 } else if ((sdp = seglock_findclient(lp)) != NULL) {
1246 garbage_collect_lock(lp, sdp);
1249 mutex_exit(&lp->mutex); /* mutex held by seglock_findlock */
1256 SegLock *lp;
1263 if ((lp = seglock_findlock(wlt.sy_ident)) == NULL)
1266 wlt.sy_timeout = TICK_TO_MSEC(lp->timeout);
1272 if ((sdp = seglock_findclient(lp)) != NULL) {
1277 mutex_exit(&lp->mutex); /* mutex held by seglock_findlock */
1293 seglock_lockfault(devmap_cookie_t dhp, SegProc *sdp, SegLock *lp, uint_t rw)
1295 SegProc *owner = lp->owner;
1298 ASSERT(MUTEX_HELD(&lp->mutex));
1300 "seglock_lockfault: hdl=%p, sdp=%p, lp=%p owner=%p\n",
1301 (void *)dhp, (void *)sdp, (void *)lp, (void *)owner));
1309 return (give_mapping(lp, sdp, rw));
1318 if ((sdp->lockseg == dhp) || (lp->sleepers == 0)) {
1320 return (give_mapping(lp, sdp, rw));
1329 ASSERT((dhp == sdp->unlockseg) && (lp->sleepers != 0));
1331 return (lock_giveup(lp, 1));
1346 return (devmap_load(sdp->unlockseg, lp->cookie, PAGESIZE,
1353 * Before reading lock value in LOCK(lp), we must make sure that
1360 err = devmap_unload(owner->lockseg, lp->cookie, PAGESIZE);
1361 err |= devmap_unload(owner->unlockseg, lp->cookie, PAGESIZE);
1369 if (LOCK(lp) == 0) {
1372 (void *)lp, ddi_get_pid()));
1373 return (give_mapping(lp, sdp, rw));
1384 lp->sleepers++;
1385 while ((owner = lp->owner) != NULL) {
1388 if ((lp->timeout == 0) || (owner->flag & SY_NOTIMEOUT)) {
1393 rval = cv_wait_sig(&lp->locksleep, &lp->mutex);
1405 rval = cv_reltimedwait_sig(&lp->locksleep,
1406 &lp->mutex, lp->timeout, TR_CLOCK_TICK);
1413 if ((rval == -1) && (lp->owner == owner)) {
1419 ddi_get_pid(), lp->cookie);
1420 (void) lock_giveup(lp, 1);
1424 ddi_get_pid(), lp->cookie);
1425 lp->sleepers--;
1430 lp->sleepers--;
1434 return (give_mapping(lp, sdp, rw));
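The contended path of seglock_lockfault (1353 onward) unloads the owner's mappings so the value in LOCK(lp) can be trusted, and if the lock is still held it bumps lp->sleepers and waits on lp->locksleep, either indefinitely (timeout of 0 or the owner's SY_NOTIMEOUT flag) or for lp->timeout ticks, stealing the lock from an unchanged owner when the wait times out. A condensed user-space analogue of the wait loop, with pthread_cond_timedwait standing in for cv_reltimedwait_sig, the timeout treated as seconds rather than ticks, and the signal handling of the cv_*_sig variants omitted:

    /* Analogue of the sleepers loop at 1384-1434; builds on the sketch above.
     * steal_lock() plays the role of lock_giveup(lp, 1) and is assumed to
     * clear lp->owner.  Called and returns with lp->mutex held. */
    #include <errno.h>
    #include <pthread.h>
    #include <stddef.h>
    #include <time.h>

    extern void steal_lock(SegLock *lp);    /* analogue of lock_giveup(lp, 1) */

    void
    sketch_wait_for_owner(SegLock *lp)
    {
        SegProc *owner;

        lp->sleepers++;
        while ((owner = lp->owner) != NULL) {
            if (lp->timeout == 0) {
                /* No timeout (the driver also honors SY_NOTIMEOUT here):
                 * sleep until the owner gives the lock up. */
                pthread_cond_wait(&lp->locksleep, &lp->mutex);
            } else {
                struct timespec deadline;
                clock_gettime(CLOCK_REALTIME, &deadline);
                deadline.tv_sec += lp->timeout;     /* sketch: seconds, not ticks */
                int rv = pthread_cond_timedwait(&lp->locksleep, &lp->mutex,
                    &deadline);
                /* Timed out with the same owner still holding it: steal it. */
                if (rv == ETIMEDOUT && lp->owner == owner)
                    steal_lock(lp);
            }
        }
        lp->sleepers--;
    }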
1443 give_mapping(SegLock *lp, SegProc *sdp, uint_t rw)
1447 ASSERT(MUTEX_HELD(&lp->mutex));
1448 ASSERT(!((lp->owner == NULL) && (LOCK(lp) != 0)));
1455 ASSERT(sdp != lp->owner);
1461 lp->umem_cookie, 0, PAGESIZE, WINLOCK_PROT, 0, 0)) != 0) {
1474 lp->owner = sdp;
1476 if ((err = devmap_load(sdp->lockseg, lp->cookie, PAGESIZE,
1482 if (lp->sleepers) {
1486 lp->sleepers));
1487 err = devmap_unload(sdp->unlockseg, lp->cookie, PAGESIZE);
1494 err = devmap_load(sdp->unlockseg, lp->cookie, PAGESIZE,
1506 lock_giveup(SegLock *lp, int trash)
1508 SegProc *owner = lp->owner;
1510 DEBUGF(4, (CE_CONT, "winlock_giveup: lp=%p, owner=%p, trash %d\n",
1511 (void *)lp, (void *)ID(lp->owner), trash));
1513 ASSERT(MUTEX_HELD(&lp->mutex));
1527 (void) devmap_unload(owner->lockseg, lp->cookie, PAGESIZE);
1528 (void) devmap_unload(owner->unlockseg, lp->cookie, PAGESIZE);
1532 if (err = devmap_unload(owner->lockseg, lp->cookie, PAGESIZE)) {
1557 (void) devmap_load(owner->unlockseg, lp->cookie,
1562 lp->owner = NULL;
1565 LOCK(lp) = 0;
1567 if (lp->sleepers) {
1568 DEBUGF(4, (CE_CONT, " waking up, lp=%p\n", (void *)lp));
1569 cv_broadcast(&lp->locksleep);
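The tail of lock_giveup (1557-1569) is the hand-back itself: with the owner's mappings dealt with, clear lp->owner, force the shared lock word to unlocked when the page was stolen, and wake anything blocked in the lockfault loop. A short sketch, building on the structures above; the guard on zeroing LOCK(lp) is an assumption, since the excerpt at 1565 does not show its surrounding condition.

    /* Analogue of the tail of lock_giveup(); runs with lp->mutex held.
     * The devmap_unload()/devmap_load() calls that precede this in the
     * driver have no user-space equivalent and are omitted. */
    #include <pthread.h>

    void
    sketch_giveup(SegLock *lp, int trash)
    {
        lp->owner = NULL;       /* nobody owns the lock page mapping now */

        if (trash)
            LOCK(lp) = 0;       /* stolen page: force the lock word to unlocked */

        if (lp->sleepers > 0)
            pthread_cond_broadcast(&lp->locksleep);   /* wake lockfault waiters */
    }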
1581 SegLock *lp, *lpnext;
1587 for (lp = lock_free_list; lp != NULL; lp = lpnext) {
1588 mutex_enter(&lp->mutex);
1589 lpnext = lp->next;
1590 ASSERT(lp->clients == NULL);
1591 ASSERT(lp->owner == NULL);
1592 ASSERT(lp->alloccount == 0);
1593 mutex_destroy(&lp->mutex);
1594 cv_destroy(&lp->locksleep);
1595 kmem_free(lp, sizeof (SegLock));
1606 SegLock *lp;
1612 for (lp = lock_list; lp != NULL; lp = lp->next) {
1613 mutex_enter(&lp->mutex);
1615 lp->cookie, lp->key, lp->alloccount,
1616 lp->clients ? 'Y' : 'N',
1617 lp->owner ? 'Y' : 'N',
1618 lp->lockptr != 0 && LOCK(lp) ? 'Y' : 'N',
1619 lp->sleepers);
1620 mutex_exit(&lp->mutex);
1623 for (lp = lock_free_list; lp != NULL; lp = lp->next) {
1624 mutex_enter(&lp->mutex);
1626 lp->cookie, lp->key, lp->alloccount,
1627 lp->clients ? 'Y' : 'N',
1628 lp->owner ? 'Y' : 'N',
1629 lp->lockptr != 0 && LOCK(lp) ? 'Y' : 'N',
1630 lp->sleepers);
1631 mutex_exit(&lp->mutex);
1640 for (lp = lock_list; lp != NULL; lp = lp->next) {
1643 mutex_enter(&lp->mutex);
1646 (void *)lp, lp->key, lp->cookie, lp->alloccount,
1647 lp->lockptr != 0 ? LOCK(lp) : -1, lp->sleepers);
1651 lp->style, (void *)lp->lockptr, lp->timeout,
1652 (void *)lp->clients, (void *)lp->owner);
1655 for (sdp = lp->clients; sdp != NULL; sdp = sdp->next) {
1656 cmn_err(CE_CONT, " client %p%s, lp=%p, flag=%x, "
1658 (void *)sdp, sdp == lp->owner ? " (owner)" : "",
1659 (void *)sdp->lp, sdp->flag, (void *)ID(sdp),
1662 mutex_exit(&lp->mutex);