Lines Matching refs:lock

74 			cmn_err(CE_PANIC, "Illegal lock transition \
111 * lock: mutex]
136 static kmutex_t nlm_reg_lock; /* lock to protect array */
140 * Although we need a global lock dependency graph (and associated data
141 * structures), we also need a per-zone notion of whether the lock manager is
142 * running, and so whether to allow lock manager requests or not.
145 * (flk_lockmgr_status), protected by flock_lock, and set when the lock
151 * The per-graph copies are used to synchronize lock requests with shutdown
164 static void flk_free_lock(lock_descriptor_t *lock);
238 * KLM module not loaded; lock manager definitely not running.
259 * different file description for the same file will not drop the lock (i.e.
264 * Because these locks are per-description a lock ptr lives at the f_filocks
266 * to enable unique lock identification and management.
277 * currently the single lock must cover the entire file. This is validated in
280 * managed independently of the lock list on the vnode itself and it needs to
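The fragments above describe OFD-style (per-open-file-description) locks, which belong to the open file description rather than the process and, per the comment, must currently cover the entire file. As an illustration only, here is a minimal userland sketch of taking such a lock, assuming the platform exposes F_OFD_SETLK; the path and program are hypothetical and none of this code comes from the listing itself.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct flock fl = { 0 };
	int fd = open("/tmp/ofd-demo", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return (1);

	fl.l_type = F_WRLCK;		/* exclusive lock */
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;			/* 0 == to EOF, i.e. the whole file */

	/* The lock belongs to this open file description, not the process. */
	if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
		perror("F_OFD_SETLK");

	(void) close(fd);		/* last reference dropped: lock released */
	return (0);
}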
361 * values then check the validity of the lock range.
396 /* Get the lock graph for a particular vnode */
452 * Remove any lock on the vnode belonging to the given file_t.
456 * lock associated with fp.
461 lock_descriptor_t *fplock, *lock, *nlock;
482 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
484 if (lock) {
486 nlock = lock->l_next;
487 if (fplock == lock) {
488 CANCEL_WAKEUP(lock);
491 lock = nlock;
492 } while (lock->l_vnode == vp);
495 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
497 if (lock) {
499 nlock = lock->l_next;
500 if (fplock == lock) {
501 flk_delete_active_lock(lock, 0);
502 flk_wakeup(lock, 1);
503 flk_free_lock(lock);
506 lock = nlock;
507 } while (lock->l_vnode == vp);
521 * file will drop the lock (e.g. lock /etc/passwd, call a library function
523 * file descriptor the application loses its lock and does not know).
527 * This is why any close will drop the lock and is also why, once the process
528 * forks, the lock is no longer related to the new process. These locks can
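The surrounding comments describe the classic POSIX pitfall: process-owned record locks are dropped by any close() of the file, and are not inherited across fork(). A hedged userland sketch of that pitfall, using only standard fcntl(2) interfaces (error checking omitted, names illustrative):

#include <fcntl.h>
#include <unistd.h>

/*
 * Lock the file through fd1, then open and close it again (as a library
 * routine might).  POSIX semantics: the close of fd2 silently drops the
 * lock taken through fd1.
 */
static void
lose_lock_on_close(const char *path)
{
	int fd1 = open(path, O_RDWR);
	int fd2 = open(path, O_RDONLY);
	struct flock fl = { 0 };

	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;			/* whole file */
	(void) fcntl(fd1, F_SETLK, &fl);

	(void) close(fd2);	/* the process no longer holds the lock */
	(void) close(fd1);
}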
592 /* check the validity of the lock range */
641 * If the lock request is an NLM server request ....
646 * Bail out if this is a lock manager request and the
647 * lock manager is not supposed to be running.
672 * previous lock requests) and its state is
675 * error to deny the lock request.
685 /* Now get the lock graph for a particular vnode */
732 * Recovery mechanism to release lock manager locks when
789 * dependents for this lock or EINTR from flk_wait_execute_
1010 flk_free_lock(lock_descriptor_t *lock)
1014 ASSERT(IS_DEAD(lock));
1016 if ((fp = lock->l_ofd) != NULL)
1019 if (IS_REFERENCED(lock)) {
1020 lock->l_state |= DELETED_LOCK;
1024 kmem_free((void *)lock, sizeof (lock_descriptor_t));
1028 flk_set_state(lock_descriptor_t *lock, int new_state)
1032 * and more than once. If a sleeping lock is signaled awake more
1043 if (IS_INTERRUPTED(lock)) {
1050 if (IS_CANCELLED(lock)) {
1056 CHECK_LOCK_TRANSITION(lock->l_status, new_state);
1057 if (IS_PXFS(lock)) {
1058 cl_flk_state_transition_notify(lock, lock->l_status, new_state);
1060 lock->l_status = new_state;
1066 * The policy followed is that if a write lock is sleeping we don't allow read
1067 * locks before this write lock even though there may not be any active
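The policy stated above (a read request queues behind an overlapping sleeping write lock even when no active lock blocks it, so waiting writers are not starved) can be illustrated with a small, self-contained sketch; the types below are hypothetical stand-ins, not the kernel's structures.

#include <stdint.h>

typedef struct {
	int		is_write;	/* nonzero for a waiting write lock */
	uint64_t	start, end;	/* inclusive byte range */
} req_t;

/*
 * A new read request over [rd_start, rd_end] must wait if any sleeping
 * write lock overlaps that range, even if nothing active blocks it.
 */
static int
read_must_wait(uint64_t rd_start, uint64_t rd_end,
    const req_t *sleepers, int nsleepers)
{
	for (int i = 0; i < nsleepers; i++) {
		const req_t *s = &sleepers[i];

		if (s->is_write && rd_start <= s->end && rd_end >= s->start)
			return (1);	/* queue behind the waiting writer */
	}
	return (0);
}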
1083 lock_descriptor_t *lock;
1100 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
1103 if (lock) {
1105 if (BLOCKS(lock, request)) {
1112 * Grant lock if it is for the same owner holding active
1113 * lock that covers the request.
1116 if (SAME_OWNER(lock, request) &&
1117 COVERS(lock, request) &&
1120 lock = lock->l_next;
1121 } while (lock->l_vnode == vp);
1133 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
1134 if (lock) {
1136 if (BLOCKS(lock, request)) {
1137 if (IS_GRANTED(lock)) {
1144 lock = lock->l_next;
1145 } while ((lock->l_vnode == vp));
1146 first_glock = lock->l_prev;
1165 * lock's range, block.
1172 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
1173 if (lock) {
1175 flk_recompute_dependencies(lock, lk, 1, 0);
1176 lock = lock->l_next;
1177 } while (lock->l_vnode == vp);
1179 lock = first_glock;
1180 if (lock) {
1182 if (IS_GRANTED(lock)) {
1183 flk_recompute_dependencies(lock, lk, 1, 0);
1185 lock = lock->l_prev;
1186 } while ((lock->l_vnode == vp));
1200 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
1203 * If we find a sleeping write lock that is a superset of the
1205 * edge to this write lock we have paths to all locks in the
1208 * case is when this process that owns the sleeping write lock 'l1'
1212 * lock l5 owned by a process different from that owning l1, because
1217 if (lock) {
1219 if (BLOCKS(lock, request)) {
1222 if (COVERS(lock, request) &&
1223 lock->l_type == F_WRLCK) {
1225 !SAME_OWNER(lock, covered_by)) {
1230 covered_by = lock;
1233 !SAME_OWNER(lock, covered_by)) {
1234 lock = lock->l_next;
1237 if ((error = flk_add_edge(request, lock,
1241 lock = lock->l_next;
1242 } while (lock->l_vnode == vp);
1254 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
1255 ASSERT(lock != NULL);
1257 if (BLOCKS(lock, request)) {
1259 !SAME_OWNER(lock, covered_by)) {
1260 lock = lock->l_next;
1263 if ((error = flk_add_edge(request, lock,
1267 lock = lock->l_next;
1268 } while (lock->l_vnode == vp);
1305 lock_descriptor_t *lock, *lock1;
1322 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
1324 if (lock == NULL && request->l_type == F_UNLCK)
1326 if (lock == NULL) {
1332 lock1 = lock->l_next;
1333 if (SAME_OWNER(request, lock)) {
1334 done_searching = flk_relation(lock, request);
1336 lock = lock1;
1337 } while (lock->l_vnode == vp && !done_searching);
1376 * If the request is an NLM server lock request,
1377 * and the NLM state of the lock request is not
1379 * down), then cancel the sleeping lock and
1395 * when the lock is granted.
1405 * the callback must happen after putting the lock on the
1442 * If the lock manager is shutting down, return an
1602 * Check the relationship of request with lock and perform the
1603 * recomputation of dependencies, break lock if required, and return
1606 * The 'lock' and 'request' are compared and in case of overlap we
1607 * delete the 'lock' and form new locks to represent the non-overlapped
1608 * portion of original 'lock'. This function has side effects such as
1609 * 'lock' will be freed, new locks will be added to the active list.
1613 flk_relation(lock_descriptor_t *lock, lock_descriptor_t *request)
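The header comment above says that when 'lock' and 'request' overlap, the original 'lock' is deleted and new locks are created for its non-overlapped portions. A simplified sketch of that range arithmetic for the middle-overlap case (hypothetical range_t type, not the kernel routine itself):

#include <stdio.h>
#include <inttypes.h>

typedef struct { uint64_t start, end; } range_t;	/* inclusive ends */

/*
 * Split an existing held range around an overlapping request, producing
 * up to two remainder ranges (before and after the request).
 */
static int
split_around(range_t held, range_t req, range_t *left, range_t *right)
{
	int n = 0;

	if (req.start > held.start) {		/* remainder before the request */
		left->start = held.start;
		left->end = req.start - 1;
		n++;
	}
	if (req.end < held.end) {		/* remainder after the request */
		right->start = req.end + 1;
		right->end = held.end;
		n++;
	}
	return (n);	/* 0, 1 or 2 replacement locks */
}

int
main(void)
{
	range_t held = { 0, 99 }, req = { 40, 59 };
	range_t l = { 0, 0 }, r = { 0, 0 };
	int n = split_around(held, req, &l, &r);

	printf("%d remainders: [%" PRIu64 ",%" PRIu64 "] [%" PRIu64 ",%" PRIu64 "]\n",
	    n, l.start, l.end, r.start, r.end);
	return (0);
}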
1621 graph_t *gp = (lock->l_graph);
1634 lock->l_type == F_WRLCK)
1637 lock->l_type == F_RDLCK)
1642 if (lock->l_end < request->l_start) {
1643 if (lock->l_end == request->l_start - 1 &&
1646 request->l_start = lock->l_start;
1654 if (lock->l_start > request->l_end) {
1655 if (request->l_end == lock->l_start - 1 &&
1658 request->l_end = lock->l_end;
1666 if (request->l_end < lock->l_end) {
1667 if (request->l_start > lock->l_start) {
1669 request->l_start = lock->l_start;
1670 request->l_end = lock->l_end;
1676 COPY(lock1, lock);
1677 COPY(lock2, lock);
1678 lock1->l_start = lock->l_start;
1681 lock2->l_end = lock->l_end;
1687 } else if (request->l_start < lock->l_start) {
1689 request->l_end = lock->l_end;
1694 COPY(lock1, lock);
1702 request->l_start = lock->l_start;
1703 request->l_end = lock->l_end;
1708 COPY(lock1, lock);
1715 } else if (request->l_end > lock->l_end) {
1716 if (request->l_start > lock->l_start) {
1718 request->l_start = lock->l_start;
1723 COPY(lock1, lock);
1729 } else if (request->l_start < lock->l_start) {
1737 if (request->l_start > lock->l_start) {
1739 request->l_start = lock->l_start;
1744 COPY(lock1, lock);
1750 } else if (request->l_start < lock->l_start) {
1758 flk_delete_active_lock(lock, 0);
1759 flk_wakeup(lock, 1);
1760 flk_free_lock(lock);
1772 * dependencies because no lock will add an edge to this.
1784 ASSERT(FIRST_ADJ(lock) == HEAD(lock));
1788 * 'lock'.
1791 ep = FIRST_IN(lock);
1792 while (ep != HEAD(lock)) {
1797 flk_delete_active_lock(lock, 0);
1801 flk_recompute_dependencies(lock, topology, nvertex, 1);
1818 flk_wakeup(lock, 0);
1820 ep = FIRST_IN(lock);
1821 while (ep != HEAD(lock)) {
1822 lock->l_sedge = NEXT_IN(ep);
1826 ep = lock->l_sedge;
1829 flk_free_lock(lock);
1837 * Insert a lock into the active queue.
1845 lock_descriptor_t *first_lock, *lock;
1849 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
1850 first_lock = lock;
1853 for (; (lock->l_vnode == vp &&
1854 lock->l_start < new_lock->l_start); lock = lock->l_next)
1857 lock = ACTIVE_HEAD(gp);
1860 lock->l_prev->l_next = new_lock;
1861 new_lock->l_next = lock;
1862 new_lock->l_prev = lock->l_prev;
1863 lock->l_prev = new_lock;
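The fragments above show the new lock being spliced into a circular, doubly-linked active list kept sorted by starting offset. A minimal sketch of the same splice on a hypothetical node type (not the kernel's lock_descriptor_t):

#include <stdint.h>

typedef struct node {
	struct node	*next;
	struct node	*prev;
	uint64_t	start;
} node_t;

/*
 * Walk a circular list with a sentinel head until the first node whose
 * start offset is not smaller, then splice new_node in before it.
 */
static void
insert_sorted(node_t *head, node_t *new_node)
{
	node_t *n = head->next;

	while (n != head && n->start < new_node->start)
		n = n->next;

	new_node->next = n;
	new_node->prev = n->prev;
	n->prev->next = new_node;
	n->prev = new_node;
}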
1876 * Delete the active lock: Performs two functions depending on the
1878 * only, and the other is to both remove and free the lock.
1882 flk_delete_active_lock(lock_descriptor_t *lock, int free_lock)
1884 vnode_t *vp = lock->l_vnode;
1885 graph_t *gp = lock->l_graph;
1889 ASSERT(NO_DEPENDENTS(lock));
1890 ASSERT(NOT_BLOCKED(lock));
1891 ASSERT(IS_ACTIVE(lock));
1895 if (vp->v_filocks == (struct filock *)lock) {
1897 ((lock->l_next->l_vnode == vp) ? lock->l_next :
1900 lock->l_next->l_prev = lock->l_prev;
1901 lock->l_prev->l_next = lock->l_next;
1902 lock->l_next = lock->l_prev = NULL;
1903 flk_set_state(lock, FLK_DEAD_STATE);
1904 lock->l_state &= ~ACTIVE_LOCK;
1907 flk_free_lock(lock);
1921 lock_descriptor_t *lock;
1926 for (lock = gp->sleeping_locks.l_next; (lock != &gp->sleeping_locks &&
1927 lock->l_vnode < vp); lock = lock->l_next)
1930 lock->l_prev->l_next = request;
1931 request->l_prev = lock->l_prev;
1932 lock->l_prev = request;
1933 request->l_next = lock;
1939 * Cancelling a sleeping lock implies removing a vertex from the
1952 lock_descriptor_t *vertex, *lock;
1997 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
1999 if (lock) {
2001 if (IS_RECOMPUTE(lock)) {
2002 lock->l_index = nvertex;
2003 topology[nvertex++] = lock;
2005 lock->l_color = NO_COLOR;
2006 lock = lock->l_next;
2007 } while (lock->l_vnode == vp);
2010 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
2012 if (lock) {
2014 if (IS_RECOMPUTE(lock)) {
2015 lock->l_index = nvertex;
2016 topology[nvertex++] = lock;
2018 lock->l_color = NO_COLOR;
2019 lock = lock->l_next;
2020 } while (lock->l_vnode == vp);
2090 lock_descriptor_t *lock;
2094 for (lock = ACTIVE_HEAD(gp)->l_next; lock != ACTIVE_HEAD(gp);
2095 lock = lock->l_next)
2096 lock->l_color = 0;
2098 for (lock = SLEEPING_HEAD(gp)->l_next; lock != SLEEPING_HEAD(gp);
2099 lock = lock->l_next)
2100 lock->l_color = 0;
2107 * Wake up locks that are blocked on the given lock.
2111 flk_wakeup(lock_descriptor_t *lock, int adj_list_remove)
2114 graph_t *gp = lock->l_graph;
2118 if (NO_DEPENDENTS(lock))
2120 ep = FIRST_IN(lock);
2134 lock->l_sedge = NEXT_IN(ep);
2137 ep = lock->l_sedge;
2138 } while (ep != HEAD(lock));
2139 ASSERT(NO_DEPENDENTS(lock));
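flk_wakeup(), sketched in the fragments above, walks the in-edges of the freed lock and wakes each dependent. A toy illustration of that idea, using a simple adjacency matrix as an assumed stand-in for the kernel's edge lists:

#include <stdio.h>

#define	NL	4

/* blocked_on[a][b] != 0 means lock a is blocked on lock b */
static int blocked_on[NL][NL];

static int
still_blocked(int a)
{
	for (int b = 0; b < NL; b++)
		if (blocked_on[a][b])
			return (1);
	return (0);
}

/*
 * Drop every edge pointing at the freed lock; any dependent whose last
 * blocker that was gets granted (the sleeping thread would be signalled).
 */
static void
wakeup_dependents(int freed)
{
	for (int a = 0; a < NL; a++) {
		if (blocked_on[a][freed]) {
			blocked_on[a][freed] = 0;
			if (!still_blocked(a))
				printf("lock %d granted\n", a);
		}
	}
}

int
main(void)
{
	blocked_on[1][0] = 1;				/* lock 1 waits only on lock 0 */
	blocked_on[2][0] = blocked_on[2][3] = 1;	/* lock 2 also waits on lock 3 */
	wakeup_dependents(0);				/* grants lock 1; lock 2 stays blocked */
	return (0);
}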
2149 * If lock l1 in the dependent set of request is dependent (blocked by)
2150 * on lock l2 in topology but does not have a path to it, we add an edge
2168 lock_descriptor_t *vertex, *lock;
2202 * dependencies for this lock in the
2214 lock = topology[i];
2215 if (COLORED(lock))
2217 if (BLOCKS(lock, vertex)) {
2218 (void) flk_add_edge(vertex, lock,
2220 COLOR(lock);
2222 count += flk_color_reachables(lock);
2242 lock = ep->from_vertex;
2243 STACK_PUSH(vertex_stack, lock, l_stack);
2244 lock->l_sedge = FIRST_IN(lock);
2262 lock_descriptor_t *ver, *lock;
2276 lock = ep->to_vertex;
2277 if (COLORED(lock))
2279 COLOR(lock);
2280 if (IS_RECOMPUTE(lock))
2282 STACK_PUSH(vertex_stack, lock, l_stack1);
2291 * the barrier count of barrier vertices that are reachable from lock.
2295 flk_update_barriers(lock_descriptor_t *lock)
2303 STACK_PUSH(vertex_stack, lock, l_stack1);
2332 * Finds all vertices that are reachable from 'lock' more than once and
2334 * The barrier count is one minus the total number of paths from lock
2339 flk_find_barriers(lock_descriptor_t *lock)
2348 STACK_PUSH(vertex_stack, lock, l_stack1);
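The comment above defines a barrier as a vertex reachable from 'lock' by more than one path, with a barrier count of one less than the number of paths. A small sketch that counts paths in a toy DAG to show where barriers arise (graph representation is an assumption, not the kernel's):

#include <stdio.h>

#define	NV	5

static int adj[NV][NV];		/* adjacency matrix of the blocking DAG */
static int paths[NV];		/* number of distinct paths from the source */

/* Enumerate every path out of v, bumping the path count of each vertex hit. */
static void
count_paths(int v)
{
	for (int w = 0; w < NV; w++) {
		if (adj[v][w]) {
			paths[w]++;
			count_paths(w);
		}
	}
}

int
main(void)
{
	/* 0 -> 1 -> 3 and 0 -> 2 -> 3: vertex 3 is reachable twice */
	adj[0][1] = adj[0][2] = adj[1][3] = adj[2][3] = 1;
	count_paths(0);

	for (int v = 1; v < NV; v++) {
		if (paths[v] > 1)
			printf("vertex %d: barrier count %d\n", v, paths[v] - 1);
	}
	return (0);
}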
2373 * Finds the first lock that is mainly responsible for blocking this
2374 * request. If there is no such lock, request->l_flock.l_type is set to
2376 * of the blocking lock.
2378 * Note: It is possible a request is blocked by a sleeping lock because
2388 lock_descriptor_t *lock, *blocker;
2392 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
2394 if (lock) {
2396 if (BLOCKS(lock, request)) {
2397 blocker = lock;
2400 lock = lock->l_next;
2401 } while (lock->l_vnode == vp);
2406 * No active lock is blocking this request, but if a read
2407 * lock is requested, it may also get blocked by a waiting
2411 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
2412 if (lock) {
2414 if (BLOCKS(lock, request)) {
2415 blocker = lock;
2418 lock = lock->l_next;
2419 } while (lock->l_vnode == vp);
2503 lock_descriptor_t *lock;
2522 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
2524 if (lock) {
2525 while (lock->l_vnode == vp) {
2527 lock_nlmid = GETNLMID(lock->l_flock.l_sysid);
2530 * If NLM server request _and_ nlmid of lock matches
2531 * nlmid of argument, then we've found a remote lock.
2533 if (IS_LOCKMGR(lock) && nlmid == lock_nlmid) {
2537 lock = lock->l_next;
2541 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
2543 if (lock) {
2544 while (lock->l_vnode == vp) {
2546 lock_nlmid = GETNLMID(lock->l_flock.l_sysid);
2549 * If NLM server request _and_ nlmid of lock matches
2550 * nlmid of argument, then we've found a remote lock.
2552 if (IS_LOCKMGR(lock) && nlmid == lock_nlmid) {
2556 lock = lock->l_next;
2577 lock_descriptor_t *lock;
2588 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
2590 if (lock) {
2591 while (lock->l_vnode == vp) {
2592 if (IS_REMOTE(lock)) {
2596 lock = lock->l_next;
2600 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
2602 if (lock) {
2603 while (lock->l_vnode == vp) {
2604 if (IS_REMOTE(lock)) {
2608 lock = lock->l_next;
2625 lock_descriptor_t *lock;
2639 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
2641 if (lock) {
2642 while (lock->l_vnode == vp) {
2643 if (lock->l_flock.l_sysid == sysid) {
2647 lock = lock->l_next;
2651 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
2653 if (lock) {
2654 while (lock->l_vnode == vp) {
2655 if (lock->l_flock.l_sysid == sysid) {
2659 lock = lock->l_next;
2682 lock_descriptor_t *lock;
2697 for (lock = ACTIVE_HEAD(gp)->l_next;
2698 lock != ACTIVE_HEAD(gp) && !has_locks;
2699 lock = lock->l_next) {
2700 if (lock->l_flock.l_sysid == sysid)
2706 for (lock = SLEEPING_HEAD(gp)->l_next;
2707 lock != SLEEPING_HEAD(gp) && !has_locks;
2708 lock = lock->l_next) {
2709 if (lock->l_flock.l_sysid == sysid)
2735 lock_descriptor_t *lock, *nlock;
2753 mutex_enter(&gp->gp_mutex); /* get mutex on lock graph */
2756 lock = SLEEPING_HEAD(gp)->l_next;
2757 while (lock != SLEEPING_HEAD(gp)) {
2758 nlock = lock->l_next;
2759 if (lock->l_flock.l_sysid == sysid) {
2760 INTERRUPT_WAKEUP(lock);
2762 lock = nlock;
2766 lock = ACTIVE_HEAD(gp)->l_next;
2767 while (lock != ACTIVE_HEAD(gp)) {
2768 nlock = lock->l_next;
2769 if (lock->l_flock.l_sysid == sysid) {
2770 flk_delete_active_lock(lock, 0);
2771 flk_wakeup(lock, 1);
2772 flk_free_lock(lock);
2774 lock = nlock;
2776 mutex_exit(&gp->gp_mutex); /* release mutex on lock graph */
2788 lock_descriptor_t *lock, *nlock;
2808 lock = SLEEPING_HEAD(gp)->l_next;
2809 while (lock != SLEEPING_HEAD(gp)) {
2810 nlock = lock->l_next;
2811 if (lock->l_flock.l_sysid == sysid) {
2812 INTERRUPT_WAKEUP(lock);
2814 lock = nlock;
2818 lock = ACTIVE_HEAD(gp)->l_next;
2819 while (lock != ACTIVE_HEAD(gp)) {
2820 nlock = lock->l_next;
2821 if (lock->l_flock.l_sysid == sysid) {
2822 flk_delete_active_lock(lock, 0);
2823 flk_wakeup(lock, 1);
2824 flk_free_lock(lock);
2826 lock = nlock;
2842 lock_descriptor_t *lock, *nlock;
2857 lock = SLEEPING_HEAD(gp)->l_next;
2858 while (lock != SLEEPING_HEAD(gp)) {
2859 nlock = lock->l_next;
2860 if (lock->l_vnode->v_vfsp == vfsp) {
2861 ASSERT(IS_PXFS(lock));
2862 if (GETPXFSID(lock->l_flock.l_sysid) ==
2864 flk_set_state(lock,
2866 flk_cancel_sleeping_lock(lock, 1);
2869 lock = nlock;
2873 lock = ACTIVE_HEAD(gp)->l_next;
2874 while (lock != ACTIVE_HEAD(gp)) {
2875 nlock = lock->l_next;
2876 if (lock->l_vnode->v_vfsp == vfsp) {
2877 ASSERT(IS_PXFS(lock));
2878 if (GETPXFSID(lock->l_flock.l_sysid) ==
2880 flk_delete_active_lock(lock, 0);
2881 flk_wakeup(lock, 1);
2882 flk_free_lock(lock);
2885 lock = nlock;
2892 * Search for a sleeping lock manager lock which matches exactly this lock
2895 * Return 1 if a matching lock was found, 0 otherwise.
2901 lock_descriptor_t *lock, *nlock;
2907 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
2909 if (lock) {
2910 while (lock->l_vnode == vp) {
2911 nlock = lock->l_next;
2912 if (SAME_OWNER(lock, request) &&
2913 lock->l_start == request->l_start &&
2914 lock->l_end == request->l_end) {
2915 INTERRUPT_WAKEUP(lock);
2918 lock = nlock;
2934 lock_descriptor_t *lock, *nlock;
2948 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
2950 if (lock) {
2952 nlock = lock->l_next;
2953 if ((lock->l_flock.l_pid == pid ||
2955 lock->l_flock.l_sysid == sysid) {
2956 CANCEL_WAKEUP(lock);
2958 lock = nlock;
2959 } while (lock->l_vnode == vp);
2962 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
2964 if (lock) {
2966 nlock = lock->l_next;
2967 if ((lock->l_flock.l_pid == pid ||
2969 lock->l_flock.l_sysid == sysid) {
2970 flk_delete_active_lock(lock, 0);
2971 STACK_PUSH(link_stack, lock, l_stack);
2973 lock = nlock;
2974 } while (lock->l_vnode == vp);
2977 while ((lock = STACK_TOP(link_stack)) != NULL) {
2979 flk_wakeup(lock, 1);
2980 flk_free_lock(lock);
3074 * Function checks for deadlock due to the new 'lock'. If a deadlock is found,
3075 * the edges of this lock are freed and nonzero is returned.
3079 flk_check_deadlock(lock_descriptor_t *lock)
3092 if (lock->l_ofd != NULL)
3098 start_vertex = flk_get_proc_vertex(lock);
3103 ep = FIRST_ADJ(lock);
3104 while (ep != HEAD(lock)) {
3126 ep = FIRST_IN(lock);
3128 while (ep != HEAD(lock)) {
3185 /* we remove all lock edges and proc edges */
3187 ep = FIRST_ADJ(lock);
3188 while (ep != HEAD(lock)) {
3215 ep = FIRST_IN(lock);
3216 while (ep != HEAD(lock)) {
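flk_check_deadlock(), whose fragments appear above, works on a graph of lock owners (proc vertices). The essential test is that adding a wait-for edge u -> v deadlocks exactly when v can already reach u. A toy reachability sketch of that test (not the kernel's data structures):

#include <stdio.h>
#include <string.h>

#define	NP	4

static int edge[NP][NP];	/* edge[a][b]: owner a waits on owner b */

/* Depth-first search: can 'from' reach 'to' through existing wait-for edges? */
static int
reaches(int from, int to, int seen[NP])
{
	if (from == to)
		return (1);
	seen[from] = 1;
	for (int w = 0; w < NP; w++) {
		if (edge[from][w] && !seen[w] && reaches(w, to, seen))
			return (1);
	}
	return (0);
}

static int
would_deadlock(int u, int v)
{
	int seen[NP];

	memset(seen, 0, sizeof (seen));
	return (reaches(v, u, seen));	/* cycle iff v already reaches u */
}

int
main(void)
{
	edge[1][2] = 1;		/* owner 1 waits on owner 2 */
	edge[2][0] = 1;		/* owner 2 waits on owner 0 */
	printf("0 -> 1 deadlocks: %d\n", would_deadlock(0, 1));	/* prints 1 */
	printf("0 -> 3 deadlocks: %d\n", would_deadlock(0, 3));	/* prints 0 */
	return (0);
}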
3249 * Get a proc vertex. If lock's pvertex value refers to a valid proc vertex
3255 flk_get_proc_vertex(lock_descriptor_t *lock)
3262 if (lock->pvertex != -1) {
3263 ASSERT(lock->pvertex >= 0);
3264 pv = pgraph.proc[lock->pvertex];
3265 if (pv != NULL && PROC_SAME_OWNER(lock, pv)) {
3271 if (pv != NULL && PROC_SAME_OWNER(lock, pv)) {
3272 lock->pvertex = pv->index = i;
3277 pv->pid = lock->l_flock.l_pid;
3278 pv->sysid = lock->l_flock.l_sysid;
3284 lock->pvertex = pv->index = i;
3302 pv->index = lock->pvertex = pgraph.gcount;
3374 * recomputed lock graph. Otherwise we might miss a deadlock detection.
3376 * dependencies() otherwise if a process tries to lock a vnode hashed
3451 * Set the control status for lock manager requests.
3466 * before sleeping (so they're not holding the lock for the graph). If
3467 * such a thread reacquires the graph's lock (to go to sleep) after
3524 * with an LLM that doesn't already know about it (never sent a lock
3526 * lock request. Suppose that a shutdown request from the NLM server
3528 * service the request. Now suppose a new lock request is in
3533 * having done nothing, and the lock request will proceed and
3535 * by the lock request because there was no record of that NLM server
3538 * been discarded, but in fact there's still one lock held.
3540 * its state immediately to NLM_SHUTTING_DOWN. The lock request in
3542 * this lock and discard it.
3559 * have their lock requests cancelled and descriptors
3560 * removed from the sleeping lock list. Note that the NLM
3561 * server state associated with each lock descriptor is
3581 * Set the control status for lock manager requests.
3586 * before sleeping (so they're not holding the lock for the graph). If
3587 * such a thread reacquires the graph's lock (to go to sleep) after
3609 * If the lock manager is coming back up, all that's needed is to
3610 * propagate this information to the graphs. If the lock manager
3648 * describing the lock is returned. Each element in the list is
3655 * the current lock information, and that it is a snapshot of a moving
3663 lock_descriptor_t *lock;
3705 for (lock = graph_head->l_next;
3706 lock != graph_head;
3707 lock = lock->l_next) {
3708 if (use_sysid && lock->l_flock.l_sysid != sysid)
3710 if (pid != NOPID && lock->l_flock.l_pid != pid)
3712 if (vp != NULL && lock->l_vnode != vp)
3714 if (lock_state && !(lock_state & lock->l_state))
3716 if (zoneid != lock->l_zoneid && zoneid != ALL_ZONES)
3719 * A matching lock was found. Allocate
3725 VN_HOLD(lock->l_vnode);
3726 llp->ll_vp = lock->l_vnode;
3727 create_flock(lock, &(llp->ll_flock));
3832 * b. For each lock descriptor in the list do
3833 * i. If the requested lock is an NLM server request AND
3835 * change the lock descriptor's state field to
3838 * d. For each lock descriptor in the list do
3839 * i. If the requested lock is an NLM server request AND
3841 * change the lock descriptor's state field to
3846 graph_t *gp; /* lock graph */
3847 lock_descriptor_t *lock; /* lock */
3848 lock_descriptor_t *nlock = NULL; /* next lock */
3859 /* Get list of sleeping locks in current lock graph. */
3861 for (lock = SLEEPING_HEAD(gp)->l_next;
3862 lock != SLEEPING_HEAD(gp);
3863 lock = nlock) {
3864 nlock = lock->l_next;
3866 lock_nlmid = GETNLMID(lock->l_flock.l_sysid);
3869 * If NLM server request AND nlmid of lock matches
3871 * lock to "nlm_state."
3873 if (IS_LOCKMGR(lock) && nlmid == lock_nlmid) {
3874 SET_NLM_STATE(lock, nlm_state);
3878 /* Get list of active locks in current lock graph. */
3879 for (lock = ACTIVE_HEAD(gp)->l_next;
3880 lock != ACTIVE_HEAD(gp);
3881 lock = nlock) {
3882 nlock = lock->l_next;
3884 lock_nlmid = GETNLMID(lock->l_flock.l_sysid);
3887 * If NLM server request AND nlmid of lock matches
3889 * lock to "nlm_state."
3891 if (IS_LOCKMGR(lock) && nlmid == lock_nlmid) {
3892 ASSERT(IS_ACTIVE(lock));
3893 SET_NLM_STATE(lock, nlm_state);
3902 * Effects: Find all sleeping lock manager requests _only_ for the NLM server
3903 * identified by "nlmid." Poke those lock requests.
3908 lock_descriptor_t *lock;
3909 lock_descriptor_t *nlock = NULL; /* next lock */
3923 for (lock = SLEEPING_HEAD(gp)->l_next;
3924 lock != SLEEPING_HEAD(gp);
3925 lock = nlock) {
3926 nlock = lock->l_next;
3928 * If NLM server request _and_ nlmid of lock matches
3930 * lock to NLM_SHUTTING_DOWN, and wake up sleeping
3933 if (IS_LOCKMGR(lock)) {
3936 GETNLMID(lock->l_flock.l_sysid);
3938 SET_NLM_STATE(lock,
3940 INTERRUPT_WAKEUP(lock);
3950 * Effects: Find all active (granted) lock manager locks _only_ for the
3956 lock_descriptor_t *lock;
3957 lock_descriptor_t *nlock = NULL; /* next lock */
3971 for (lock = ACTIVE_HEAD(gp)->l_next;
3972 lock != ACTIVE_HEAD(gp);
3973 lock = nlock) {
3974 nlock = lock->l_next;
3975 ASSERT(IS_ACTIVE(lock));
3979 * the lock matches nlmid of argument, then
3980 * remove the active lock from the list, wake up blocked
3981 * threads, and free the storage for the lock.
3983 * of this lock to NLM_DOWN because the lock will
3986 if (IS_LOCKMGR(lock)) {
3988 lock_nlmid = GETNLMID(lock->l_flock.l_sysid);
3990 flk_delete_active_lock(lock, 0);
3991 flk_wakeup(lock, 1);
3992 flk_free_lock(lock);
4001 * Find all sleeping lock manager requests and poke them.
4006 lock_descriptor_t *lock;
4007 lock_descriptor_t *nlock = NULL; /* next lock */
4022 for (lock = SLEEPING_HEAD(gp)->l_next;
4023 lock != SLEEPING_HEAD(gp);
4024 lock = nlock) {
4025 nlock = lock->l_next;
4026 if (IS_LOCKMGR(lock) && lock->l_zoneid == zoneid) {
4027 INTERRUPT_WAKEUP(lock);
4036 * Find all active (granted) lock manager locks and release them.
4041 lock_descriptor_t *lock;
4042 lock_descriptor_t *nlock = NULL; /* next lock */
4057 for (lock = ACTIVE_HEAD(gp)->l_next;
4058 lock != ACTIVE_HEAD(gp);
4059 lock = nlock) {
4060 nlock = lock->l_next;
4061 if (IS_LOCKMGR(lock) && lock->l_zoneid == zoneid) {
4062 ASSERT(IS_ACTIVE(lock));
4063 flk_delete_active_lock(lock, 0);
4064 flk_wakeup(lock, 1);
4065 flk_free_lock(lock);
4074 * Wait until a lock is granted, cancelled, or interrupted.
4094 * Create an flock structure from the existing lock information
4096 * This routine is used to create flock structures for the lock manager
4097 * to use in a reclaim request. Since the lock was originated on this
4118 * Convert flock_t data describing a lock range into unsigned long starting
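The comment above refers to converting flock_t data (l_whence, l_start, l_len) into an absolute byte range. A hedged sketch of that conversion, with MAX_END standing in for the kernel's maximum-offset constant and negative l_len handling deliberately omitted:

#include <stdint.h>

#define	MAX_END	UINT64_MAX	/* stand-in for the kernel's max offset */

/*
 * Resolve (l_whence, l_start, l_len) against the current offset or file
 * size into an inclusive [start, end] range; l_len == 0 means "to EOF".
 */
static int
resolve_range(short whence, int64_t l_start, int64_t l_len,
    int64_t cur_off, int64_t file_size, uint64_t *start, uint64_t *end)
{
	int64_t base;

	switch (whence) {
	case 0:	base = 0; break;		/* SEEK_SET */
	case 1:	base = cur_off; break;		/* SEEK_CUR */
	case 2:	base = file_size; break;	/* SEEK_END */
	default: return (-1);
	}

	if (base + l_start < 0)
		return (-1);			/* range starts before offset 0 */
	*start = (uint64_t)(base + l_start);

	if (l_len == 0)
		*end = MAX_END;			/* lock extends to end of file */
	else if (l_len > 0)
		*end = *start + (uint64_t)(l_len - 1);
	else
		return (-1);			/* negative l_len not handled here */

	return (0);
}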
4171 * Check the validity of lock data. This can be used by the NFS
4197 * Fill in request->l_flock with information about the lock blocking the
4198 * request. The complexity here is that lock manager requests are allowed
4202 * What should be done when "blocker" is a lock manager lock that uses the
4281 * lock.
4292 lock_descriptor_t *lock;
4311 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
4313 for (; lock && lock->l_vnode == vp; lock = lock->l_next) {
4314 if ((svmand || (lock->l_state & NBMAND_LOCK)) &&
4315 (lock->l_flock.l_sysid != sysid ||
4316 lock->l_flock.l_pid != pid) &&
4318 lock->l_type, lock->l_start, lock->l_end)) {
4329 * Return non-zero if the given I/O request conflicts with the given lock.
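The comment above describes testing whether an I/O request conflicts with a given lock. A minimal sketch of the usual test, overlapping byte ranges plus at least one writer, using hypothetical types rather than the kernel's:

#include <stdint.h>

typedef enum { ACC_READ, ACC_WRITE } acc_t;

typedef struct {
	acc_t		type;		/* ACC_READ for a shared (read) lock */
	uint64_t	start;
	uint64_t	end;		/* inclusive */
} lrange_t;

/* Nonzero if the I/O range overlaps the lock and either side is a writer. */
static int
io_conflicts(acc_t io_type, uint64_t io_start, uint64_t io_end,
    const lrange_t *lk)
{
	int overlap = io_start <= lk->end && io_end >= lk->start;

	return (overlap && (io_type == ACC_WRITE || lk->type == ACC_WRITE));
}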
4354 lock_descriptor_t *lock, *lock1;
4357 for (lock = ACTIVE_HEAD(gp)->l_next; lock != ACTIVE_HEAD(gp);
4358 lock = lock->l_next) {
4359 ASSERT(IS_ACTIVE(lock));
4360 ASSERT(NOT_BLOCKED(lock));
4361 ASSERT(!IS_BARRIER(lock));
4363 ep = FIRST_IN(lock);
4365 while (ep != HEAD(lock)) {
4371 for (lock1 = lock->l_next; lock1 != ACTIVE_HEAD(gp);
4373 if (lock1->l_vnode == lock->l_vnode) {
4374 if (BLOCKS(lock1, lock)) {
4376 "active lock %p blocks %p",
4377 (void *)lock1, (void *)lock);
4378 } else if (BLOCKS(lock, lock1)) {
4380 "active lock %p blocks %p",
4381 (void *)lock, (void *)lock1);
4538 lock_descriptor_t *lock;
4544 SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
4546 if (lock) {
4547 while (lock != ACTIVE_HEAD(gp) && (lock->l_vnode == vp)) {
4548 if (lock->l_flock.l_pid == pid &&
4549 lock->l_flock.l_sysid == sysid)
4551 "owner pid %d's lock %p in active queue",
4552 pid, (void *)lock);
4553 lock = lock->l_next;
4556 SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
4558 if (lock) {
4559 while (lock != SLEEPING_HEAD(gp) && (lock->l_vnode == vp)) {
4560 if (lock->l_flock.l_pid == pid &&
4561 lock->l_flock.l_sysid == sysid)
4563 "owner pid %d's lock %p in sleep queue",
4564 pid, (void *)lock);
4565 lock = lock->l_next;