17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5*da6c28aaSamw * Common Development and Distribution License (the "License"). 6*da6c28aaSamw * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate /* ONC_PLUS EXTRACT START */ 227c478bd9Sstevel@tonic-gate 237c478bd9Sstevel@tonic-gate /* 24*da6c28aaSamw * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 257c478bd9Sstevel@tonic-gate * Use is subject to license terms. 
267c478bd9Sstevel@tonic-gate */ 277c478bd9Sstevel@tonic-gate 287c478bd9Sstevel@tonic-gate /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 297c478bd9Sstevel@tonic-gate /* All Rights Reserved */ 307c478bd9Sstevel@tonic-gate 317c478bd9Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 327c478bd9Sstevel@tonic-gate 337c478bd9Sstevel@tonic-gate #include <sys/flock_impl.h> 347c478bd9Sstevel@tonic-gate #include <sys/vfs.h> 357c478bd9Sstevel@tonic-gate #include <sys/t_lock.h> /* for <sys/callb.h> */ 367c478bd9Sstevel@tonic-gate #include <sys/callb.h> 377c478bd9Sstevel@tonic-gate #include <sys/clconf.h> 387c478bd9Sstevel@tonic-gate #include <sys/cladm.h> 397c478bd9Sstevel@tonic-gate #include <sys/nbmlock.h> 407c478bd9Sstevel@tonic-gate #include <sys/cred.h> 417c478bd9Sstevel@tonic-gate #include <sys/policy.h> 427c478bd9Sstevel@tonic-gate 437c478bd9Sstevel@tonic-gate /* 447c478bd9Sstevel@tonic-gate * The following four variables are for statistics purposes and they are 457c478bd9Sstevel@tonic-gate * not protected by locks. They may not be accurate but will at least be 467c478bd9Sstevel@tonic-gate * close to the actual value. 
477c478bd9Sstevel@tonic-gate */ 487c478bd9Sstevel@tonic-gate 497c478bd9Sstevel@tonic-gate int flk_lock_allocs; 507c478bd9Sstevel@tonic-gate int flk_lock_frees; 517c478bd9Sstevel@tonic-gate int edge_allocs; 527c478bd9Sstevel@tonic-gate int edge_frees; 537c478bd9Sstevel@tonic-gate int flk_proc_vertex_allocs; 547c478bd9Sstevel@tonic-gate int flk_proc_edge_allocs; 557c478bd9Sstevel@tonic-gate int flk_proc_vertex_frees; 567c478bd9Sstevel@tonic-gate int flk_proc_edge_frees; 577c478bd9Sstevel@tonic-gate 587c478bd9Sstevel@tonic-gate static kmutex_t flock_lock; 597c478bd9Sstevel@tonic-gate 607c478bd9Sstevel@tonic-gate #ifdef DEBUG 617c478bd9Sstevel@tonic-gate int check_debug = 0; 627c478bd9Sstevel@tonic-gate #define CHECK_ACTIVE_LOCKS(gp) if (check_debug) \ 637c478bd9Sstevel@tonic-gate check_active_locks(gp); 647c478bd9Sstevel@tonic-gate #define CHECK_SLEEPING_LOCKS(gp) if (check_debug) \ 657c478bd9Sstevel@tonic-gate check_sleeping_locks(gp); 667c478bd9Sstevel@tonic-gate #define CHECK_OWNER_LOCKS(gp, pid, sysid, vp) \ 677c478bd9Sstevel@tonic-gate if (check_debug) \ 687c478bd9Sstevel@tonic-gate check_owner_locks(gp, pid, sysid, vp); 697c478bd9Sstevel@tonic-gate #define CHECK_LOCK_TRANSITION(old_state, new_state) \ 707c478bd9Sstevel@tonic-gate { \ 717c478bd9Sstevel@tonic-gate if (check_lock_transition(old_state, new_state)) { \ 727c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "Illegal lock transition \ 737c478bd9Sstevel@tonic-gate from %d to %d", old_state, new_state); \ 747c478bd9Sstevel@tonic-gate } \ 757c478bd9Sstevel@tonic-gate } 767c478bd9Sstevel@tonic-gate #else 777c478bd9Sstevel@tonic-gate 787c478bd9Sstevel@tonic-gate #define CHECK_ACTIVE_LOCKS(gp) 797c478bd9Sstevel@tonic-gate #define CHECK_SLEEPING_LOCKS(gp) 807c478bd9Sstevel@tonic-gate #define CHECK_OWNER_LOCKS(gp, pid, sysid, vp) 817c478bd9Sstevel@tonic-gate #define CHECK_LOCK_TRANSITION(old_state, new_state) 827c478bd9Sstevel@tonic-gate 837c478bd9Sstevel@tonic-gate #endif /* DEBUG */ 847c478bd9Sstevel@tonic-gate 
857c478bd9Sstevel@tonic-gate struct kmem_cache *flk_edge_cache; 867c478bd9Sstevel@tonic-gate 877c478bd9Sstevel@tonic-gate graph_t *lock_graph[HASH_SIZE]; 887c478bd9Sstevel@tonic-gate proc_graph_t pgraph; 897c478bd9Sstevel@tonic-gate 907c478bd9Sstevel@tonic-gate /* 917c478bd9Sstevel@tonic-gate * Clustering. 927c478bd9Sstevel@tonic-gate * 937c478bd9Sstevel@tonic-gate * NLM REGISTRY TYPE IMPLEMENTATION 947c478bd9Sstevel@tonic-gate * 957c478bd9Sstevel@tonic-gate * Assumptions: 967c478bd9Sstevel@tonic-gate * 1. Nodes in a cluster are numbered starting at 1; always non-negative 977c478bd9Sstevel@tonic-gate * integers; maximum node id is returned by clconf_maximum_nodeid(). 987c478bd9Sstevel@tonic-gate * 2. We use this node id to identify the node an NLM server runs on. 997c478bd9Sstevel@tonic-gate */ 1007c478bd9Sstevel@tonic-gate 1017c478bd9Sstevel@tonic-gate /* 1027c478bd9Sstevel@tonic-gate * NLM registry object keeps track of NLM servers via their 1037c478bd9Sstevel@tonic-gate * nlmids (which are the node ids of the node in the cluster they run on) 1047c478bd9Sstevel@tonic-gate * that have requested locks at this LLM with which this registry is 1057c478bd9Sstevel@tonic-gate * associated. 1067c478bd9Sstevel@tonic-gate * 1077c478bd9Sstevel@tonic-gate * Representation of abstraction: 1087c478bd9Sstevel@tonic-gate * rep = record[ states: array[nlm_state], 1097c478bd9Sstevel@tonic-gate * lock: mutex] 1107c478bd9Sstevel@tonic-gate * 1117c478bd9Sstevel@tonic-gate * Representation invariants: 1127c478bd9Sstevel@tonic-gate * 1. index i of rep.states is between 0 and n - 1 where n is number 1137c478bd9Sstevel@tonic-gate * of elements in the array, which happen to be the maximum number 1147c478bd9Sstevel@tonic-gate * of nodes in the cluster configuration + 1. 1157c478bd9Sstevel@tonic-gate * 2. 
map nlmid to index i of rep.states 1167c478bd9Sstevel@tonic-gate * 0 -> 0 1177c478bd9Sstevel@tonic-gate * 1 -> 1 1187c478bd9Sstevel@tonic-gate * 2 -> 2 1197c478bd9Sstevel@tonic-gate * n-1 -> clconf_maximum_nodeid()+1 1207c478bd9Sstevel@tonic-gate * 3. This 1-1 mapping is quite convenient and it avoids errors resulting 1217c478bd9Sstevel@tonic-gate * from forgetting to subtract 1 from the index. 1227c478bd9Sstevel@tonic-gate * 4. The reason we keep the 0th index is the following. A legitimate 1237c478bd9Sstevel@tonic-gate * cluster configuration includes making a UFS file system NFS 1247c478bd9Sstevel@tonic-gate * exportable. The code is structured so that if you're in a cluster 1257c478bd9Sstevel@tonic-gate * you do one thing; otherwise, you do something else. The problem 1267c478bd9Sstevel@tonic-gate * is what to do if you think you're in a cluster with PXFS loaded, 1277c478bd9Sstevel@tonic-gate * but you're using UFS not PXFS? The upper two bytes of the sysid 1287c478bd9Sstevel@tonic-gate * encode the node id of the node where NLM server runs; these bytes 1297c478bd9Sstevel@tonic-gate * are zero for UFS. Since the nodeid is used to index into the 1307c478bd9Sstevel@tonic-gate * registry, we can record the NLM server state information at index 1317c478bd9Sstevel@tonic-gate * 0 using the same mechanism used for PXFS file locks! 
1327c478bd9Sstevel@tonic-gate */ 1337c478bd9Sstevel@tonic-gate static flk_nlm_status_t *nlm_reg_status = NULL; /* state array 0..N-1 */ 1347c478bd9Sstevel@tonic-gate static kmutex_t nlm_reg_lock; /* lock to protect arrary */ 1357c478bd9Sstevel@tonic-gate static uint_t nlm_status_size; /* size of state array */ 1367c478bd9Sstevel@tonic-gate 1377c478bd9Sstevel@tonic-gate /* 1387c478bd9Sstevel@tonic-gate * Although we need a global lock dependency graph (and associated data 1397c478bd9Sstevel@tonic-gate * structures), we also need a per-zone notion of whether the lock manager is 1407c478bd9Sstevel@tonic-gate * running, and so whether to allow lock manager requests or not. 1417c478bd9Sstevel@tonic-gate * 1427c478bd9Sstevel@tonic-gate * Thus, on a per-zone basis we maintain a ``global'' variable 1437c478bd9Sstevel@tonic-gate * (flk_lockmgr_status), protected by flock_lock, and set when the lock 1447c478bd9Sstevel@tonic-gate * manager is determined to be changing state (starting or stopping). 1457c478bd9Sstevel@tonic-gate * 1467c478bd9Sstevel@tonic-gate * Each graph/zone pair also has a copy of this variable, which is protected by 1477c478bd9Sstevel@tonic-gate * the graph's mutex. 1487c478bd9Sstevel@tonic-gate * 1497c478bd9Sstevel@tonic-gate * The per-graph copies are used to synchronize lock requests with shutdown 1507c478bd9Sstevel@tonic-gate * requests. The global copy is used to initialize the per-graph field when a 1517c478bd9Sstevel@tonic-gate * new graph is created. 
1527c478bd9Sstevel@tonic-gate */ 1537c478bd9Sstevel@tonic-gate struct flock_globals { 1547c478bd9Sstevel@tonic-gate flk_lockmgr_status_t flk_lockmgr_status; 1557c478bd9Sstevel@tonic-gate flk_lockmgr_status_t lockmgr_status[HASH_SIZE]; 1567c478bd9Sstevel@tonic-gate }; 1577c478bd9Sstevel@tonic-gate 1587c478bd9Sstevel@tonic-gate zone_key_t flock_zone_key; 1597c478bd9Sstevel@tonic-gate 1607c478bd9Sstevel@tonic-gate static void create_flock(lock_descriptor_t *, flock64_t *); 1617c478bd9Sstevel@tonic-gate static lock_descriptor_t *flk_get_lock(void); 1627c478bd9Sstevel@tonic-gate static void flk_free_lock(lock_descriptor_t *lock); 1637c478bd9Sstevel@tonic-gate static void flk_get_first_blocking_lock(lock_descriptor_t *request); 1647c478bd9Sstevel@tonic-gate static int flk_process_request(lock_descriptor_t *); 1657c478bd9Sstevel@tonic-gate static int flk_add_edge(lock_descriptor_t *, lock_descriptor_t *, int, int); 1667c478bd9Sstevel@tonic-gate static edge_t *flk_get_edge(void); 1677c478bd9Sstevel@tonic-gate static int flk_wait_execute_request(lock_descriptor_t *); 1687c478bd9Sstevel@tonic-gate static int flk_relation(lock_descriptor_t *, lock_descriptor_t *); 1697c478bd9Sstevel@tonic-gate static void flk_insert_active_lock(lock_descriptor_t *); 1707c478bd9Sstevel@tonic-gate static void flk_delete_active_lock(lock_descriptor_t *, int); 1717c478bd9Sstevel@tonic-gate static void flk_insert_sleeping_lock(lock_descriptor_t *); 1727c478bd9Sstevel@tonic-gate static void flk_graph_uncolor(graph_t *); 1737c478bd9Sstevel@tonic-gate static void flk_wakeup(lock_descriptor_t *, int); 1747c478bd9Sstevel@tonic-gate static void flk_free_edge(edge_t *); 1757c478bd9Sstevel@tonic-gate static void flk_recompute_dependencies(lock_descriptor_t *, 1767c478bd9Sstevel@tonic-gate lock_descriptor_t **, int, int); 1777c478bd9Sstevel@tonic-gate static int flk_find_barriers(lock_descriptor_t *); 1787c478bd9Sstevel@tonic-gate static void flk_update_barriers(lock_descriptor_t *); 
1797c478bd9Sstevel@tonic-gate static int flk_color_reachables(lock_descriptor_t *); 1807c478bd9Sstevel@tonic-gate static int flk_canceled(lock_descriptor_t *); 1817c478bd9Sstevel@tonic-gate static void flk_delete_locks_by_sysid(lock_descriptor_t *); 1827c478bd9Sstevel@tonic-gate static void report_blocker(lock_descriptor_t *, lock_descriptor_t *); 1837c478bd9Sstevel@tonic-gate static void wait_for_lock(lock_descriptor_t *); 1847c478bd9Sstevel@tonic-gate static void unlock_lockmgr_granted(struct flock_globals *); 1857c478bd9Sstevel@tonic-gate static void wakeup_sleeping_lockmgr_locks(struct flock_globals *); 1867c478bd9Sstevel@tonic-gate 1877c478bd9Sstevel@tonic-gate /* Clustering hooks */ 1887c478bd9Sstevel@tonic-gate static void cl_flk_change_nlm_state_all_locks(int, flk_nlm_status_t); 1897c478bd9Sstevel@tonic-gate static void cl_flk_wakeup_sleeping_nlm_locks(int); 1907c478bd9Sstevel@tonic-gate static void cl_flk_unlock_nlm_granted(int); 1917c478bd9Sstevel@tonic-gate 1927c478bd9Sstevel@tonic-gate #ifdef DEBUG 1937c478bd9Sstevel@tonic-gate static int check_lock_transition(int, int); 1947c478bd9Sstevel@tonic-gate static void check_sleeping_locks(graph_t *); 1957c478bd9Sstevel@tonic-gate static void check_active_locks(graph_t *); 1967c478bd9Sstevel@tonic-gate static int no_path(lock_descriptor_t *, lock_descriptor_t *); 1977c478bd9Sstevel@tonic-gate static void path(lock_descriptor_t *, lock_descriptor_t *); 1987c478bd9Sstevel@tonic-gate static void check_owner_locks(graph_t *, pid_t, int, vnode_t *); 1997c478bd9Sstevel@tonic-gate static int level_one_path(lock_descriptor_t *, lock_descriptor_t *); 2007c478bd9Sstevel@tonic-gate static int level_two_path(lock_descriptor_t *, lock_descriptor_t *, int); 2017c478bd9Sstevel@tonic-gate #endif 2027c478bd9Sstevel@tonic-gate 203*da6c28aaSamw /* proc_graph function definitions */ 2047c478bd9Sstevel@tonic-gate static int flk_check_deadlock(lock_descriptor_t *); 2057c478bd9Sstevel@tonic-gate static void 
flk_proc_graph_uncolor(void);
static proc_vertex_t *flk_get_proc_vertex(lock_descriptor_t *);
static proc_edge_t *flk_get_proc_edge(void);
static void flk_proc_release(proc_vertex_t *);
static void flk_free_proc_edge(proc_edge_t *);
static void flk_update_proc_graph(edge_t *, int);

/* Non-blocking mandatory locking */
static int lock_blocks_io(nbl_op_t, u_offset_t, ssize_t, int, u_offset_t,
        u_offset_t);

/*
 * Return this zone's flock globals (struct flock_globals, declared above).
 * Asserts that flock_zone_key has been initialized, i.e. that the KLM
 * module is loaded; callers must not use this on the KLM-not-loaded path.
 */
static struct flock_globals *
flk_get_globals(void)
{
        /*
         * The KLM module had better be loaded if we're attempting to handle
         * lockmgr requests.
         */
        ASSERT(flock_zone_key != ZONE_KEY_UNINITIALIZED);
        return (zone_getspecific(flock_zone_key, curproc->p_zone));
}

/*
 * Return the current lock manager status for this zone.
 * Caller must hold flock_lock (asserted below).
 */
static flk_lockmgr_status_t
flk_get_lockmgr_status(void)
{
        struct flock_globals *fg;

        ASSERT(MUTEX_HELD(&flock_lock));

        if (flock_zone_key == ZONE_KEY_UNINITIALIZED) {
                /*
                 * KLM module not loaded; lock manager definitely not running.
                 */
                return (FLK_LOCKMGR_DOWN);
        }
        fg = flk_get_globals();
        return (fg->flk_lockmgr_status);
}

/*
 * Routine called from fs_frlock in fs/fs_subr.c
 *
 * Common entry point for record-lock requests against vnode vp.
 * lckdat describes the requested lock (and receives the answer for
 * query requests); cmd is a bitmask of SETFLCK/INOFLCK/SLPFLCK/
 * RCMDLCK/PCMDLCK/NBMLCK flags; flag carries the open mode (FREAD/
 * FWRITE) used for permission checking; offset is the base for
 * SEEK_CUR-style ranges; flk_cbp is an optional callback list invoked
 * around sleeping (see flk_invoke_callbacks below).
 *
 * Returns 0 on success or an errno: EBADF (lock type inconsistent with
 * open mode), ENOLCK (lock manager not up / NLM server shutting down),
 * EINVAL (bad range, bad l_type, or F_UNLKSYS with sysid 0), EPERM
 * (F_UNLKSYS without NFS privilege), or an error from request processing.
 */
int
reclock(vnode_t *vp,
        flock64_t *lckdat,
        int cmd,
        int flag,
        u_offset_t offset,
        flk_callback_t *flk_cbp)
{
        lock_descriptor_t stack_lock_request;
        lock_descriptor_t *lock_request;
        int error = 0;
        graph_t *gp;
        int nlmid;

        /*
         * Check access permissions
         */
        if ((cmd & SETFLCK) &&
            ((lckdat->l_type == F_RDLCK && (flag & FREAD) == 0) ||
            (lckdat->l_type == F_WRLCK && (flag & FWRITE) == 0)))
                return (EBADF);

        /*
         * for query and unlock we use the stack_lock_request
         */

        if ((lckdat->l_type == F_UNLCK) ||
            !((cmd & INOFLCK) || (cmd & SETFLCK))) {
                lock_request = &stack_lock_request;
                (void) bzero((caddr_t)lock_request,
                    sizeof (lock_descriptor_t));

                /*
                 * following is added to make the assertions in
                 * flk_execute_request() to pass through
                 */

                lock_request->l_edge.edge_in_next = &lock_request->l_edge;
                lock_request->l_edge.edge_in_prev = &lock_request->l_edge;
                lock_request->l_edge.edge_adj_next = &lock_request->l_edge;
                lock_request->l_edge.edge_adj_prev = &lock_request->l_edge;
                lock_request->l_status = FLK_INITIAL_STATE;
        } else {
                lock_request = flk_get_lock();
        }
        lock_request->l_state = 0;
        lock_request->l_vnode = vp;
        lock_request->l_zoneid = getzoneid();

        /*
         * Convert the request range into the canonical start and end
         * values.  The NLM protocol supports locking over the entire
         * 32-bit range, so there's no range checking for remote requests,
         * but we still need to verify that local requests obey the rules.
         */
        /* Clustering */
        if ((cmd & (RCMDLCK | PCMDLCK)) != 0) {
                ASSERT(lckdat->l_whence == 0);
                lock_request->l_start = lckdat->l_start;
                lock_request->l_end = (lckdat->l_len == 0) ? MAX_U_OFFSET_T :
                    lckdat->l_start + (lckdat->l_len - 1);
        } else {
                /* check the validity of the lock range */
                error = flk_convert_lock_data(vp, lckdat,
                    &lock_request->l_start, &lock_request->l_end,
                    offset);
                if (error) {
                        goto done;
                }
                error = flk_check_lock_data(lock_request->l_start,
                    lock_request->l_end, MAXEND);
                if (error) {
                        goto done;
                }
        }

        ASSERT(lock_request->l_end >= lock_request->l_start);

        /* Translate the cmd bits into l_state flags on the descriptor. */
        lock_request->l_type = lckdat->l_type;
        if (cmd & INOFLCK)
                lock_request->l_state |= IO_LOCK;
        if (cmd & SLPFLCK)
                lock_request->l_state |= WILLING_TO_SLEEP_LOCK;
        if (cmd & RCMDLCK)
                lock_request->l_state |= LOCKMGR_LOCK;
        if (cmd & NBMLCK)
                lock_request->l_state |= NBMAND_LOCK;
        /*
         * Clustering: set flag for PXFS locks
         * We do not _only_ check for the PCMDLCK flag because PXFS locks could
         * also be of type 'RCMDLCK'.
         * We do not _only_ check the GETPXFSID() macro because local PXFS
         * clients use a pxfsid of zero to permit deadlock detection in the LLM.
         */

        if ((cmd & PCMDLCK) || (GETPXFSID(lckdat->l_sysid) != 0)) {
                lock_request->l_state |= PXFS_LOCK;
        }
        if (!((cmd & SETFLCK) || (cmd & INOFLCK))) {
                if (lock_request->l_type == F_RDLCK ||
                    lock_request->l_type == F_WRLCK)
                        lock_request->l_state |= QUERY_LOCK;
        }
        lock_request->l_flock = (*lckdat);
        lock_request->l_callbacks = flk_cbp;

        /*
         * We are ready for processing the request
         */
        if (IS_LOCKMGR(lock_request)) {
                /*
                 * If the lock request is an NLM server request ....
                 */
                if (nlm_status_size == 0) { /* not booted as cluster */
                        mutex_enter(&flock_lock);
                        /*
                         * Bail out if this is a lock manager request and the
                         * lock manager is not supposed to be running.
                         */
                        if (flk_get_lockmgr_status() != FLK_LOCKMGR_UP) {
                                mutex_exit(&flock_lock);
                                error = ENOLCK;
                                goto done;
                        }
                        mutex_exit(&flock_lock);
                } else {                        /* booted as a cluster */
                        nlmid = GETNLMID(lock_request->l_flock.l_sysid);
                        ASSERT(nlmid <= nlm_status_size && nlmid >= 0);

                        mutex_enter(&nlm_reg_lock);
                        /*
                         * If the NLM registry does not know about this
                         * NLM server making the request, add its nlmid
                         * to the registry.
                         */
                        if (FLK_REGISTRY_IS_NLM_UNKNOWN(nlm_reg_status,
                            nlmid)) {
                                FLK_REGISTRY_ADD_NLMID(nlm_reg_status, nlmid);
                        } else if (!FLK_REGISTRY_IS_NLM_UP(nlm_reg_status,
                            nlmid)) {
                                /*
                                 * If the NLM server is already known (has made
                                 * previous lock requests) and its state is
                                 * not NLM_UP (means that NLM server is
                                 * shutting down), then bail out with an
                                 * error to deny the lock request.
                                 */
                                mutex_exit(&nlm_reg_lock);
                                error = ENOLCK;
                                goto done;
                        }
                        mutex_exit(&nlm_reg_lock);
                }
        }

        /* Now get the lock graph for a particular vnode */
        gp = flk_get_lock_graph(vp, FLK_INIT_GRAPH);

        /*
         * We drop rwlock here otherwise this might end up causing a
         * deadlock if this IOLOCK sleeps. (bugid # 1183392).
         */

        if (IS_IO_LOCK(lock_request)) {
                VOP_RWUNLOCK(vp,
                    (lock_request->l_type == F_RDLCK) ?
                    V_WRITELOCK_FALSE : V_WRITELOCK_TRUE, NULL);
        }
        mutex_enter(&gp->gp_mutex);

        lock_request->l_state |= REFERENCED_LOCK;
        lock_request->l_graph = gp;

        switch (lock_request->l_type) {
        case F_RDLCK:
        case F_WRLCK:
                if (IS_QUERY_LOCK(lock_request)) {
                        /* Query: report the first blocker back via lckdat. */
                        flk_get_first_blocking_lock(lock_request);
                        (*lckdat) = lock_request->l_flock;
                        break;
                }

                /* process the request now */

                error = flk_process_request(lock_request);
                break;

        case F_UNLCK:
                /* unlock request will not block so execute it immediately */

                if (IS_LOCKMGR(lock_request) &&
                    flk_canceled(lock_request)) {
                        error = 0;
                } else {
                        error = flk_execute_request(lock_request);
                }
                break;

        case F_UNLKSYS:
                /*
                 * Recovery mechanism to release lock manager locks when
                 * NFS client crashes and restart. NFS server will clear
                 * old locks and grant new locks.
                 */

                if (lock_request->l_flock.l_sysid == 0) {
                        mutex_exit(&gp->gp_mutex);
                        return (EINVAL);
                }
                if (secpolicy_nfs(CRED()) != 0) {
                        mutex_exit(&gp->gp_mutex);
                        return (EPERM);
                }
                flk_delete_locks_by_sysid(lock_request);
                lock_request->l_state &= ~REFERENCED_LOCK;
                flk_set_state(lock_request, FLK_DEAD_STATE);
                flk_free_lock(lock_request);
                mutex_exit(&gp->gp_mutex);
                return (0);

        default:
                error = EINVAL;
                break;
        }

        /* Clustering: For blocked PXFS locks, return */
        if (error == PXFS_LOCK_BLOCKED) {
                lock_request->l_state &= ~REFERENCED_LOCK;
                mutex_exit(&gp->gp_mutex);
                return (error);
        }

        /*
         * Now that we have seen the status of locks in the system for
         * this vnode we acquire the rwlock if it is an IO_LOCK.
         */

        if (IS_IO_LOCK(lock_request)) {
                (void) VOP_RWLOCK(vp,
                    (lock_request->l_type == F_RDLCK) ?
                    V_WRITELOCK_FALSE : V_WRITELOCK_TRUE, NULL);
                if (!error) {
                        lckdat->l_type = F_UNLCK;

                        /*
                         * This wake up is needed otherwise
                         * if IO_LOCK has slept the dependents on this
                         * will not be woken up at all. (bugid # 1185482).
                         */

                        flk_wakeup(lock_request, 1);
                        flk_set_state(lock_request, FLK_DEAD_STATE);
                        flk_free_lock(lock_request);
                }
                /*
                 * else if error had occurred either flk_process_request()
                 * has returned EDEADLK in which case there will be no
                 * dependents for this lock or EINTR from flk_wait_execute_
                 * request() in which case flk_cancel_sleeping_lock()
                 * would have been done. same is true with EBADF.
                 */
        }

        /*
         * Stack-allocated descriptors are never freed; heap ones are
         * released unless still live in the graph.
         */
        if (lock_request == &stack_lock_request) {
                flk_set_state(lock_request, FLK_DEAD_STATE);
        } else {
                lock_request->l_state &= ~REFERENCED_LOCK;
                if ((error != 0) || IS_DELETED(lock_request)) {
                        flk_set_state(lock_request, FLK_DEAD_STATE);
                        flk_free_lock(lock_request);
                }
        }

        mutex_exit(&gp->gp_mutex);
        return (error);

done:
        /* Early-exit cleanup: request never entered the graph. */
        flk_set_state(lock_request, FLK_DEAD_STATE);
        if (lock_request != &stack_lock_request)
                flk_free_lock(lock_request);
        return (error);
}

/*
 * Invoke the callbacks in the given list.  If before sleeping, invoke in
 * list order.  If after sleeping, invoke in reverse order.
 *
 * CPR (suspend/resume) support: if one of the callbacks returns a
 * callb_cpr_t, return it.   This will be used to make the thread CPR-safe
 * while it is sleeping.  There should be at most one callb_cpr_t for the
 * thread.
 * XXX This is unnecessarily complicated.
 The CPR information should just
 * get passed in directly through VOP_FRLOCK and reclock, rather than
 * sneaking it in via a callback.
 */

callb_cpr_t *
flk_invoke_callbacks(flk_callback_t *cblist, flk_cb_when_t when)
{
        callb_cpr_t *cpr_callbackp = NULL;
        callb_cpr_t *one_result;
        flk_callback_t *cb;

        if (cblist == NULL)
                return (NULL);

        /*
         * The list is circular and doubly linked (cb_next/cb_prev), so
         * each do/while below runs every node exactly once, stopping when
         * the cursor wraps back to its starting element.
         */
        if (when == FLK_BEFORE_SLEEP) {
                cb = cblist;
                do {
                        one_result = (*cb->cb_callback)(when, cb->cb_data);
                        if (one_result != NULL) {
                                ASSERT(cpr_callbackp == NULL);
                                cpr_callbackp = one_result;
                        }
                        cb = cb->cb_next;
                } while (cb != cblist);
        } else {
                /* Reverse order: start at the tail (head's cb_prev). */
                cb = cblist->cb_prev;
                do {
                        one_result = (*cb->cb_callback)(when, cb->cb_data);
                        if (one_result != NULL) {
                                cpr_callbackp = one_result;
                        }
                        cb = cb->cb_prev;
                } while (cb != cblist->cb_prev);
        }

        return (cpr_callbackp);
}

/*
 *
Initialize a flk_callback_t to hold the given callback. 5807c478bd9Sstevel@tonic-gate */ 5817c478bd9Sstevel@tonic-gate 5827c478bd9Sstevel@tonic-gate void 5837c478bd9Sstevel@tonic-gate flk_init_callback(flk_callback_t *flk_cb, 5847c478bd9Sstevel@tonic-gate callb_cpr_t *(*cb_fcn)(flk_cb_when_t, void *), void *cbdata) 5857c478bd9Sstevel@tonic-gate { 5867c478bd9Sstevel@tonic-gate flk_cb->cb_next = flk_cb; 5877c478bd9Sstevel@tonic-gate flk_cb->cb_prev = flk_cb; 5887c478bd9Sstevel@tonic-gate flk_cb->cb_callback = cb_fcn; 5897c478bd9Sstevel@tonic-gate flk_cb->cb_data = cbdata; 5907c478bd9Sstevel@tonic-gate } 5917c478bd9Sstevel@tonic-gate 5927c478bd9Sstevel@tonic-gate /* 5937c478bd9Sstevel@tonic-gate * Initialize an flk_callback_t and then link it into the head of an 5947c478bd9Sstevel@tonic-gate * existing list (which may be NULL). 5957c478bd9Sstevel@tonic-gate */ 5967c478bd9Sstevel@tonic-gate 5977c478bd9Sstevel@tonic-gate void 5987c478bd9Sstevel@tonic-gate flk_add_callback(flk_callback_t *newcb, 5997c478bd9Sstevel@tonic-gate callb_cpr_t *(*cb_fcn)(flk_cb_when_t, void *), 6007c478bd9Sstevel@tonic-gate void *cbdata, flk_callback_t *cblist) 6017c478bd9Sstevel@tonic-gate { 6027c478bd9Sstevel@tonic-gate flk_init_callback(newcb, cb_fcn, cbdata); 6037c478bd9Sstevel@tonic-gate 6047c478bd9Sstevel@tonic-gate if (cblist == NULL) 6057c478bd9Sstevel@tonic-gate return; 6067c478bd9Sstevel@tonic-gate 6077c478bd9Sstevel@tonic-gate newcb->cb_prev = cblist->cb_prev; 6087c478bd9Sstevel@tonic-gate newcb->cb_next = cblist; 6097c478bd9Sstevel@tonic-gate cblist->cb_prev->cb_next = newcb; 6107c478bd9Sstevel@tonic-gate cblist->cb_prev = newcb; 6117c478bd9Sstevel@tonic-gate } 6127c478bd9Sstevel@tonic-gate /* ONC_PLUS EXTRACT END */ 6137c478bd9Sstevel@tonic-gate 6147c478bd9Sstevel@tonic-gate /* 6157c478bd9Sstevel@tonic-gate * Initialize the flk_edge_cache data structure and create the 6167c478bd9Sstevel@tonic-gate * nlm_reg_status array. 
6177c478bd9Sstevel@tonic-gate */ 6187c478bd9Sstevel@tonic-gate 6197c478bd9Sstevel@tonic-gate void 6207c478bd9Sstevel@tonic-gate flk_init(void) 6217c478bd9Sstevel@tonic-gate { 6227c478bd9Sstevel@tonic-gate uint_t i; 6237c478bd9Sstevel@tonic-gate 6247c478bd9Sstevel@tonic-gate flk_edge_cache = kmem_cache_create("flk_edges", 6257c478bd9Sstevel@tonic-gate sizeof (struct edge), 0, NULL, NULL, NULL, NULL, NULL, 0); 6267c478bd9Sstevel@tonic-gate if (flk_edge_cache == NULL) { 6277c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "Couldn't create flk_edge_cache\n"); 6287c478bd9Sstevel@tonic-gate } 6297c478bd9Sstevel@tonic-gate /* 6307c478bd9Sstevel@tonic-gate * Create the NLM registry object. 6317c478bd9Sstevel@tonic-gate */ 6327c478bd9Sstevel@tonic-gate 6337c478bd9Sstevel@tonic-gate if (cluster_bootflags & CLUSTER_BOOTED) { 6347c478bd9Sstevel@tonic-gate /* 6357c478bd9Sstevel@tonic-gate * This routine tells you the maximum node id that will be used 6367c478bd9Sstevel@tonic-gate * in the cluster. This number will be the size of the nlm 6377c478bd9Sstevel@tonic-gate * registry status array. We add 1 because we will be using 6387c478bd9Sstevel@tonic-gate * all entries indexed from 0 to maxnodeid; e.g., from 0 6397c478bd9Sstevel@tonic-gate * to 64, for a total of 65 entries. 
6407c478bd9Sstevel@tonic-gate */ 6417c478bd9Sstevel@tonic-gate nlm_status_size = clconf_maximum_nodeid() + 1; 6427c478bd9Sstevel@tonic-gate } else { 6437c478bd9Sstevel@tonic-gate nlm_status_size = 0; 6447c478bd9Sstevel@tonic-gate } 6457c478bd9Sstevel@tonic-gate 6467c478bd9Sstevel@tonic-gate if (nlm_status_size != 0) { /* booted as a cluster */ 6477c478bd9Sstevel@tonic-gate nlm_reg_status = (flk_nlm_status_t *) 6487c478bd9Sstevel@tonic-gate kmem_alloc(sizeof (flk_nlm_status_t) * nlm_status_size, 6497c478bd9Sstevel@tonic-gate KM_SLEEP); 6507c478bd9Sstevel@tonic-gate 6517c478bd9Sstevel@tonic-gate /* initialize all NLM states in array to NLM_UNKNOWN */ 6527c478bd9Sstevel@tonic-gate for (i = 0; i < nlm_status_size; i++) { 6537c478bd9Sstevel@tonic-gate nlm_reg_status[i] = FLK_NLM_UNKNOWN; 6547c478bd9Sstevel@tonic-gate } 6557c478bd9Sstevel@tonic-gate } 6567c478bd9Sstevel@tonic-gate } 6577c478bd9Sstevel@tonic-gate 6587c478bd9Sstevel@tonic-gate /* 6597c478bd9Sstevel@tonic-gate * Zone constructor/destructor callbacks to be executed when a zone is 6607c478bd9Sstevel@tonic-gate * created/destroyed. 
6617c478bd9Sstevel@tonic-gate */ 6627c478bd9Sstevel@tonic-gate /* ARGSUSED */ 6637c478bd9Sstevel@tonic-gate void * 6647c478bd9Sstevel@tonic-gate flk_zone_init(zoneid_t zoneid) 6657c478bd9Sstevel@tonic-gate { 6667c478bd9Sstevel@tonic-gate struct flock_globals *fg; 6677c478bd9Sstevel@tonic-gate uint_t i; 6687c478bd9Sstevel@tonic-gate 6697c478bd9Sstevel@tonic-gate fg = kmem_alloc(sizeof (*fg), KM_SLEEP); 6707c478bd9Sstevel@tonic-gate fg->flk_lockmgr_status = FLK_LOCKMGR_UP; 6717c478bd9Sstevel@tonic-gate for (i = 0; i < HASH_SIZE; i++) 6727c478bd9Sstevel@tonic-gate fg->lockmgr_status[i] = FLK_LOCKMGR_UP; 6737c478bd9Sstevel@tonic-gate return (fg); 6747c478bd9Sstevel@tonic-gate } 6757c478bd9Sstevel@tonic-gate 6767c478bd9Sstevel@tonic-gate /* ARGSUSED */ 6777c478bd9Sstevel@tonic-gate void 6787c478bd9Sstevel@tonic-gate flk_zone_fini(zoneid_t zoneid, void *data) 6797c478bd9Sstevel@tonic-gate { 6807c478bd9Sstevel@tonic-gate struct flock_globals *fg = data; 6817c478bd9Sstevel@tonic-gate 6827c478bd9Sstevel@tonic-gate kmem_free(fg, sizeof (*fg)); 6837c478bd9Sstevel@tonic-gate } 6847c478bd9Sstevel@tonic-gate 6857c478bd9Sstevel@tonic-gate /* 686*da6c28aaSamw * Get a lock_descriptor structure with initialization of edge lists. 
6877c478bd9Sstevel@tonic-gate */ 6887c478bd9Sstevel@tonic-gate 6897c478bd9Sstevel@tonic-gate static lock_descriptor_t * 6907c478bd9Sstevel@tonic-gate flk_get_lock(void) 6917c478bd9Sstevel@tonic-gate { 6927c478bd9Sstevel@tonic-gate lock_descriptor_t *l; 6937c478bd9Sstevel@tonic-gate 6947c478bd9Sstevel@tonic-gate l = kmem_zalloc(sizeof (lock_descriptor_t), KM_SLEEP); 6957c478bd9Sstevel@tonic-gate 6967c478bd9Sstevel@tonic-gate cv_init(&l->l_cv, NULL, CV_DRIVER, NULL); 6977c478bd9Sstevel@tonic-gate l->l_edge.edge_in_next = &l->l_edge; 6987c478bd9Sstevel@tonic-gate l->l_edge.edge_in_prev = &l->l_edge; 6997c478bd9Sstevel@tonic-gate l->l_edge.edge_adj_next = &l->l_edge; 7007c478bd9Sstevel@tonic-gate l->l_edge.edge_adj_prev = &l->l_edge; 7017c478bd9Sstevel@tonic-gate l->pvertex = -1; 7027c478bd9Sstevel@tonic-gate l->l_status = FLK_INITIAL_STATE; 7037c478bd9Sstevel@tonic-gate flk_lock_allocs++; 7047c478bd9Sstevel@tonic-gate return (l); 7057c478bd9Sstevel@tonic-gate } 7067c478bd9Sstevel@tonic-gate 7077c478bd9Sstevel@tonic-gate /* 7087c478bd9Sstevel@tonic-gate * Free a lock_descriptor structure. Just sets the DELETED_LOCK flag 7097c478bd9Sstevel@tonic-gate * when some thread has a reference to it as in reclock(). 
7107c478bd9Sstevel@tonic-gate */ 7117c478bd9Sstevel@tonic-gate 7127c478bd9Sstevel@tonic-gate void 7137c478bd9Sstevel@tonic-gate flk_free_lock(lock_descriptor_t *lock) 7147c478bd9Sstevel@tonic-gate { 7157c478bd9Sstevel@tonic-gate ASSERT(IS_DEAD(lock)); 7167c478bd9Sstevel@tonic-gate if (IS_REFERENCED(lock)) { 7177c478bd9Sstevel@tonic-gate lock->l_state |= DELETED_LOCK; 7187c478bd9Sstevel@tonic-gate return; 7197c478bd9Sstevel@tonic-gate } 7207c478bd9Sstevel@tonic-gate flk_lock_frees++; 7217c478bd9Sstevel@tonic-gate kmem_free((void *)lock, sizeof (lock_descriptor_t)); 7227c478bd9Sstevel@tonic-gate } 7237c478bd9Sstevel@tonic-gate 7247c478bd9Sstevel@tonic-gate void 7257c478bd9Sstevel@tonic-gate flk_set_state(lock_descriptor_t *lock, int new_state) 7267c478bd9Sstevel@tonic-gate { 7277c478bd9Sstevel@tonic-gate /* 7287c478bd9Sstevel@tonic-gate * Locks in the sleeping list may be woken up in a number of ways, 729*da6c28aaSamw * and more than once. If a sleeping lock is signaled awake more 7307c478bd9Sstevel@tonic-gate * than once, then it may or may not change state depending on its 7317c478bd9Sstevel@tonic-gate * current state. 7327c478bd9Sstevel@tonic-gate * Also note that NLM locks that are sleeping could be moved to an 7337c478bd9Sstevel@tonic-gate * interrupted state more than once if the unlock request is 7347c478bd9Sstevel@tonic-gate * retransmitted by the NLM client - the second time around, this is 7357c478bd9Sstevel@tonic-gate * just a nop. 736*da6c28aaSamw * The ordering of being signaled awake is: 7377c478bd9Sstevel@tonic-gate * INTERRUPTED_STATE > CANCELLED_STATE > GRANTED_STATE. 7387c478bd9Sstevel@tonic-gate * The checks below implement this ordering. 
7397c478bd9Sstevel@tonic-gate */ 7407c478bd9Sstevel@tonic-gate if (IS_INTERRUPTED(lock)) { 7417c478bd9Sstevel@tonic-gate if ((new_state == FLK_CANCELLED_STATE) || 7427c478bd9Sstevel@tonic-gate (new_state == FLK_GRANTED_STATE) || 7437c478bd9Sstevel@tonic-gate (new_state == FLK_INTERRUPTED_STATE)) { 7447c478bd9Sstevel@tonic-gate return; 7457c478bd9Sstevel@tonic-gate } 7467c478bd9Sstevel@tonic-gate } 7477c478bd9Sstevel@tonic-gate if (IS_CANCELLED(lock)) { 7487c478bd9Sstevel@tonic-gate if ((new_state == FLK_GRANTED_STATE) || 7497c478bd9Sstevel@tonic-gate (new_state == FLK_CANCELLED_STATE)) { 7507c478bd9Sstevel@tonic-gate return; 7517c478bd9Sstevel@tonic-gate } 7527c478bd9Sstevel@tonic-gate } 7537c478bd9Sstevel@tonic-gate CHECK_LOCK_TRANSITION(lock->l_status, new_state); 7547c478bd9Sstevel@tonic-gate if (IS_PXFS(lock)) { 7557c478bd9Sstevel@tonic-gate cl_flk_state_transition_notify(lock, lock->l_status, new_state); 7567c478bd9Sstevel@tonic-gate } 7577c478bd9Sstevel@tonic-gate lock->l_status = new_state; 7587c478bd9Sstevel@tonic-gate } 7597c478bd9Sstevel@tonic-gate 7607c478bd9Sstevel@tonic-gate /* 7617c478bd9Sstevel@tonic-gate * Routine that checks whether there are any blocking locks in the system. 7627c478bd9Sstevel@tonic-gate * 7637c478bd9Sstevel@tonic-gate * The policy followed is if a write lock is sleeping we don't allow read 7647c478bd9Sstevel@tonic-gate * locks before this write lock even though there may not be any active 7657c478bd9Sstevel@tonic-gate * locks corresponding to the read locks' region. 7667c478bd9Sstevel@tonic-gate * 7677c478bd9Sstevel@tonic-gate * flk_add_edge() function adds an edge between l1 and l2 iff there 7687c478bd9Sstevel@tonic-gate * is no path between l1 and l2. This is done to have a "minimum 7697c478bd9Sstevel@tonic-gate * storage representation" of the dependency graph. 
 *
 * Another property of the graph is since only the new request throws
 * edges to the existing locks in the graph, the graph is always topologically
 * ordered.
 */

/*
 * Process 'request' against the active and sleeping locks of its vnode:
 * grant it (flk_execute_request), queue it to sleep
 * (flk_wait_execute_request), or fail with EAGAIN/EDEADLK.
 * Caller holds gp->gp_mutex.
 */
static int
flk_process_request(lock_descriptor_t *request)
{
	graph_t	*gp = request->l_graph;
	lock_descriptor_t *lock;
	int request_blocked_by_active = 0;
	int request_blocked_by_granted = 0;
	int request_blocked_by_sleeping = 0;
	vnode_t	*vp = request->l_vnode;
	int	error = 0;
	int request_will_wait = 0;
	int found_covering_lock = 0;
	lock_descriptor_t *covered_by = NULL;

	ASSERT(MUTEX_HELD(&gp->gp_mutex));
	request_will_wait = IS_WILLING_TO_SLEEP(request);

	/*
	 * check active locks
	 */

	SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);


	if (lock) {
		do {
			if (BLOCKS(lock, request)) {
				/* a non-waiting request fails right away */
				if (!request_will_wait)
					return (EAGAIN);
				request_blocked_by_active = 1;
				break;
			}
			/*
			 * Grant lock if it is for the same owner holding active
			 * lock that covers the request.
			 */

			if (SAME_OWNER(lock, request) &&
			    COVERS(lock, request) &&
			    (request->l_type == F_RDLCK))
				return (flk_execute_request(request));
			lock = lock->l_next;
		} while (lock->l_vnode == vp);
	}

	if (!request_blocked_by_active) {
		lock_descriptor_t *lk[1];
		lock_descriptor_t *first_glock = NULL;
		/*
		 * Shall we grant this?! NO!!
		 * What about those locks that were just granted and still
		 * in sleep queue. Those threads are woken up and so locks
		 * are almost active.
		 */
		SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
		if (lock) {
			do {
				if (BLOCKS(lock, request)) {
					if (IS_GRANTED(lock)) {
						request_blocked_by_granted = 1;
					} else {
						request_blocked_by_sleeping = 1;
					}
				}

				lock = lock->l_next;
			} while ((lock->l_vnode == vp));
			/*
			 * The loop exited with 'lock' one past this vnode's
			 * sleep list, so l_prev is the list's last entry.
			 */
			first_glock = lock->l_prev;
			ASSERT(first_glock->l_vnode == vp);
		}

		if (request_blocked_by_granted)
			goto block;

		if (!request_blocked_by_sleeping) {
			/*
			 * If the request isn't going to be blocked by a
			 * sleeping request, we know that it isn't going to
			 * be blocked; we can just execute the request --
			 * without performing costly deadlock detection.
			 */
			ASSERT(!request_blocked_by_active);
			return (flk_execute_request(request));
		} else if (request->l_type == F_RDLCK) {
			/*
			 * If we have a sleeping writer in the requested
			 * lock's range, block.
			 */
			goto block;
		}

		/*
		 * Blocked only by sleeping, non-granted locks: recompute
		 * the dependencies of the active locks (and of the granted
		 * locks, scanned backward from first_glock) against this
		 * request before executing it.
		 */
		lk[0] = request;
		request->l_state |= RECOMPUTE_LOCK;
		SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
		if (lock) {
			do {
				flk_recompute_dependencies(lock, lk, 1, 0);
				lock = lock->l_next;
			} while (lock->l_vnode == vp);
		}
		lock = first_glock;
		if (lock) {
			do {
				if (IS_GRANTED(lock)) {
					flk_recompute_dependencies(lock, lk, 1, 0);
				}
				lock = lock->l_prev;
			} while ((lock->l_vnode == vp));
		}
		request->l_state &= ~RECOMPUTE_LOCK;
		if (!NO_DEPENDENTS(request) && flk_check_deadlock(request))
			return (EDEADLK);
		return (flk_execute_request(request));
	}

block:
	if (request_will_wait)
		flk_graph_uncolor(gp);

	/* check sleeping locks */

	SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);

	/*
	 * If we find a sleeping write lock that is a superset of the
	 * region wanted by request we can be assured that by adding an
	 * edge to this write lock we have paths to all locks in the
	 * graph that blocks the request except in one case and that is why
	 * another check for SAME_OWNER in the loop below. The exception
	 * case is when this process that owns the sleeping write lock 'l1'
	 * has other locks l2, l3, l4 that are in the system and arrived
	 * before l1. l1 does not have path to these locks as they are from
	 * same process. We break when we find a second covering sleeping
	 * lock l5 owned by a process different from that owning l1, because
	 * there cannot be any of l2, l3, l4, etc., arrived before l5, and if
	 * it has l1 would have produced a deadlock already.
	 */

	if (lock) {
		do {
			if (BLOCKS(lock, request)) {
				if (!request_will_wait)
					return (EAGAIN);
				if (COVERS(lock, request) &&
				    lock->l_type == F_WRLCK) {
					if (found_covering_lock &&
					    !SAME_OWNER(lock, covered_by)) {
						/* second covering writer from
						 * a different owner: done */
						found_covering_lock++;
						break;
					}
					found_covering_lock = 1;
					covered_by = lock;
				}
				if (found_covering_lock &&
				    !SAME_OWNER(lock, covered_by)) {
					lock = lock->l_next;
					continue;
				}
				if ((error = flk_add_edge(request, lock,
				    !found_covering_lock, 0)))
					return (error);
			}
			lock = lock->l_next;
		} while (lock->l_vnode == vp);
	}

/*
 * found_covering_lock == 2 iff at this point 'request' has paths
 * to all locks that blocks 'request'. found_covering_lock == 1 iff at this
 * point 'request' has paths to all locks that blocks 'request' whose owners
 * are not same as the one that covers 'request' (covered_by above) and
 * we can have locks whose owner is same as covered_by in the active list.
 */

	if (request_blocked_by_active && found_covering_lock != 2) {
		SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
		ASSERT(lock != NULL);
		do {
			if (BLOCKS(lock, request)) {
				if (found_covering_lock &&
				    !SAME_OWNER(lock, covered_by)) {
					lock = lock->l_next;
					continue;
				}
				if ((error = flk_add_edge(request, lock,
				    CHECK_CYCLE, 0)))
					return (error);
			}
			lock = lock->l_next;
		} while (lock->l_vnode == vp);
	}

	if (NOT_BLOCKED(request)) {
		/*
		 * request not dependent on any other locks
		 * so execute this request
		 */
		return (flk_execute_request(request));
	} else {
		/*
		 * check for deadlock
		 */
		if (flk_check_deadlock(request))
			return (EDEADLK);
		/*
		 * this thread has to sleep
		 */
		return (flk_wait_execute_request(request));
	}
}

/* ONC_PLUS EXTRACT START */
/*
 * The actual execution of the request in the simple case is only to
 * insert the 'request' in the list of active locks if it is not an
 * UNLOCK.
 * We have to consider the existing active locks' relation to
 * this 'request' if they are owned by same process. flk_relation() does
 * this job and sees to that the dependency graph information is maintained
 * properly.
 */

int
flk_execute_request(lock_descriptor_t *request)
{
	graph_t	*gp = request->l_graph;
	vnode_t	*vp = request->l_vnode;
	lock_descriptor_t	*lock, *lock1;
	int done_searching = 0;

	CHECK_SLEEPING_LOCKS(gp);
	CHECK_ACTIVE_LOCKS(gp);

	ASSERT(MUTEX_HELD(&gp->gp_mutex));

	flk_set_state(request, FLK_START_STATE);

	ASSERT(NOT_BLOCKED(request));

	/* IO_LOCK requests are only to check status */

	if (IS_IO_LOCK(request))
		return (0);

	SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);

	/* no active locks on this vnode: an unlock has nothing to undo */
	if (lock == NULL && request->l_type == F_UNLCK)
		return (0);
	if (lock == NULL) {
		flk_insert_active_lock(request);
		return (0);
	}

	/*
	 * Run flk_relation() against each same-owner active lock until it
	 * reports we are done.  l_next is saved in lock1 before the call —
	 * presumably because flk_relation() may unlink 'lock'; confirm
	 * against flk_relation()'s contract.
	 */
	do {
		lock1 = lock->l_next;
		if (SAME_OWNER(request, lock)) {
			done_searching = flk_relation(lock, request);
		}
		lock = lock1;
	} while (lock->l_vnode == vp && !done_searching);

	/*
	 * insert in active queue
	 */

	if (request->l_type != F_UNLCK)
		flk_insert_active_lock(request);

	return (0);
}
/* ONC_PLUS EXTRACT END */

/*
 * 'request' is blocked by some one therefore we put it into sleep queue.
 * Sleeps until woken, then re-executes the request; may instead return
 * ENOLCK (lock manager going down), EINTR (interrupted), EBADF
 * (cancelled, e.g. file closed), or PXFS_LOCK_BLOCKED without sleeping.
 */
static int
flk_wait_execute_request(lock_descriptor_t *request)
{
	graph_t	*gp = request->l_graph;
	callb_cpr_t 	*cprp;		/* CPR info from callback */
	struct flock_globals *fg;
	int index;

	ASSERT(MUTEX_HELD(&gp->gp_mutex));
	ASSERT(IS_WILLING_TO_SLEEP(request));

	flk_insert_sleeping_lock(request);

	/*
	 * NOTE(review): fg and index are only initialized inside this
	 * IS_LOCKMGR() branch; the post-sleep IS_LOCKMGR() check below
	 * reads them again, relying on the flag not changing while asleep.
	 */
	if (IS_LOCKMGR(request)) {
		index = HASH_INDEX(request->l_vnode);
		fg = flk_get_globals();

		if (nlm_status_size == 0) {	/* not booted as a cluster */
			if (fg->lockmgr_status[index] != FLK_LOCKMGR_UP) {
				flk_cancel_sleeping_lock(request, 1);
				return (ENOLCK);
			}
		} else {			/* booted as a cluster */
			/*
			 * If the request is an NLM server lock request,
			 * and the NLM state of the lock request is not
			 * NLM_UP (because the NLM server is shutting
			 * down), then cancel the sleeping lock and
			 * return error ENOLCK that will encourage the
			 * client to retransmit.
			 */
			if (!IS_NLM_UP(request)) {
				flk_cancel_sleeping_lock(request, 1);
				return (ENOLCK);
			}
		}
	}

	/* Clustering: For blocking PXFS locks, return */
	if (IS_PXFS(request)) {
		/*
		 * PXFS locks sleep on the client side.
		 * The callback argument is used to wake up the sleeper
		 * when the lock is granted.
		 * We return -1 (rather than an errno value) to indicate
		 * the client side should sleep
		 */
		return (PXFS_LOCK_BLOCKED);
	}

	if (request->l_callbacks != NULL) {
		/*
		 * To make sure the shutdown code works correctly, either
		 * the callback must happen after putting the lock on the
		 * sleep list, or we must check the shutdown status after
		 * returning from the callback (and before sleeping).  At
		 * least for now, we'll use the first option.  If a
		 * shutdown or signal or whatever happened while the graph
		 * mutex was dropped, that will be detected by
		 * wait_for_lock().
		 */
		mutex_exit(&gp->gp_mutex);

		cprp = flk_invoke_callbacks(request->l_callbacks,
		    FLK_BEFORE_SLEEP);

		mutex_enter(&gp->gp_mutex);

		if (cprp == NULL) {
			wait_for_lock(request);
		} else {
			/*
			 * A callback supplied CPR info: bracket the sleep
			 * with CALLB_CPR_SAFE_BEGIN/END so the thread is
			 * safe for suspend/resume while blocked.
			 */
			mutex_enter(cprp->cc_lockp);
			CALLB_CPR_SAFE_BEGIN(cprp);
			mutex_exit(cprp->cc_lockp);
			wait_for_lock(request);
			mutex_enter(cprp->cc_lockp);
			CALLB_CPR_SAFE_END(cprp, cprp->cc_lockp);
			mutex_exit(cprp->cc_lockp);
		}

		/* graph mutex is dropped again around the AFTER callbacks */
		mutex_exit(&gp->gp_mutex);
		(void) flk_invoke_callbacks(request->l_callbacks,
		    FLK_AFTER_SLEEP);
		mutex_enter(&gp->gp_mutex);
	} else {
		wait_for_lock(request);
	}

	if (IS_LOCKMGR(request)) {
		/*
		 * If the lock manager is shutting down, return an
		 * error that will encourage the client to retransmit.
		 */
		if (fg->lockmgr_status[index] != FLK_LOCKMGR_UP &&
		    !IS_GRANTED(request)) {
			flk_cancel_sleeping_lock(request, 1);
			return (ENOLCK);
		}
	}

	if (IS_INTERRUPTED(request)) {
		/* we got a signal, or act like we did */
		flk_cancel_sleeping_lock(request, 1);
		return (EINTR);
	}

	/* Cancelled if some other thread has closed the file */

	if (IS_CANCELLED(request)) {
		flk_cancel_sleeping_lock(request, 1);
		return (EBADF);
	}

	/* granted: take it off the sleep queue and execute it for real */
	request->l_state &= ~GRANTED_LOCK;
	REMOVE_SLEEP_QUEUE(request);
	return (flk_execute_request(request));
}

/*
 * This routine adds an edge between from and to because from depends
 * to. If asked to check for deadlock it checks whether there are any
 * reachable locks from "from_lock" that is owned by the same process
 * as "from_lock".
 * NOTE: It is the caller's responsibility to make sure that the color
 * of the graph is consistent between the calls to flk_add_edge as done
 * in flk_process_request.
This routine does not color and check for 11777c478bd9Sstevel@tonic-gate * deadlock explicitly. 11787c478bd9Sstevel@tonic-gate */ 11797c478bd9Sstevel@tonic-gate 11807c478bd9Sstevel@tonic-gate static int 11817c478bd9Sstevel@tonic-gate flk_add_edge(lock_descriptor_t *from_lock, lock_descriptor_t *to_lock, 11827c478bd9Sstevel@tonic-gate int check_cycle, int update_graph) 11837c478bd9Sstevel@tonic-gate { 11847c478bd9Sstevel@tonic-gate edge_t *edge; 11857c478bd9Sstevel@tonic-gate edge_t *ep; 11867c478bd9Sstevel@tonic-gate lock_descriptor_t *vertex; 11877c478bd9Sstevel@tonic-gate lock_descriptor_t *vertex_stack; 11887c478bd9Sstevel@tonic-gate 11897c478bd9Sstevel@tonic-gate STACK_INIT(vertex_stack); 11907c478bd9Sstevel@tonic-gate 11917c478bd9Sstevel@tonic-gate /* 11927c478bd9Sstevel@tonic-gate * if to vertex already has mark_color just return 11937c478bd9Sstevel@tonic-gate * don't add an edge as it is reachable from from vertex 11947c478bd9Sstevel@tonic-gate * before itself. 11957c478bd9Sstevel@tonic-gate */ 11967c478bd9Sstevel@tonic-gate 11977c478bd9Sstevel@tonic-gate if (COLORED(to_lock)) 11987c478bd9Sstevel@tonic-gate return (0); 11997c478bd9Sstevel@tonic-gate 12007c478bd9Sstevel@tonic-gate edge = flk_get_edge(); 12017c478bd9Sstevel@tonic-gate 12027c478bd9Sstevel@tonic-gate /* 12037c478bd9Sstevel@tonic-gate * set the from and to vertex 12047c478bd9Sstevel@tonic-gate */ 12057c478bd9Sstevel@tonic-gate 12067c478bd9Sstevel@tonic-gate edge->from_vertex = from_lock; 12077c478bd9Sstevel@tonic-gate edge->to_vertex = to_lock; 12087c478bd9Sstevel@tonic-gate 12097c478bd9Sstevel@tonic-gate /* 12107c478bd9Sstevel@tonic-gate * put in adjacency list of from vertex 12117c478bd9Sstevel@tonic-gate */ 12127c478bd9Sstevel@tonic-gate 12137c478bd9Sstevel@tonic-gate from_lock->l_edge.edge_adj_next->edge_adj_prev = edge; 12147c478bd9Sstevel@tonic-gate edge->edge_adj_next = from_lock->l_edge.edge_adj_next; 12157c478bd9Sstevel@tonic-gate edge->edge_adj_prev = &from_lock->l_edge; 
12167c478bd9Sstevel@tonic-gate from_lock->l_edge.edge_adj_next = edge; 12177c478bd9Sstevel@tonic-gate 12187c478bd9Sstevel@tonic-gate /* 12197c478bd9Sstevel@tonic-gate * put in in list of to vertex 12207c478bd9Sstevel@tonic-gate */ 12217c478bd9Sstevel@tonic-gate 12227c478bd9Sstevel@tonic-gate to_lock->l_edge.edge_in_next->edge_in_prev = edge; 12237c478bd9Sstevel@tonic-gate edge->edge_in_next = to_lock->l_edge.edge_in_next; 12247c478bd9Sstevel@tonic-gate to_lock->l_edge.edge_in_next = edge; 12257c478bd9Sstevel@tonic-gate edge->edge_in_prev = &to_lock->l_edge; 12267c478bd9Sstevel@tonic-gate 12277c478bd9Sstevel@tonic-gate 12287c478bd9Sstevel@tonic-gate if (update_graph) { 12297c478bd9Sstevel@tonic-gate flk_update_proc_graph(edge, 0); 12307c478bd9Sstevel@tonic-gate return (0); 12317c478bd9Sstevel@tonic-gate } 12327c478bd9Sstevel@tonic-gate if (!check_cycle) { 12337c478bd9Sstevel@tonic-gate return (0); 12347c478bd9Sstevel@tonic-gate } 12357c478bd9Sstevel@tonic-gate 12367c478bd9Sstevel@tonic-gate STACK_PUSH(vertex_stack, from_lock, l_stack); 12377c478bd9Sstevel@tonic-gate 12387c478bd9Sstevel@tonic-gate while ((vertex = STACK_TOP(vertex_stack)) != NULL) { 12397c478bd9Sstevel@tonic-gate 12407c478bd9Sstevel@tonic-gate STACK_POP(vertex_stack, l_stack); 12417c478bd9Sstevel@tonic-gate 12427c478bd9Sstevel@tonic-gate for (ep = FIRST_ADJ(vertex); 12437c478bd9Sstevel@tonic-gate ep != HEAD(vertex); 12447c478bd9Sstevel@tonic-gate ep = NEXT_ADJ(ep)) { 12457c478bd9Sstevel@tonic-gate if (COLORED(ep->to_vertex)) 12467c478bd9Sstevel@tonic-gate continue; 12477c478bd9Sstevel@tonic-gate COLOR(ep->to_vertex); 12487c478bd9Sstevel@tonic-gate if (SAME_OWNER(ep->to_vertex, from_lock)) 12497c478bd9Sstevel@tonic-gate goto dead_lock; 12507c478bd9Sstevel@tonic-gate STACK_PUSH(vertex_stack, ep->to_vertex, l_stack); 12517c478bd9Sstevel@tonic-gate } 12527c478bd9Sstevel@tonic-gate } 12537c478bd9Sstevel@tonic-gate return (0); 12547c478bd9Sstevel@tonic-gate 12557c478bd9Sstevel@tonic-gate dead_lock: 
12567c478bd9Sstevel@tonic-gate 12577c478bd9Sstevel@tonic-gate /* 12587c478bd9Sstevel@tonic-gate * remove all edges 12597c478bd9Sstevel@tonic-gate */ 12607c478bd9Sstevel@tonic-gate 12617c478bd9Sstevel@tonic-gate ep = FIRST_ADJ(from_lock); 12627c478bd9Sstevel@tonic-gate 12637c478bd9Sstevel@tonic-gate while (ep != HEAD(from_lock)) { 12647c478bd9Sstevel@tonic-gate IN_LIST_REMOVE(ep); 12657c478bd9Sstevel@tonic-gate from_lock->l_sedge = NEXT_ADJ(ep); 12667c478bd9Sstevel@tonic-gate ADJ_LIST_REMOVE(ep); 12677c478bd9Sstevel@tonic-gate flk_free_edge(ep); 12687c478bd9Sstevel@tonic-gate ep = from_lock->l_sedge; 12697c478bd9Sstevel@tonic-gate } 12707c478bd9Sstevel@tonic-gate return (EDEADLK); 12717c478bd9Sstevel@tonic-gate } 12727c478bd9Sstevel@tonic-gate 12737c478bd9Sstevel@tonic-gate /* 12747c478bd9Sstevel@tonic-gate * Get an edge structure for representing the dependency between two locks. 12757c478bd9Sstevel@tonic-gate */ 12767c478bd9Sstevel@tonic-gate 12777c478bd9Sstevel@tonic-gate static edge_t * 12787c478bd9Sstevel@tonic-gate flk_get_edge() 12797c478bd9Sstevel@tonic-gate { 12807c478bd9Sstevel@tonic-gate edge_t *ep; 12817c478bd9Sstevel@tonic-gate 12827c478bd9Sstevel@tonic-gate ASSERT(flk_edge_cache != NULL); 12837c478bd9Sstevel@tonic-gate 12847c478bd9Sstevel@tonic-gate ep = kmem_cache_alloc(flk_edge_cache, KM_SLEEP); 12857c478bd9Sstevel@tonic-gate edge_allocs++; 12867c478bd9Sstevel@tonic-gate return (ep); 12877c478bd9Sstevel@tonic-gate } 12887c478bd9Sstevel@tonic-gate 12897c478bd9Sstevel@tonic-gate /* 12907c478bd9Sstevel@tonic-gate * Free the edge structure. 
12917c478bd9Sstevel@tonic-gate */ 12927c478bd9Sstevel@tonic-gate 12937c478bd9Sstevel@tonic-gate static void 12947c478bd9Sstevel@tonic-gate flk_free_edge(edge_t *ep) 12957c478bd9Sstevel@tonic-gate { 12967c478bd9Sstevel@tonic-gate edge_frees++; 12977c478bd9Sstevel@tonic-gate kmem_cache_free(flk_edge_cache, (void *)ep); 12987c478bd9Sstevel@tonic-gate } 12997c478bd9Sstevel@tonic-gate 13007c478bd9Sstevel@tonic-gate /* 13017c478bd9Sstevel@tonic-gate * Check the relationship of request with lock and perform the 13027c478bd9Sstevel@tonic-gate * recomputation of dependencies, break lock if required, and return 13037c478bd9Sstevel@tonic-gate * 1 if request cannot have any more relationship with the next 13047c478bd9Sstevel@tonic-gate * active locks. 13057c478bd9Sstevel@tonic-gate * The 'lock' and 'request' are compared and in case of overlap we 13067c478bd9Sstevel@tonic-gate * delete the 'lock' and form new locks to represent the non-overlapped 13077c478bd9Sstevel@tonic-gate * portion of original 'lock'. This function has side effects such as 13087c478bd9Sstevel@tonic-gate * 'lock' will be freed, new locks will be added to the active list. 
13097c478bd9Sstevel@tonic-gate */ 13107c478bd9Sstevel@tonic-gate 13117c478bd9Sstevel@tonic-gate static int 13127c478bd9Sstevel@tonic-gate flk_relation(lock_descriptor_t *lock, lock_descriptor_t *request) 13137c478bd9Sstevel@tonic-gate { 13147c478bd9Sstevel@tonic-gate int lock_effect; 13157c478bd9Sstevel@tonic-gate lock_descriptor_t *lock1, *lock2; 13167c478bd9Sstevel@tonic-gate lock_descriptor_t *topology[3]; 13177c478bd9Sstevel@tonic-gate int nvertex = 0; 13187c478bd9Sstevel@tonic-gate int i; 13197c478bd9Sstevel@tonic-gate edge_t *ep; 13207c478bd9Sstevel@tonic-gate graph_t *gp = (lock->l_graph); 13217c478bd9Sstevel@tonic-gate 13227c478bd9Sstevel@tonic-gate 13237c478bd9Sstevel@tonic-gate CHECK_SLEEPING_LOCKS(gp); 13247c478bd9Sstevel@tonic-gate CHECK_ACTIVE_LOCKS(gp); 13257c478bd9Sstevel@tonic-gate 13267c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&gp->gp_mutex)); 13277c478bd9Sstevel@tonic-gate 13287c478bd9Sstevel@tonic-gate topology[0] = topology[1] = topology[2] = NULL; 13297c478bd9Sstevel@tonic-gate 13307c478bd9Sstevel@tonic-gate if (request->l_type == F_UNLCK) 13317c478bd9Sstevel@tonic-gate lock_effect = FLK_UNLOCK; 13327c478bd9Sstevel@tonic-gate else if (request->l_type == F_RDLCK && 13337c478bd9Sstevel@tonic-gate lock->l_type == F_WRLCK) 13347c478bd9Sstevel@tonic-gate lock_effect = FLK_DOWNGRADE; 13357c478bd9Sstevel@tonic-gate else if (request->l_type == F_WRLCK && 13367c478bd9Sstevel@tonic-gate lock->l_type == F_RDLCK) 13377c478bd9Sstevel@tonic-gate lock_effect = FLK_UPGRADE; 13387c478bd9Sstevel@tonic-gate else 13397c478bd9Sstevel@tonic-gate lock_effect = FLK_STAY_SAME; 13407c478bd9Sstevel@tonic-gate 13417c478bd9Sstevel@tonic-gate if (lock->l_end < request->l_start) { 13427c478bd9Sstevel@tonic-gate if (lock->l_end == request->l_start - 1 && 13437c478bd9Sstevel@tonic-gate lock_effect == FLK_STAY_SAME) { 13447c478bd9Sstevel@tonic-gate topology[0] = request; 13457c478bd9Sstevel@tonic-gate request->l_start = lock->l_start; 13467c478bd9Sstevel@tonic-gate nvertex = 
1; 13477c478bd9Sstevel@tonic-gate goto recompute; 13487c478bd9Sstevel@tonic-gate } else { 13497c478bd9Sstevel@tonic-gate return (0); 13507c478bd9Sstevel@tonic-gate } 13517c478bd9Sstevel@tonic-gate } 13527c478bd9Sstevel@tonic-gate 13537c478bd9Sstevel@tonic-gate if (lock->l_start > request->l_end) { 13547c478bd9Sstevel@tonic-gate if (request->l_end == lock->l_start - 1 && 13557c478bd9Sstevel@tonic-gate lock_effect == FLK_STAY_SAME) { 13567c478bd9Sstevel@tonic-gate topology[0] = request; 13577c478bd9Sstevel@tonic-gate request->l_end = lock->l_end; 13587c478bd9Sstevel@tonic-gate nvertex = 1; 13597c478bd9Sstevel@tonic-gate goto recompute; 13607c478bd9Sstevel@tonic-gate } else { 13617c478bd9Sstevel@tonic-gate return (1); 13627c478bd9Sstevel@tonic-gate } 13637c478bd9Sstevel@tonic-gate } 13647c478bd9Sstevel@tonic-gate 13657c478bd9Sstevel@tonic-gate if (request->l_end < lock->l_end) { 13667c478bd9Sstevel@tonic-gate if (request->l_start > lock->l_start) { 13677c478bd9Sstevel@tonic-gate if (lock_effect == FLK_STAY_SAME) { 13687c478bd9Sstevel@tonic-gate request->l_start = lock->l_start; 13697c478bd9Sstevel@tonic-gate request->l_end = lock->l_end; 13707c478bd9Sstevel@tonic-gate topology[0] = request; 13717c478bd9Sstevel@tonic-gate nvertex = 1; 13727c478bd9Sstevel@tonic-gate } else { 13737c478bd9Sstevel@tonic-gate lock1 = flk_get_lock(); 13747c478bd9Sstevel@tonic-gate lock2 = flk_get_lock(); 13757c478bd9Sstevel@tonic-gate COPY(lock1, lock); 13767c478bd9Sstevel@tonic-gate COPY(lock2, lock); 13777c478bd9Sstevel@tonic-gate lock1->l_start = lock->l_start; 13787c478bd9Sstevel@tonic-gate lock1->l_end = request->l_start - 1; 13797c478bd9Sstevel@tonic-gate lock2->l_start = request->l_end + 1; 13807c478bd9Sstevel@tonic-gate lock2->l_end = lock->l_end; 13817c478bd9Sstevel@tonic-gate topology[0] = lock1; 13827c478bd9Sstevel@tonic-gate topology[1] = lock2; 13837c478bd9Sstevel@tonic-gate topology[2] = request; 13847c478bd9Sstevel@tonic-gate nvertex = 3; 13857c478bd9Sstevel@tonic-gate } 
13867c478bd9Sstevel@tonic-gate } else if (request->l_start < lock->l_start) { 13877c478bd9Sstevel@tonic-gate if (lock_effect == FLK_STAY_SAME) { 13887c478bd9Sstevel@tonic-gate request->l_end = lock->l_end; 13897c478bd9Sstevel@tonic-gate topology[0] = request; 13907c478bd9Sstevel@tonic-gate nvertex = 1; 13917c478bd9Sstevel@tonic-gate } else { 13927c478bd9Sstevel@tonic-gate lock1 = flk_get_lock(); 13937c478bd9Sstevel@tonic-gate COPY(lock1, lock); 13947c478bd9Sstevel@tonic-gate lock1->l_start = request->l_end + 1; 13957c478bd9Sstevel@tonic-gate topology[0] = lock1; 13967c478bd9Sstevel@tonic-gate topology[1] = request; 13977c478bd9Sstevel@tonic-gate nvertex = 2; 13987c478bd9Sstevel@tonic-gate } 13997c478bd9Sstevel@tonic-gate } else { 14007c478bd9Sstevel@tonic-gate if (lock_effect == FLK_STAY_SAME) { 14017c478bd9Sstevel@tonic-gate request->l_start = lock->l_start; 14027c478bd9Sstevel@tonic-gate request->l_end = lock->l_end; 14037c478bd9Sstevel@tonic-gate topology[0] = request; 14047c478bd9Sstevel@tonic-gate nvertex = 1; 14057c478bd9Sstevel@tonic-gate } else { 14067c478bd9Sstevel@tonic-gate lock1 = flk_get_lock(); 14077c478bd9Sstevel@tonic-gate COPY(lock1, lock); 14087c478bd9Sstevel@tonic-gate lock1->l_start = request->l_end + 1; 14097c478bd9Sstevel@tonic-gate topology[0] = lock1; 14107c478bd9Sstevel@tonic-gate topology[1] = request; 14117c478bd9Sstevel@tonic-gate nvertex = 2; 14127c478bd9Sstevel@tonic-gate } 14137c478bd9Sstevel@tonic-gate } 14147c478bd9Sstevel@tonic-gate } else if (request->l_end > lock->l_end) { 14157c478bd9Sstevel@tonic-gate if (request->l_start > lock->l_start) { 14167c478bd9Sstevel@tonic-gate if (lock_effect == FLK_STAY_SAME) { 14177c478bd9Sstevel@tonic-gate request->l_start = lock->l_start; 14187c478bd9Sstevel@tonic-gate topology[0] = request; 14197c478bd9Sstevel@tonic-gate nvertex = 1; 14207c478bd9Sstevel@tonic-gate } else { 14217c478bd9Sstevel@tonic-gate lock1 = flk_get_lock(); 14227c478bd9Sstevel@tonic-gate COPY(lock1, lock); 
14237c478bd9Sstevel@tonic-gate lock1->l_end = request->l_start - 1; 14247c478bd9Sstevel@tonic-gate topology[0] = lock1; 14257c478bd9Sstevel@tonic-gate topology[1] = request; 14267c478bd9Sstevel@tonic-gate nvertex = 2; 14277c478bd9Sstevel@tonic-gate } 14287c478bd9Sstevel@tonic-gate } else if (request->l_start < lock->l_start) { 14297c478bd9Sstevel@tonic-gate topology[0] = request; 14307c478bd9Sstevel@tonic-gate nvertex = 1; 14317c478bd9Sstevel@tonic-gate } else { 14327c478bd9Sstevel@tonic-gate topology[0] = request; 14337c478bd9Sstevel@tonic-gate nvertex = 1; 14347c478bd9Sstevel@tonic-gate } 14357c478bd9Sstevel@tonic-gate } else { 14367c478bd9Sstevel@tonic-gate if (request->l_start > lock->l_start) { 14377c478bd9Sstevel@tonic-gate if (lock_effect == FLK_STAY_SAME) { 14387c478bd9Sstevel@tonic-gate request->l_start = lock->l_start; 14397c478bd9Sstevel@tonic-gate topology[0] = request; 14407c478bd9Sstevel@tonic-gate nvertex = 1; 14417c478bd9Sstevel@tonic-gate } else { 14427c478bd9Sstevel@tonic-gate lock1 = flk_get_lock(); 14437c478bd9Sstevel@tonic-gate COPY(lock1, lock); 14447c478bd9Sstevel@tonic-gate lock1->l_end = request->l_start - 1; 14457c478bd9Sstevel@tonic-gate topology[0] = lock1; 14467c478bd9Sstevel@tonic-gate topology[1] = request; 14477c478bd9Sstevel@tonic-gate nvertex = 2; 14487c478bd9Sstevel@tonic-gate } 14497c478bd9Sstevel@tonic-gate } else if (request->l_start < lock->l_start) { 14507c478bd9Sstevel@tonic-gate topology[0] = request; 14517c478bd9Sstevel@tonic-gate nvertex = 1; 14527c478bd9Sstevel@tonic-gate } else { 14537c478bd9Sstevel@tonic-gate if (lock_effect != FLK_UNLOCK) { 14547c478bd9Sstevel@tonic-gate topology[0] = request; 14557c478bd9Sstevel@tonic-gate nvertex = 1; 14567c478bd9Sstevel@tonic-gate } else { 14577c478bd9Sstevel@tonic-gate flk_delete_active_lock(lock, 0); 14587c478bd9Sstevel@tonic-gate flk_wakeup(lock, 1); 14597c478bd9Sstevel@tonic-gate flk_free_lock(lock); 14607c478bd9Sstevel@tonic-gate CHECK_SLEEPING_LOCKS(gp); 
14617c478bd9Sstevel@tonic-gate CHECK_ACTIVE_LOCKS(gp); 14627c478bd9Sstevel@tonic-gate return (1); 14637c478bd9Sstevel@tonic-gate } 14647c478bd9Sstevel@tonic-gate } 14657c478bd9Sstevel@tonic-gate } 14667c478bd9Sstevel@tonic-gate 14677c478bd9Sstevel@tonic-gate recompute: 14687c478bd9Sstevel@tonic-gate 14697c478bd9Sstevel@tonic-gate /* 14707c478bd9Sstevel@tonic-gate * For unlock we don't send the 'request' to for recomputing 14717c478bd9Sstevel@tonic-gate * dependencies because no lock will add an edge to this. 14727c478bd9Sstevel@tonic-gate */ 14737c478bd9Sstevel@tonic-gate 14747c478bd9Sstevel@tonic-gate if (lock_effect == FLK_UNLOCK) { 14757c478bd9Sstevel@tonic-gate topology[nvertex-1] = NULL; 14767c478bd9Sstevel@tonic-gate nvertex--; 14777c478bd9Sstevel@tonic-gate } 14787c478bd9Sstevel@tonic-gate for (i = 0; i < nvertex; i++) { 14797c478bd9Sstevel@tonic-gate topology[i]->l_state |= RECOMPUTE_LOCK; 14807c478bd9Sstevel@tonic-gate topology[i]->l_color = NO_COLOR; 14817c478bd9Sstevel@tonic-gate } 14827c478bd9Sstevel@tonic-gate 14837c478bd9Sstevel@tonic-gate ASSERT(FIRST_ADJ(lock) == HEAD(lock)); 14847c478bd9Sstevel@tonic-gate 14857c478bd9Sstevel@tonic-gate /* 14867c478bd9Sstevel@tonic-gate * we remove the adjacent edges for all vertices' to this vertex 14877c478bd9Sstevel@tonic-gate * 'lock'. 
14887c478bd9Sstevel@tonic-gate */ 14897c478bd9Sstevel@tonic-gate 14907c478bd9Sstevel@tonic-gate ep = FIRST_IN(lock); 14917c478bd9Sstevel@tonic-gate while (ep != HEAD(lock)) { 14927c478bd9Sstevel@tonic-gate ADJ_LIST_REMOVE(ep); 14937c478bd9Sstevel@tonic-gate ep = NEXT_IN(ep); 14947c478bd9Sstevel@tonic-gate } 14957c478bd9Sstevel@tonic-gate 14967c478bd9Sstevel@tonic-gate flk_delete_active_lock(lock, 0); 14977c478bd9Sstevel@tonic-gate 14987c478bd9Sstevel@tonic-gate /* We are ready for recomputing the dependencies now */ 14997c478bd9Sstevel@tonic-gate 15007c478bd9Sstevel@tonic-gate flk_recompute_dependencies(lock, topology, nvertex, 1); 15017c478bd9Sstevel@tonic-gate 15027c478bd9Sstevel@tonic-gate for (i = 0; i < nvertex; i++) { 15037c478bd9Sstevel@tonic-gate topology[i]->l_state &= ~RECOMPUTE_LOCK; 15047c478bd9Sstevel@tonic-gate topology[i]->l_color = NO_COLOR; 15057c478bd9Sstevel@tonic-gate } 15067c478bd9Sstevel@tonic-gate 15077c478bd9Sstevel@tonic-gate 15087c478bd9Sstevel@tonic-gate if (lock_effect == FLK_UNLOCK) { 15097c478bd9Sstevel@tonic-gate nvertex++; 15107c478bd9Sstevel@tonic-gate } 15117c478bd9Sstevel@tonic-gate for (i = 0; i < nvertex - 1; i++) { 15127c478bd9Sstevel@tonic-gate flk_insert_active_lock(topology[i]); 15137c478bd9Sstevel@tonic-gate } 15147c478bd9Sstevel@tonic-gate 15157c478bd9Sstevel@tonic-gate 15167c478bd9Sstevel@tonic-gate if (lock_effect == FLK_DOWNGRADE || lock_effect == FLK_UNLOCK) { 15177c478bd9Sstevel@tonic-gate flk_wakeup(lock, 0); 15187c478bd9Sstevel@tonic-gate } else { 15197c478bd9Sstevel@tonic-gate ep = FIRST_IN(lock); 15207c478bd9Sstevel@tonic-gate while (ep != HEAD(lock)) { 15217c478bd9Sstevel@tonic-gate lock->l_sedge = NEXT_IN(ep); 15227c478bd9Sstevel@tonic-gate IN_LIST_REMOVE(ep); 15237c478bd9Sstevel@tonic-gate flk_update_proc_graph(ep, 1); 15247c478bd9Sstevel@tonic-gate flk_free_edge(ep); 15257c478bd9Sstevel@tonic-gate ep = lock->l_sedge; 15267c478bd9Sstevel@tonic-gate } 15277c478bd9Sstevel@tonic-gate } 
15287c478bd9Sstevel@tonic-gate flk_free_lock(lock); 15297c478bd9Sstevel@tonic-gate 15307c478bd9Sstevel@tonic-gate CHECK_SLEEPING_LOCKS(gp); 15317c478bd9Sstevel@tonic-gate CHECK_ACTIVE_LOCKS(gp); 15327c478bd9Sstevel@tonic-gate return (0); 15337c478bd9Sstevel@tonic-gate } 15347c478bd9Sstevel@tonic-gate 15357c478bd9Sstevel@tonic-gate /* 15367c478bd9Sstevel@tonic-gate * Insert a lock into the active queue. 15377c478bd9Sstevel@tonic-gate */ 15387c478bd9Sstevel@tonic-gate 15397c478bd9Sstevel@tonic-gate static void 15407c478bd9Sstevel@tonic-gate flk_insert_active_lock(lock_descriptor_t *new_lock) 15417c478bd9Sstevel@tonic-gate { 15427c478bd9Sstevel@tonic-gate graph_t *gp = new_lock->l_graph; 15437c478bd9Sstevel@tonic-gate vnode_t *vp = new_lock->l_vnode; 15447c478bd9Sstevel@tonic-gate lock_descriptor_t *first_lock, *lock; 15457c478bd9Sstevel@tonic-gate 15467c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&gp->gp_mutex)); 15477c478bd9Sstevel@tonic-gate 15487c478bd9Sstevel@tonic-gate SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp); 15497c478bd9Sstevel@tonic-gate first_lock = lock; 15507c478bd9Sstevel@tonic-gate 15517c478bd9Sstevel@tonic-gate if (first_lock != NULL) { 15527c478bd9Sstevel@tonic-gate for (; (lock->l_vnode == vp && 15537c478bd9Sstevel@tonic-gate lock->l_start < new_lock->l_start); lock = lock->l_next) 15547c478bd9Sstevel@tonic-gate ; 15557c478bd9Sstevel@tonic-gate } else { 15567c478bd9Sstevel@tonic-gate lock = ACTIVE_HEAD(gp); 15577c478bd9Sstevel@tonic-gate } 15587c478bd9Sstevel@tonic-gate 15597c478bd9Sstevel@tonic-gate lock->l_prev->l_next = new_lock; 15607c478bd9Sstevel@tonic-gate new_lock->l_next = lock; 15617c478bd9Sstevel@tonic-gate new_lock->l_prev = lock->l_prev; 15627c478bd9Sstevel@tonic-gate lock->l_prev = new_lock; 15637c478bd9Sstevel@tonic-gate 15647c478bd9Sstevel@tonic-gate if (first_lock == NULL || (new_lock->l_start <= first_lock->l_start)) { 15657c478bd9Sstevel@tonic-gate vp->v_filocks = (struct filock *)new_lock; 15667c478bd9Sstevel@tonic-gate } 
15677c478bd9Sstevel@tonic-gate flk_set_state(new_lock, FLK_ACTIVE_STATE); 15687c478bd9Sstevel@tonic-gate new_lock->l_state |= ACTIVE_LOCK; 15697c478bd9Sstevel@tonic-gate 15707c478bd9Sstevel@tonic-gate CHECK_ACTIVE_LOCKS(gp); 15717c478bd9Sstevel@tonic-gate CHECK_SLEEPING_LOCKS(gp); 15727c478bd9Sstevel@tonic-gate } 15737c478bd9Sstevel@tonic-gate 15747c478bd9Sstevel@tonic-gate /* 15757c478bd9Sstevel@tonic-gate * Delete the active lock : Performs two functions depending on the 15767c478bd9Sstevel@tonic-gate * value of second parameter. One is to remove from the active lists 15777c478bd9Sstevel@tonic-gate * only and other is to both remove and free the lock. 15787c478bd9Sstevel@tonic-gate */ 15797c478bd9Sstevel@tonic-gate 15807c478bd9Sstevel@tonic-gate static void 15817c478bd9Sstevel@tonic-gate flk_delete_active_lock(lock_descriptor_t *lock, int free_lock) 15827c478bd9Sstevel@tonic-gate { 15837c478bd9Sstevel@tonic-gate vnode_t *vp = lock->l_vnode; 15847c478bd9Sstevel@tonic-gate graph_t *gp = lock->l_graph; 15857c478bd9Sstevel@tonic-gate 15867c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&gp->gp_mutex)); 15877c478bd9Sstevel@tonic-gate if (free_lock) 15887c478bd9Sstevel@tonic-gate ASSERT(NO_DEPENDENTS(lock)); 15897c478bd9Sstevel@tonic-gate ASSERT(NOT_BLOCKED(lock)); 15907c478bd9Sstevel@tonic-gate ASSERT(IS_ACTIVE(lock)); 15917c478bd9Sstevel@tonic-gate 15927c478bd9Sstevel@tonic-gate ASSERT((vp->v_filocks != NULL)); 15937c478bd9Sstevel@tonic-gate 15947c478bd9Sstevel@tonic-gate if (vp->v_filocks == (struct filock *)lock) { 15957c478bd9Sstevel@tonic-gate vp->v_filocks = (struct filock *) 15967c478bd9Sstevel@tonic-gate ((lock->l_next->l_vnode == vp) ? 
lock->l_next : 15977c478bd9Sstevel@tonic-gate NULL); 15987c478bd9Sstevel@tonic-gate } 15997c478bd9Sstevel@tonic-gate lock->l_next->l_prev = lock->l_prev; 16007c478bd9Sstevel@tonic-gate lock->l_prev->l_next = lock->l_next; 16017c478bd9Sstevel@tonic-gate lock->l_next = lock->l_prev = NULL; 16027c478bd9Sstevel@tonic-gate flk_set_state(lock, FLK_DEAD_STATE); 16037c478bd9Sstevel@tonic-gate lock->l_state &= ~ACTIVE_LOCK; 16047c478bd9Sstevel@tonic-gate 16057c478bd9Sstevel@tonic-gate if (free_lock) 16067c478bd9Sstevel@tonic-gate flk_free_lock(lock); 16077c478bd9Sstevel@tonic-gate CHECK_ACTIVE_LOCKS(gp); 16087c478bd9Sstevel@tonic-gate CHECK_SLEEPING_LOCKS(gp); 16097c478bd9Sstevel@tonic-gate } 16107c478bd9Sstevel@tonic-gate 16117c478bd9Sstevel@tonic-gate /* 16127c478bd9Sstevel@tonic-gate * Insert into the sleep queue. 16137c478bd9Sstevel@tonic-gate */ 16147c478bd9Sstevel@tonic-gate 16157c478bd9Sstevel@tonic-gate static void 16167c478bd9Sstevel@tonic-gate flk_insert_sleeping_lock(lock_descriptor_t *request) 16177c478bd9Sstevel@tonic-gate { 16187c478bd9Sstevel@tonic-gate graph_t *gp = request->l_graph; 16197c478bd9Sstevel@tonic-gate vnode_t *vp = request->l_vnode; 16207c478bd9Sstevel@tonic-gate lock_descriptor_t *lock; 16217c478bd9Sstevel@tonic-gate 16227c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&gp->gp_mutex)); 16237c478bd9Sstevel@tonic-gate ASSERT(IS_INITIAL(request)); 16247c478bd9Sstevel@tonic-gate 16257c478bd9Sstevel@tonic-gate for (lock = gp->sleeping_locks.l_next; (lock != &gp->sleeping_locks && 16267c478bd9Sstevel@tonic-gate lock->l_vnode < vp); lock = lock->l_next) 16277c478bd9Sstevel@tonic-gate ; 16287c478bd9Sstevel@tonic-gate 16297c478bd9Sstevel@tonic-gate lock->l_prev->l_next = request; 16307c478bd9Sstevel@tonic-gate request->l_prev = lock->l_prev; 16317c478bd9Sstevel@tonic-gate lock->l_prev = request; 16327c478bd9Sstevel@tonic-gate request->l_next = lock; 16337c478bd9Sstevel@tonic-gate flk_set_state(request, FLK_SLEEPING_STATE); 16347c478bd9Sstevel@tonic-gate 
request->l_state |= SLEEPING_LOCK; 16357c478bd9Sstevel@tonic-gate } 16367c478bd9Sstevel@tonic-gate 16377c478bd9Sstevel@tonic-gate /* 16387c478bd9Sstevel@tonic-gate * Cancelling a sleeping lock implies removing a vertex from the 16397c478bd9Sstevel@tonic-gate * dependency graph and therefore we should recompute the dependencies 16407c478bd9Sstevel@tonic-gate * of all vertices that have a path to this vertex, w.r.t. all 16417c478bd9Sstevel@tonic-gate * vertices reachable from this vertex. 16427c478bd9Sstevel@tonic-gate */ 16437c478bd9Sstevel@tonic-gate 16447c478bd9Sstevel@tonic-gate void 16457c478bd9Sstevel@tonic-gate flk_cancel_sleeping_lock(lock_descriptor_t *request, int remove_from_queue) 16467c478bd9Sstevel@tonic-gate { 16477c478bd9Sstevel@tonic-gate graph_t *gp = request->l_graph; 16487c478bd9Sstevel@tonic-gate vnode_t *vp = request->l_vnode; 16497c478bd9Sstevel@tonic-gate lock_descriptor_t **topology = NULL; 16507c478bd9Sstevel@tonic-gate edge_t *ep; 16517c478bd9Sstevel@tonic-gate lock_descriptor_t *vertex, *lock; 16527c478bd9Sstevel@tonic-gate int nvertex = 0; 16537c478bd9Sstevel@tonic-gate int i; 16547c478bd9Sstevel@tonic-gate lock_descriptor_t *vertex_stack; 16557c478bd9Sstevel@tonic-gate 16567c478bd9Sstevel@tonic-gate STACK_INIT(vertex_stack); 16577c478bd9Sstevel@tonic-gate 16587c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&gp->gp_mutex)); 16597c478bd9Sstevel@tonic-gate /* 16607c478bd9Sstevel@tonic-gate * count number of vertex pointers that has to be allocated 16617c478bd9Sstevel@tonic-gate * All vertices that are reachable from request. 
16627c478bd9Sstevel@tonic-gate */ 16637c478bd9Sstevel@tonic-gate 16647c478bd9Sstevel@tonic-gate STACK_PUSH(vertex_stack, request, l_stack); 16657c478bd9Sstevel@tonic-gate 16667c478bd9Sstevel@tonic-gate while ((vertex = STACK_TOP(vertex_stack)) != NULL) { 16677c478bd9Sstevel@tonic-gate STACK_POP(vertex_stack, l_stack); 16687c478bd9Sstevel@tonic-gate for (ep = FIRST_ADJ(vertex); ep != HEAD(vertex); 16697c478bd9Sstevel@tonic-gate ep = NEXT_ADJ(ep)) { 16707c478bd9Sstevel@tonic-gate if (IS_RECOMPUTE(ep->to_vertex)) 16717c478bd9Sstevel@tonic-gate continue; 16727c478bd9Sstevel@tonic-gate ep->to_vertex->l_state |= RECOMPUTE_LOCK; 16737c478bd9Sstevel@tonic-gate STACK_PUSH(vertex_stack, ep->to_vertex, l_stack); 16747c478bd9Sstevel@tonic-gate nvertex++; 16757c478bd9Sstevel@tonic-gate } 16767c478bd9Sstevel@tonic-gate } 16777c478bd9Sstevel@tonic-gate 16787c478bd9Sstevel@tonic-gate /* 16797c478bd9Sstevel@tonic-gate * allocate memory for holding the vertex pointers 16807c478bd9Sstevel@tonic-gate */ 16817c478bd9Sstevel@tonic-gate 16827c478bd9Sstevel@tonic-gate if (nvertex) { 16837c478bd9Sstevel@tonic-gate topology = kmem_zalloc(nvertex * sizeof (lock_descriptor_t *), 16847c478bd9Sstevel@tonic-gate KM_SLEEP); 16857c478bd9Sstevel@tonic-gate } 16867c478bd9Sstevel@tonic-gate 16877c478bd9Sstevel@tonic-gate /* 16887c478bd9Sstevel@tonic-gate * one more pass to actually store the vertices in the 16897c478bd9Sstevel@tonic-gate * allocated array. 16907c478bd9Sstevel@tonic-gate * We first check sleeping locks and then active locks 16917c478bd9Sstevel@tonic-gate * so that topology array will be in a topological 16927c478bd9Sstevel@tonic-gate * order. 
16937c478bd9Sstevel@tonic-gate */ 16947c478bd9Sstevel@tonic-gate 16957c478bd9Sstevel@tonic-gate nvertex = 0; 16967c478bd9Sstevel@tonic-gate SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp); 16977c478bd9Sstevel@tonic-gate 16987c478bd9Sstevel@tonic-gate if (lock) { 16997c478bd9Sstevel@tonic-gate do { 17007c478bd9Sstevel@tonic-gate if (IS_RECOMPUTE(lock)) { 17017c478bd9Sstevel@tonic-gate lock->l_index = nvertex; 17027c478bd9Sstevel@tonic-gate topology[nvertex++] = lock; 17037c478bd9Sstevel@tonic-gate } 17047c478bd9Sstevel@tonic-gate lock->l_color = NO_COLOR; 17057c478bd9Sstevel@tonic-gate lock = lock->l_next; 17067c478bd9Sstevel@tonic-gate } while (lock->l_vnode == vp); 17077c478bd9Sstevel@tonic-gate } 17087c478bd9Sstevel@tonic-gate 17097c478bd9Sstevel@tonic-gate SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp); 17107c478bd9Sstevel@tonic-gate 17117c478bd9Sstevel@tonic-gate if (lock) { 17127c478bd9Sstevel@tonic-gate do { 17137c478bd9Sstevel@tonic-gate if (IS_RECOMPUTE(lock)) { 17147c478bd9Sstevel@tonic-gate lock->l_index = nvertex; 17157c478bd9Sstevel@tonic-gate topology[nvertex++] = lock; 17167c478bd9Sstevel@tonic-gate } 17177c478bd9Sstevel@tonic-gate lock->l_color = NO_COLOR; 17187c478bd9Sstevel@tonic-gate lock = lock->l_next; 17197c478bd9Sstevel@tonic-gate } while (lock->l_vnode == vp); 17207c478bd9Sstevel@tonic-gate } 17217c478bd9Sstevel@tonic-gate 17227c478bd9Sstevel@tonic-gate /* 17237c478bd9Sstevel@tonic-gate * remove in and out edges of request 17247c478bd9Sstevel@tonic-gate * They are freed after updating proc_graph below. 
17257c478bd9Sstevel@tonic-gate */ 17267c478bd9Sstevel@tonic-gate 17277c478bd9Sstevel@tonic-gate for (ep = FIRST_IN(request); ep != HEAD(request); ep = NEXT_IN(ep)) { 17287c478bd9Sstevel@tonic-gate ADJ_LIST_REMOVE(ep); 17297c478bd9Sstevel@tonic-gate } 17307c478bd9Sstevel@tonic-gate 17317c478bd9Sstevel@tonic-gate 17327c478bd9Sstevel@tonic-gate if (remove_from_queue) 17337c478bd9Sstevel@tonic-gate REMOVE_SLEEP_QUEUE(request); 17347c478bd9Sstevel@tonic-gate 17357c478bd9Sstevel@tonic-gate /* we are ready to recompute */ 17367c478bd9Sstevel@tonic-gate 17377c478bd9Sstevel@tonic-gate flk_recompute_dependencies(request, topology, nvertex, 1); 17387c478bd9Sstevel@tonic-gate 17397c478bd9Sstevel@tonic-gate ep = FIRST_ADJ(request); 17407c478bd9Sstevel@tonic-gate while (ep != HEAD(request)) { 17417c478bd9Sstevel@tonic-gate IN_LIST_REMOVE(ep); 17427c478bd9Sstevel@tonic-gate request->l_sedge = NEXT_ADJ(ep); 17437c478bd9Sstevel@tonic-gate ADJ_LIST_REMOVE(ep); 17447c478bd9Sstevel@tonic-gate flk_update_proc_graph(ep, 1); 17457c478bd9Sstevel@tonic-gate flk_free_edge(ep); 17467c478bd9Sstevel@tonic-gate ep = request->l_sedge; 17477c478bd9Sstevel@tonic-gate } 17487c478bd9Sstevel@tonic-gate 17497c478bd9Sstevel@tonic-gate 17507c478bd9Sstevel@tonic-gate /* 17517c478bd9Sstevel@tonic-gate * unset the RECOMPUTE flag in those vertices 17527c478bd9Sstevel@tonic-gate */ 17537c478bd9Sstevel@tonic-gate 17547c478bd9Sstevel@tonic-gate for (i = 0; i < nvertex; i++) { 17557c478bd9Sstevel@tonic-gate topology[i]->l_state &= ~RECOMPUTE_LOCK; 17567c478bd9Sstevel@tonic-gate } 17577c478bd9Sstevel@tonic-gate 17587c478bd9Sstevel@tonic-gate /* 17597c478bd9Sstevel@tonic-gate * free the topology 17607c478bd9Sstevel@tonic-gate */ 17617c478bd9Sstevel@tonic-gate if (nvertex) 17627c478bd9Sstevel@tonic-gate kmem_free((void *)topology, 17637c478bd9Sstevel@tonic-gate (nvertex * sizeof (lock_descriptor_t *))); 17647c478bd9Sstevel@tonic-gate /* 17657c478bd9Sstevel@tonic-gate * Possibility of some locks unblocked now 
17667c478bd9Sstevel@tonic-gate */ 17677c478bd9Sstevel@tonic-gate 17687c478bd9Sstevel@tonic-gate flk_wakeup(request, 0); 17697c478bd9Sstevel@tonic-gate 17707c478bd9Sstevel@tonic-gate /* 17717c478bd9Sstevel@tonic-gate * we expect to have a correctly recomputed graph now. 17727c478bd9Sstevel@tonic-gate */ 17737c478bd9Sstevel@tonic-gate flk_set_state(request, FLK_DEAD_STATE); 17747c478bd9Sstevel@tonic-gate flk_free_lock(request); 17757c478bd9Sstevel@tonic-gate CHECK_SLEEPING_LOCKS(gp); 17767c478bd9Sstevel@tonic-gate CHECK_ACTIVE_LOCKS(gp); 17777c478bd9Sstevel@tonic-gate 17787c478bd9Sstevel@tonic-gate } 17797c478bd9Sstevel@tonic-gate 17807c478bd9Sstevel@tonic-gate /* 17817c478bd9Sstevel@tonic-gate * Uncoloring the graph is simply to increment the mark value of the graph 17827c478bd9Sstevel@tonic-gate * And only when wrap round takes place will we color all vertices in 17837c478bd9Sstevel@tonic-gate * the graph explicitly. 17847c478bd9Sstevel@tonic-gate */ 17857c478bd9Sstevel@tonic-gate 17867c478bd9Sstevel@tonic-gate static void 17877c478bd9Sstevel@tonic-gate flk_graph_uncolor(graph_t *gp) 17887c478bd9Sstevel@tonic-gate { 17897c478bd9Sstevel@tonic-gate lock_descriptor_t *lock; 17907c478bd9Sstevel@tonic-gate 17917c478bd9Sstevel@tonic-gate if (gp->mark == UINT_MAX) { 17927c478bd9Sstevel@tonic-gate gp->mark = 1; 17937c478bd9Sstevel@tonic-gate for (lock = ACTIVE_HEAD(gp)->l_next; lock != ACTIVE_HEAD(gp); 17947c478bd9Sstevel@tonic-gate lock = lock->l_next) 17957c478bd9Sstevel@tonic-gate lock->l_color = 0; 17967c478bd9Sstevel@tonic-gate 17977c478bd9Sstevel@tonic-gate for (lock = SLEEPING_HEAD(gp)->l_next; lock != SLEEPING_HEAD(gp); 17987c478bd9Sstevel@tonic-gate lock = lock->l_next) 17997c478bd9Sstevel@tonic-gate lock->l_color = 0; 18007c478bd9Sstevel@tonic-gate } else { 18017c478bd9Sstevel@tonic-gate gp->mark++; 18027c478bd9Sstevel@tonic-gate } 18037c478bd9Sstevel@tonic-gate } 18047c478bd9Sstevel@tonic-gate 18057c478bd9Sstevel@tonic-gate /* 18067c478bd9Sstevel@tonic-gate * 
Wake up locks that are blocked on the given lock. 18077c478bd9Sstevel@tonic-gate */ 18087c478bd9Sstevel@tonic-gate 18097c478bd9Sstevel@tonic-gate static void 18107c478bd9Sstevel@tonic-gate flk_wakeup(lock_descriptor_t *lock, int adj_list_remove) 18117c478bd9Sstevel@tonic-gate { 18127c478bd9Sstevel@tonic-gate edge_t *ep; 18137c478bd9Sstevel@tonic-gate graph_t *gp = lock->l_graph; 18147c478bd9Sstevel@tonic-gate lock_descriptor_t *lck; 18157c478bd9Sstevel@tonic-gate 18167c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&gp->gp_mutex)); 18177c478bd9Sstevel@tonic-gate if (NO_DEPENDENTS(lock)) 18187c478bd9Sstevel@tonic-gate return; 18197c478bd9Sstevel@tonic-gate ep = FIRST_IN(lock); 18207c478bd9Sstevel@tonic-gate do { 18217c478bd9Sstevel@tonic-gate /* 18227c478bd9Sstevel@tonic-gate * delete the edge from the adjacency list 18237c478bd9Sstevel@tonic-gate * of from vertex. if no more adjacent edges 18247c478bd9Sstevel@tonic-gate * for this vertex wake this process. 18257c478bd9Sstevel@tonic-gate */ 18267c478bd9Sstevel@tonic-gate lck = ep->from_vertex; 18277c478bd9Sstevel@tonic-gate if (adj_list_remove) 18287c478bd9Sstevel@tonic-gate ADJ_LIST_REMOVE(ep); 18297c478bd9Sstevel@tonic-gate flk_update_proc_graph(ep, 1); 18307c478bd9Sstevel@tonic-gate if (NOT_BLOCKED(lck)) { 18317c478bd9Sstevel@tonic-gate GRANT_WAKEUP(lck); 18327c478bd9Sstevel@tonic-gate } 18337c478bd9Sstevel@tonic-gate lock->l_sedge = NEXT_IN(ep); 18347c478bd9Sstevel@tonic-gate IN_LIST_REMOVE(ep); 18357c478bd9Sstevel@tonic-gate flk_free_edge(ep); 18367c478bd9Sstevel@tonic-gate ep = lock->l_sedge; 18377c478bd9Sstevel@tonic-gate } while (ep != HEAD(lock)); 18387c478bd9Sstevel@tonic-gate ASSERT(NO_DEPENDENTS(lock)); 18397c478bd9Sstevel@tonic-gate } 18407c478bd9Sstevel@tonic-gate 18417c478bd9Sstevel@tonic-gate /* 18427c478bd9Sstevel@tonic-gate * The dependents of request, is checked for its dependency against the 18437c478bd9Sstevel@tonic-gate * locks in topology (called topology because the array is and should be in 
 * topological order for this algorithm, if not in topological order the
 * inner loop below might add more edges than necessary. Topological ordering
 * of vertices satisfies the property that all edges will be from left to
 * right, i.e., topology[i] can have an edge to topology[j] iff i < j.
 * If lock l1 in the dependent set of request is dependent on (blocked by)
 * lock l2 in topology but does not have a path to it, we add an edge
 * in the inner loop below.
 *
 * We don't want to add an edge between l1 and l2 if there exists
 * already a path from l1 to l2, so care has to be taken for those vertices
 * that have two paths to 'request'. These vertices are referred to here
 * as barrier locks.
 *
 * The barrier vertices have to be found (those vertices that originally had
 * two paths to request) because otherwise we may end up adding edges
 * unnecessarily to vertices in topology; thus a barrier vertex can have both
 * an edge to a vertex in topology as well as a path to it.
 */

static void
flk_recompute_dependencies(lock_descriptor_t *request,
		lock_descriptor_t **topology,
			int nvertex, int update_graph)
{
	lock_descriptor_t *vertex, *lock;
	graph_t	*gp = request->l_graph;
	int i, count;
	int barrier_found = 0;
	edge_t	*ep;
	lock_descriptor_t *vertex_stack;

	STACK_INIT(vertex_stack);

	ASSERT(MUTEX_HELD(&gp->gp_mutex));
	if (nvertex == 0)
		return;
	flk_graph_uncolor(request->l_graph);
	/* mark vertices that have more than one path from 'request' */
	barrier_found = flk_find_barriers(request);
	request->l_state |= RECOMPUTE_DONE;

	STACK_PUSH(vertex_stack, request, l_stack);
	/* l_sedge serves as a cursor over each vertex's in-edge list */
	request->l_sedge = FIRST_IN(request);

	/* iterative depth-first walk over the dependents of 'request' */
	while ((vertex = STACK_TOP(vertex_stack)) != NULL) {
		if (vertex->l_state & RECOMPUTE_DONE) {
			/*
			 * Already recomputed along this path; skip straight
			 * to advancing the in-edge cursor below.
			 */
			count = 0;
			goto next_in_edge;
		}
		if (IS_BARRIER(vertex)) {
			/* decrement the barrier count */
			if (vertex->l_index) {
				vertex->l_index--;
				/* this guy will be pushed again anyway ? */
				STACK_POP(vertex_stack, l_stack);
				if (vertex->l_index == 0) {
					/*
					 * barrier is over we can recompute
					 * dependencies for this lock in the
					 * next stack pop
					 */
					vertex->l_state &= ~BARRIER_LOCK;
				}
				continue;
			}
		}
		vertex->l_state |= RECOMPUTE_DONE;
		flk_graph_uncolor(gp);
		count = flk_color_reachables(vertex);
		/*
		 * Add an edge from 'vertex' to every yet-unreached lock in
		 * topology that blocks it; newly reached vertices are
		 * colored so that no duplicate path is created.
		 */
		for (i = 0; i < nvertex; i++) {
			lock = topology[i];
			if (COLORED(lock))
				continue;
			if (BLOCKS(lock, vertex)) {
				(void) flk_add_edge(vertex, lock,
				    NO_CHECK_CYCLE, update_graph);
				COLOR(lock);
				count++;
				count += flk_color_reachables(lock);
			}

		}

next_in_edge:
		if (count == nvertex ||
		    vertex->l_sedge == HEAD(vertex)) {
			/* prune the tree below this */
			STACK_POP(vertex_stack, l_stack);
			vertex->l_state &= ~RECOMPUTE_DONE;
			/* update the barrier locks below this! */
			if (vertex->l_sedge != HEAD(vertex) && barrier_found) {
				flk_graph_uncolor(gp);
				flk_update_barriers(vertex);
			}
			continue;
		}

		/* descend into the next dependent via the in-edge cursor */
		ep = vertex->l_sedge;
		lock = ep->from_vertex;
		STACK_PUSH(vertex_stack, lock, l_stack);
		lock->l_sedge = FIRST_IN(lock);
		vertex->l_sedge = NEXT_IN(ep);
	}

}

/*
 * Color all reachable vertices from vertex that belongs to topology (here
 * those that have RECOMPUTE_LOCK set in their state) and yet uncolored.
 *
 * Note: we need to use a different stack_link l_stack1 because this is
 * called from flk_recompute_dependencies() that already uses a stack with
 * l_stack as stack_link.
19577c478bd9Sstevel@tonic-gate */ 19587c478bd9Sstevel@tonic-gate 19597c478bd9Sstevel@tonic-gate static int 19607c478bd9Sstevel@tonic-gate flk_color_reachables(lock_descriptor_t *vertex) 19617c478bd9Sstevel@tonic-gate { 19627c478bd9Sstevel@tonic-gate lock_descriptor_t *ver, *lock; 19637c478bd9Sstevel@tonic-gate int count; 19647c478bd9Sstevel@tonic-gate edge_t *ep; 19657c478bd9Sstevel@tonic-gate lock_descriptor_t *vertex_stack; 19667c478bd9Sstevel@tonic-gate 19677c478bd9Sstevel@tonic-gate STACK_INIT(vertex_stack); 19687c478bd9Sstevel@tonic-gate 19697c478bd9Sstevel@tonic-gate STACK_PUSH(vertex_stack, vertex, l_stack1); 19707c478bd9Sstevel@tonic-gate count = 0; 19717c478bd9Sstevel@tonic-gate while ((ver = STACK_TOP(vertex_stack)) != NULL) { 19727c478bd9Sstevel@tonic-gate 19737c478bd9Sstevel@tonic-gate STACK_POP(vertex_stack, l_stack1); 19747c478bd9Sstevel@tonic-gate for (ep = FIRST_ADJ(ver); ep != HEAD(ver); 19757c478bd9Sstevel@tonic-gate ep = NEXT_ADJ(ep)) { 19767c478bd9Sstevel@tonic-gate lock = ep->to_vertex; 19777c478bd9Sstevel@tonic-gate if (COLORED(lock)) 19787c478bd9Sstevel@tonic-gate continue; 19797c478bd9Sstevel@tonic-gate COLOR(lock); 19807c478bd9Sstevel@tonic-gate if (IS_RECOMPUTE(lock)) 19817c478bd9Sstevel@tonic-gate count++; 19827c478bd9Sstevel@tonic-gate STACK_PUSH(vertex_stack, lock, l_stack1); 19837c478bd9Sstevel@tonic-gate } 19847c478bd9Sstevel@tonic-gate 19857c478bd9Sstevel@tonic-gate } 19867c478bd9Sstevel@tonic-gate return (count); 19877c478bd9Sstevel@tonic-gate } 19887c478bd9Sstevel@tonic-gate 19897c478bd9Sstevel@tonic-gate /* 19907c478bd9Sstevel@tonic-gate * Called from flk_recompute_dependencies() this routine decrements 19917c478bd9Sstevel@tonic-gate * the barrier count of barrier vertices that are reachable from lock. 
19927c478bd9Sstevel@tonic-gate */ 19937c478bd9Sstevel@tonic-gate 19947c478bd9Sstevel@tonic-gate static void 19957c478bd9Sstevel@tonic-gate flk_update_barriers(lock_descriptor_t *lock) 19967c478bd9Sstevel@tonic-gate { 19977c478bd9Sstevel@tonic-gate lock_descriptor_t *vertex, *lck; 19987c478bd9Sstevel@tonic-gate edge_t *ep; 19997c478bd9Sstevel@tonic-gate lock_descriptor_t *vertex_stack; 20007c478bd9Sstevel@tonic-gate 20017c478bd9Sstevel@tonic-gate STACK_INIT(vertex_stack); 20027c478bd9Sstevel@tonic-gate 20037c478bd9Sstevel@tonic-gate STACK_PUSH(vertex_stack, lock, l_stack1); 20047c478bd9Sstevel@tonic-gate 20057c478bd9Sstevel@tonic-gate while ((vertex = STACK_TOP(vertex_stack)) != NULL) { 20067c478bd9Sstevel@tonic-gate STACK_POP(vertex_stack, l_stack1); 20077c478bd9Sstevel@tonic-gate for (ep = FIRST_IN(vertex); ep != HEAD(vertex); 20087c478bd9Sstevel@tonic-gate ep = NEXT_IN(ep)) { 20097c478bd9Sstevel@tonic-gate lck = ep->from_vertex; 20107c478bd9Sstevel@tonic-gate if (COLORED(lck)) { 20117c478bd9Sstevel@tonic-gate if (IS_BARRIER(lck)) { 20127c478bd9Sstevel@tonic-gate ASSERT(lck->l_index > 0); 20137c478bd9Sstevel@tonic-gate lck->l_index--; 20147c478bd9Sstevel@tonic-gate if (lck->l_index == 0) 20157c478bd9Sstevel@tonic-gate lck->l_state &= ~BARRIER_LOCK; 20167c478bd9Sstevel@tonic-gate } 20177c478bd9Sstevel@tonic-gate continue; 20187c478bd9Sstevel@tonic-gate } 20197c478bd9Sstevel@tonic-gate COLOR(lck); 20207c478bd9Sstevel@tonic-gate if (IS_BARRIER(lck)) { 20217c478bd9Sstevel@tonic-gate ASSERT(lck->l_index > 0); 20227c478bd9Sstevel@tonic-gate lck->l_index--; 20237c478bd9Sstevel@tonic-gate if (lck->l_index == 0) 20247c478bd9Sstevel@tonic-gate lck->l_state &= ~BARRIER_LOCK; 20257c478bd9Sstevel@tonic-gate } 20267c478bd9Sstevel@tonic-gate STACK_PUSH(vertex_stack, lck, l_stack1); 20277c478bd9Sstevel@tonic-gate } 20287c478bd9Sstevel@tonic-gate } 20297c478bd9Sstevel@tonic-gate } 20307c478bd9Sstevel@tonic-gate 20317c478bd9Sstevel@tonic-gate /* 20327c478bd9Sstevel@tonic-gate * 
Finds all vertices that are reachable from 'lock' more than once and 20337c478bd9Sstevel@tonic-gate * mark them as barrier vertices and increment their barrier count. 20347c478bd9Sstevel@tonic-gate * The barrier count is one minus the total number of paths from lock 20357c478bd9Sstevel@tonic-gate * to that vertex. 20367c478bd9Sstevel@tonic-gate */ 20377c478bd9Sstevel@tonic-gate 20387c478bd9Sstevel@tonic-gate static int 20397c478bd9Sstevel@tonic-gate flk_find_barriers(lock_descriptor_t *lock) 20407c478bd9Sstevel@tonic-gate { 20417c478bd9Sstevel@tonic-gate lock_descriptor_t *vertex, *lck; 20427c478bd9Sstevel@tonic-gate int found = 0; 20437c478bd9Sstevel@tonic-gate edge_t *ep; 20447c478bd9Sstevel@tonic-gate lock_descriptor_t *vertex_stack; 20457c478bd9Sstevel@tonic-gate 20467c478bd9Sstevel@tonic-gate STACK_INIT(vertex_stack); 20477c478bd9Sstevel@tonic-gate 20487c478bd9Sstevel@tonic-gate STACK_PUSH(vertex_stack, lock, l_stack1); 20497c478bd9Sstevel@tonic-gate 20507c478bd9Sstevel@tonic-gate while ((vertex = STACK_TOP(vertex_stack)) != NULL) { 20517c478bd9Sstevel@tonic-gate STACK_POP(vertex_stack, l_stack1); 20527c478bd9Sstevel@tonic-gate for (ep = FIRST_IN(vertex); ep != HEAD(vertex); 20537c478bd9Sstevel@tonic-gate ep = NEXT_IN(ep)) { 20547c478bd9Sstevel@tonic-gate lck = ep->from_vertex; 20557c478bd9Sstevel@tonic-gate if (COLORED(lck)) { 20567c478bd9Sstevel@tonic-gate /* this is a barrier */ 20577c478bd9Sstevel@tonic-gate lck->l_state |= BARRIER_LOCK; 20587c478bd9Sstevel@tonic-gate /* index will have barrier count */ 20597c478bd9Sstevel@tonic-gate lck->l_index++; 20607c478bd9Sstevel@tonic-gate if (!found) 20617c478bd9Sstevel@tonic-gate found = 1; 20627c478bd9Sstevel@tonic-gate continue; 20637c478bd9Sstevel@tonic-gate } 20647c478bd9Sstevel@tonic-gate COLOR(lck); 20657c478bd9Sstevel@tonic-gate lck->l_index = 0; 20667c478bd9Sstevel@tonic-gate STACK_PUSH(vertex_stack, lck, l_stack1); 20677c478bd9Sstevel@tonic-gate } 20687c478bd9Sstevel@tonic-gate } 
20697c478bd9Sstevel@tonic-gate return (found); 20707c478bd9Sstevel@tonic-gate } 20717c478bd9Sstevel@tonic-gate 20727c478bd9Sstevel@tonic-gate /* 20737c478bd9Sstevel@tonic-gate * Finds the first lock that is mainly responsible for blocking this 20747c478bd9Sstevel@tonic-gate * request. If there is no such lock, request->l_flock.l_type is set to 20757c478bd9Sstevel@tonic-gate * F_UNLCK. Otherwise, request->l_flock is filled in with the particulars 20767c478bd9Sstevel@tonic-gate * of the blocking lock. 20777c478bd9Sstevel@tonic-gate * 20787c478bd9Sstevel@tonic-gate * Note: It is possible a request is blocked by a sleeping lock because 20797c478bd9Sstevel@tonic-gate * of the fairness policy used in flk_process_request() to construct the 20807c478bd9Sstevel@tonic-gate * dependencies. (see comments before flk_process_request()). 20817c478bd9Sstevel@tonic-gate */ 20827c478bd9Sstevel@tonic-gate 20837c478bd9Sstevel@tonic-gate static void 20847c478bd9Sstevel@tonic-gate flk_get_first_blocking_lock(lock_descriptor_t *request) 20857c478bd9Sstevel@tonic-gate { 20867c478bd9Sstevel@tonic-gate graph_t *gp = request->l_graph; 20877c478bd9Sstevel@tonic-gate vnode_t *vp = request->l_vnode; 20887c478bd9Sstevel@tonic-gate lock_descriptor_t *lock, *blocker; 20897c478bd9Sstevel@tonic-gate 20907c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&gp->gp_mutex)); 20917c478bd9Sstevel@tonic-gate blocker = NULL; 20927c478bd9Sstevel@tonic-gate SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp); 20937c478bd9Sstevel@tonic-gate 20947c478bd9Sstevel@tonic-gate if (lock) { 20957c478bd9Sstevel@tonic-gate do { 20967c478bd9Sstevel@tonic-gate if (BLOCKS(lock, request)) { 20977c478bd9Sstevel@tonic-gate blocker = lock; 20987c478bd9Sstevel@tonic-gate break; 20997c478bd9Sstevel@tonic-gate } 21007c478bd9Sstevel@tonic-gate lock = lock->l_next; 21017c478bd9Sstevel@tonic-gate } while (lock->l_vnode == vp); 21027c478bd9Sstevel@tonic-gate } 21037c478bd9Sstevel@tonic-gate 21047c478bd9Sstevel@tonic-gate if (blocker) { 
21057c478bd9Sstevel@tonic-gate report_blocker(blocker, request); 21067c478bd9Sstevel@tonic-gate } else 21077c478bd9Sstevel@tonic-gate request->l_flock.l_type = F_UNLCK; 21087c478bd9Sstevel@tonic-gate } 21097c478bd9Sstevel@tonic-gate 21107c478bd9Sstevel@tonic-gate /* 21117c478bd9Sstevel@tonic-gate * Get the graph_t structure associated with a vnode. 21127c478bd9Sstevel@tonic-gate * If 'initialize' is non-zero, and the graph_t structure for this vnode has 21137c478bd9Sstevel@tonic-gate * not yet been initialized, then a new element is allocated and returned. 21147c478bd9Sstevel@tonic-gate */ 21157c478bd9Sstevel@tonic-gate graph_t * 21167c478bd9Sstevel@tonic-gate flk_get_lock_graph(vnode_t *vp, int initialize) 21177c478bd9Sstevel@tonic-gate { 21187c478bd9Sstevel@tonic-gate graph_t *gp; 21197c478bd9Sstevel@tonic-gate graph_t *gp_alloc = NULL; 21207c478bd9Sstevel@tonic-gate int index = HASH_INDEX(vp); 21217c478bd9Sstevel@tonic-gate 21227c478bd9Sstevel@tonic-gate if (initialize == FLK_USE_GRAPH) { 21237c478bd9Sstevel@tonic-gate mutex_enter(&flock_lock); 21247c478bd9Sstevel@tonic-gate gp = lock_graph[index]; 21257c478bd9Sstevel@tonic-gate mutex_exit(&flock_lock); 21267c478bd9Sstevel@tonic-gate return (gp); 21277c478bd9Sstevel@tonic-gate } 21287c478bd9Sstevel@tonic-gate 21297c478bd9Sstevel@tonic-gate ASSERT(initialize == FLK_INIT_GRAPH); 21307c478bd9Sstevel@tonic-gate 21317c478bd9Sstevel@tonic-gate if (lock_graph[index] == NULL) { 21327c478bd9Sstevel@tonic-gate 21337c478bd9Sstevel@tonic-gate gp_alloc = kmem_zalloc(sizeof (graph_t), KM_SLEEP); 21347c478bd9Sstevel@tonic-gate 21357c478bd9Sstevel@tonic-gate /* Initialize the graph */ 21367c478bd9Sstevel@tonic-gate 21377c478bd9Sstevel@tonic-gate gp_alloc->active_locks.l_next = 21387c478bd9Sstevel@tonic-gate gp_alloc->active_locks.l_prev = 21397c478bd9Sstevel@tonic-gate (lock_descriptor_t *)ACTIVE_HEAD(gp_alloc); 21407c478bd9Sstevel@tonic-gate gp_alloc->sleeping_locks.l_next = 21417c478bd9Sstevel@tonic-gate 
gp_alloc->sleeping_locks.l_prev = 21427c478bd9Sstevel@tonic-gate (lock_descriptor_t *)SLEEPING_HEAD(gp_alloc); 21437c478bd9Sstevel@tonic-gate gp_alloc->index = index; 21447c478bd9Sstevel@tonic-gate mutex_init(&gp_alloc->gp_mutex, NULL, MUTEX_DEFAULT, NULL); 21457c478bd9Sstevel@tonic-gate } 21467c478bd9Sstevel@tonic-gate 21477c478bd9Sstevel@tonic-gate mutex_enter(&flock_lock); 21487c478bd9Sstevel@tonic-gate 21497c478bd9Sstevel@tonic-gate gp = lock_graph[index]; 21507c478bd9Sstevel@tonic-gate 21517c478bd9Sstevel@tonic-gate /* Recheck the value within flock_lock */ 21527c478bd9Sstevel@tonic-gate if (gp == NULL) { 21537c478bd9Sstevel@tonic-gate struct flock_globals *fg; 21547c478bd9Sstevel@tonic-gate 21557c478bd9Sstevel@tonic-gate /* We must have previously allocated the graph_t structure */ 21567c478bd9Sstevel@tonic-gate ASSERT(gp_alloc != NULL); 21577c478bd9Sstevel@tonic-gate lock_graph[index] = gp = gp_alloc; 21587c478bd9Sstevel@tonic-gate /* 21597c478bd9Sstevel@tonic-gate * The lockmgr status is only needed if KLM is loaded. 
21607c478bd9Sstevel@tonic-gate */ 21617c478bd9Sstevel@tonic-gate if (flock_zone_key != ZONE_KEY_UNINITIALIZED) { 21627c478bd9Sstevel@tonic-gate fg = flk_get_globals(); 21637c478bd9Sstevel@tonic-gate fg->lockmgr_status[index] = fg->flk_lockmgr_status; 21647c478bd9Sstevel@tonic-gate } 21657c478bd9Sstevel@tonic-gate } 21667c478bd9Sstevel@tonic-gate 21677c478bd9Sstevel@tonic-gate mutex_exit(&flock_lock); 21687c478bd9Sstevel@tonic-gate 21697c478bd9Sstevel@tonic-gate if ((gp_alloc != NULL) && (gp != gp_alloc)) { 21707c478bd9Sstevel@tonic-gate /* There was a race to allocate the graph_t and we lost */ 21717c478bd9Sstevel@tonic-gate mutex_destroy(&gp_alloc->gp_mutex); 21727c478bd9Sstevel@tonic-gate kmem_free(gp_alloc, sizeof (graph_t)); 21737c478bd9Sstevel@tonic-gate } 21747c478bd9Sstevel@tonic-gate 21757c478bd9Sstevel@tonic-gate return (gp); 21767c478bd9Sstevel@tonic-gate } 21777c478bd9Sstevel@tonic-gate 21787c478bd9Sstevel@tonic-gate /* 21797c478bd9Sstevel@tonic-gate * PSARC case 1997/292 21807c478bd9Sstevel@tonic-gate */ 21817c478bd9Sstevel@tonic-gate int 21827c478bd9Sstevel@tonic-gate cl_flk_has_remote_locks_for_nlmid(vnode_t *vp, int nlmid) 21837c478bd9Sstevel@tonic-gate { 21847c478bd9Sstevel@tonic-gate lock_descriptor_t *lock; 21857c478bd9Sstevel@tonic-gate int result = 0; 21867c478bd9Sstevel@tonic-gate graph_t *gp; 21877c478bd9Sstevel@tonic-gate int lock_nlmid; 21887c478bd9Sstevel@tonic-gate 21897c478bd9Sstevel@tonic-gate /* 21907c478bd9Sstevel@tonic-gate * Check to see if node is booted as a cluster. If not, return. 
21917c478bd9Sstevel@tonic-gate */ 21927c478bd9Sstevel@tonic-gate if ((cluster_bootflags & CLUSTER_BOOTED) == 0) { 21937c478bd9Sstevel@tonic-gate return (0); 21947c478bd9Sstevel@tonic-gate } 21957c478bd9Sstevel@tonic-gate 21967c478bd9Sstevel@tonic-gate gp = flk_get_lock_graph(vp, FLK_USE_GRAPH); 21977c478bd9Sstevel@tonic-gate if (gp == NULL) { 21987c478bd9Sstevel@tonic-gate return (0); 21997c478bd9Sstevel@tonic-gate } 22007c478bd9Sstevel@tonic-gate 22017c478bd9Sstevel@tonic-gate mutex_enter(&gp->gp_mutex); 22027c478bd9Sstevel@tonic-gate 22037c478bd9Sstevel@tonic-gate SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp); 22047c478bd9Sstevel@tonic-gate 22057c478bd9Sstevel@tonic-gate if (lock) { 22067c478bd9Sstevel@tonic-gate while (lock->l_vnode == vp) { 22077c478bd9Sstevel@tonic-gate /* get NLM id from sysid */ 22087c478bd9Sstevel@tonic-gate lock_nlmid = GETNLMID(lock->l_flock.l_sysid); 22097c478bd9Sstevel@tonic-gate 22107c478bd9Sstevel@tonic-gate /* 22117c478bd9Sstevel@tonic-gate * If NLM server request _and_ nlmid of lock matches 22127c478bd9Sstevel@tonic-gate * nlmid of argument, then we've found a remote lock. 
22137c478bd9Sstevel@tonic-gate */ 22147c478bd9Sstevel@tonic-gate if (IS_LOCKMGR(lock) && nlmid == lock_nlmid) { 22157c478bd9Sstevel@tonic-gate result = 1; 22167c478bd9Sstevel@tonic-gate goto done; 22177c478bd9Sstevel@tonic-gate } 22187c478bd9Sstevel@tonic-gate lock = lock->l_next; 22197c478bd9Sstevel@tonic-gate } 22207c478bd9Sstevel@tonic-gate } 22217c478bd9Sstevel@tonic-gate 22227c478bd9Sstevel@tonic-gate SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp); 22237c478bd9Sstevel@tonic-gate 22247c478bd9Sstevel@tonic-gate if (lock) { 22257c478bd9Sstevel@tonic-gate while (lock->l_vnode == vp) { 22267c478bd9Sstevel@tonic-gate /* get NLM id from sysid */ 22277c478bd9Sstevel@tonic-gate lock_nlmid = GETNLMID(lock->l_flock.l_sysid); 22287c478bd9Sstevel@tonic-gate 22297c478bd9Sstevel@tonic-gate /* 22307c478bd9Sstevel@tonic-gate * If NLM server request _and_ nlmid of lock matches 22317c478bd9Sstevel@tonic-gate * nlmid of argument, then we've found a remote lock. 22327c478bd9Sstevel@tonic-gate */ 22337c478bd9Sstevel@tonic-gate if (IS_LOCKMGR(lock) && nlmid == lock_nlmid) { 22347c478bd9Sstevel@tonic-gate result = 1; 22357c478bd9Sstevel@tonic-gate goto done; 22367c478bd9Sstevel@tonic-gate } 22377c478bd9Sstevel@tonic-gate lock = lock->l_next; 22387c478bd9Sstevel@tonic-gate } 22397c478bd9Sstevel@tonic-gate } 22407c478bd9Sstevel@tonic-gate 22417c478bd9Sstevel@tonic-gate done: 22427c478bd9Sstevel@tonic-gate mutex_exit(&gp->gp_mutex); 22437c478bd9Sstevel@tonic-gate return (result); 22447c478bd9Sstevel@tonic-gate } 22457c478bd9Sstevel@tonic-gate 22467c478bd9Sstevel@tonic-gate /* ONC_PLUS EXTRACT START */ 22477c478bd9Sstevel@tonic-gate /* 22487c478bd9Sstevel@tonic-gate * Determine whether there are any locks for the given vnode with a remote 22497c478bd9Sstevel@tonic-gate * sysid. Returns zero if not, non-zero if there are. 
 *
 * Note that the return value from this function is potentially invalid
 * once it has been returned. The caller is responsible for providing its
 * own synchronization mechanism to ensure that the return value is useful
 * (e.g., see nfs_lockcompletion()).
 */
int
flk_has_remote_locks(vnode_t *vp)
{
	lock_descriptor_t *lock;
	int result = 0;
	graph_t *gp;

	gp = flk_get_lock_graph(vp, FLK_USE_GRAPH);
	if (gp == NULL) {
		return (0);
	}

	mutex_enter(&gp->gp_mutex);

	SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);

	/* scan the active locks on this vnode for a remote one */
	if (lock) {
		while (lock->l_vnode == vp) {
			if (IS_REMOTE(lock)) {
				result = 1;
				goto done;
			}
			lock = lock->l_next;
		}
	}

	SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);

	/* likewise for the sleeping (pending) locks on this vnode */
	if (lock) {
		while (lock->l_vnode == vp) {
			if (IS_REMOTE(lock)) {
				result = 1;
				goto done;
			}
			lock = lock->l_next;
		}
	}

done:
	mutex_exit(&gp->gp_mutex);
	return (result);
}

/*
 * Determine if there are any locks owned by the given sysid.
 * Returns zero if not, non-zero if there are. Note that this return code
 * could be derived from flk_get_{sleeping,active}_locks, but this routine
 * avoids all the memory allocations of those routines.
 *
 * This routine has the same synchronization issues as
 * flk_has_remote_locks.
 */

int
flk_sysid_has_locks(int sysid, int lck_type)
{
	int		has_locks = 0;
	lock_descriptor_t	*lock;
	graph_t		*gp;
	int		i;

	/* stop at the first graph that has a matching lock */
	for (i = 0; i < HASH_SIZE && !has_locks; i++) {
		mutex_enter(&flock_lock);
		gp = lock_graph[i];
		mutex_exit(&flock_lock);
		if (gp == NULL) {
			continue;
		}

		mutex_enter(&gp->gp_mutex);

		if (lck_type & FLK_QUERY_ACTIVE) {
			for (lock = ACTIVE_HEAD(gp)->l_next;
			    lock != ACTIVE_HEAD(gp) && !has_locks;
			    lock = lock->l_next) {
				if (lock->l_flock.l_sysid == sysid)
					has_locks = 1;
			}
		}

		if (lck_type & FLK_QUERY_SLEEPING) {
			for (lock = SLEEPING_HEAD(gp)->l_next;
			    lock != SLEEPING_HEAD(gp) && !has_locks;
			    lock = lock->l_next) {
				if (lock->l_flock.l_sysid == sysid)
					has_locks = 1;
			}
		}
		mutex_exit(&gp->gp_mutex);
	}

	return (has_locks);
}


/*
 * PSARC case 1997/292
 *
 * Requires: "sysid" is a pair [nlmid, sysid]. The lower half is 16-bit
 * quantity, the real sysid generated by the NLM server; the upper half
 * identifies the node of the cluster where the NLM server ran.
 * This routine is only called by an NLM server running in a cluster.
 * Effects: Remove all locks held on behalf of the client identified
 * by "sysid."
 */
void
cl_flk_remove_locks_by_sysid(int sysid)
{
	graph_t	*gp;
	int i;
	lock_descriptor_t *lock, *nlock;

	/*
	 * Check to see if node is booted as a cluster. If not, return.
	 */
	if ((cluster_bootflags & CLUSTER_BOOTED) == 0) {
		return;
	}

	ASSERT(sysid != 0);
	for (i = 0; i < HASH_SIZE; i++) {
		mutex_enter(&flock_lock);
		gp = lock_graph[i];
		mutex_exit(&flock_lock);

		if (gp == NULL)
			continue;

		mutex_enter(&gp->gp_mutex);	/* get mutex on lock graph */

		/* signal sleeping requests so that they bail out */
		lock = SLEEPING_HEAD(gp)->l_next;
		while (lock != SLEEPING_HEAD(gp)) {
			/* save successor: INTERRUPT_WAKEUP may unlink 'lock' */
			nlock = lock->l_next;
			if (lock->l_flock.l_sysid == sysid) {
				INTERRUPT_WAKEUP(lock);
			}
			lock = nlock;
		}

		/* delete active locks */
		lock = ACTIVE_HEAD(gp)->l_next;
		while (lock != ACTIVE_HEAD(gp)) {
			/* save successor before the lock is deleted/freed */
			nlock = lock->l_next;
			if (lock->l_flock.l_sysid == sysid) {
				flk_delete_active_lock(lock, 0);
				flk_wakeup(lock, 1);
				flk_free_lock(lock);
			}
			lock = nlock;
		}
		mutex_exit(&gp->gp_mutex);	/* release mutex on lock graph */
	}
}

/*
 * Delete all locks in the system that belongs to the sysid of the request.
 */

static void
flk_delete_locks_by_sysid(lock_descriptor_t *request)
{
	int	sysid  = request->l_flock.l_sysid;
	lock_descriptor_t *lock, *nlock;
	graph_t	*gp;
	int i;

	ASSERT(MUTEX_HELD(&request->l_graph->gp_mutex));
	ASSERT(sysid != 0);

	/*
	 * Drop the caller's graph mutex so that each graph's mutex can be
	 * taken in turn below; it is re-acquired before returning.
	 */
	mutex_exit(&request->l_graph->gp_mutex);

	for (i = 0; i < HASH_SIZE; i++) {
		mutex_enter(&flock_lock);
		gp = lock_graph[i];
		mutex_exit(&flock_lock);

		if (gp == NULL)
			continue;

		mutex_enter(&gp->gp_mutex);

		/* signal sleeping requests so that they bail out */
		lock = SLEEPING_HEAD(gp)->l_next;
		while (lock != SLEEPING_HEAD(gp)) {
			/* save successor: INTERRUPT_WAKEUP may unlink 'lock' */
			nlock = lock->l_next;
			if (lock->l_flock.l_sysid == sysid) {
				INTERRUPT_WAKEUP(lock);
			}
			lock = nlock;
		}

		/* delete active locks */
		lock = ACTIVE_HEAD(gp)->l_next;
		while (lock != ACTIVE_HEAD(gp)) {
			/* save successor before the lock is deleted/freed */
			nlock = lock->l_next;
			if (lock->l_flock.l_sysid == sysid) {
				flk_delete_active_lock(lock, 0);
				flk_wakeup(lock, 1);
				flk_free_lock(lock);
			}
			lock = nlock;
		}
		mutex_exit(&gp->gp_mutex);
	}

	mutex_enter(&request->l_graph->gp_mutex);
}

/*
 * Clustering: Deletes PXFS locks
 * Effects: Delete all locks on files in the given file system and with the
 * given PXFS id.
 */
void
cl_flk_delete_pxfs_locks(struct vfs *vfsp, int pxfsid)
{
	lock_descriptor_t *lock, *nlock;
	graph_t	*gp;
	int i;

	/* visit every lock graph; see sibling sysid-removal routines */
	for (i = 0; i < HASH_SIZE; i++) {
		mutex_enter(&flock_lock);
		gp = lock_graph[i];
		mutex_exit(&flock_lock);

		if (gp == NULL)
			continue;

		mutex_enter(&gp->gp_mutex);

		/* signal sleeping requests so that they bail out */
		lock = SLEEPING_HEAD(gp)->l_next;
		while (lock != SLEEPING_HEAD(gp)) {
			nlock = lock->l_next;	/* cancel unlinks 'lock' */
			if (lock->l_vnode->v_vfsp == vfsp) {
				ASSERT(IS_PXFS(lock));
				if (GETPXFSID(lock->l_flock.l_sysid) ==
				    pxfsid) {
					flk_set_state(lock,
					    FLK_CANCELLED_STATE);
					flk_cancel_sleeping_lock(lock, 1);
				}
			}
			lock = nlock;
		}

		/* delete active locks */
		lock = ACTIVE_HEAD(gp)->l_next;
		while (lock != ACTIVE_HEAD(gp)) {
			nlock = lock->l_next;
			if (lock->l_vnode->v_vfsp == vfsp) {
				ASSERT(IS_PXFS(lock));
				if (GETPXFSID(lock->l_flock.l_sysid) ==
				    pxfsid) {
					flk_delete_active_lock(lock, 0);
					flk_wakeup(lock, 1);
					flk_free_lock(lock);
				}
			}
			lock = nlock;
		}
		mutex_exit(&gp->gp_mutex);
	}
}

/*
 * Search for a sleeping lock manager lock which matches exactly this lock
 * request; if one is found, fake a signal to cancel it.
 *
 * Return 1 if a matching lock was found, 0 otherwise.
 */

static int
flk_canceled(lock_descriptor_t *request)
{
	lock_descriptor_t *lock, *nlock;
	graph_t *gp = request->l_graph;
	vnode_t *vp = request->l_vnode;

	ASSERT(MUTEX_HELD(&gp->gp_mutex));
	ASSERT(IS_LOCKMGR(request));

	/* only sleeping locks on the request's vnode are candidates */
	SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);

	if (lock) {
		while (lock->l_vnode == vp) {
			nlock = lock->l_next;
			/* exact match: same owner and same byte range */
			if (SAME_OWNER(lock, request) &&
			    lock->l_start == request->l_start &&
			    lock->l_end == request->l_end) {
				INTERRUPT_WAKEUP(lock);
				return (1);
			}
			lock = nlock;
		}
	}
	return (0);
}

/*
 * Remove all the locks for the vnode belonging to the given pid and sysid.
 * pid == IGN_PID matches locks of any pid (sysid must still match).
 */

void
cleanlocks(vnode_t *vp, pid_t pid, int sysid)
{
	graph_t	*gp;
	lock_descriptor_t *lock, *nlock;
	lock_descriptor_t *link_stack;

	STACK_INIT(link_stack);

	/* FLK_USE_GRAPH: look up an existing graph, do not create one */
	gp = flk_get_lock_graph(vp, FLK_USE_GRAPH);

	if (gp == NULL)
		return;
	mutex_enter(&gp->gp_mutex);

	CHECK_SLEEPING_LOCKS(gp);
	CHECK_ACTIVE_LOCKS(gp);

	SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);

	/* cancel matching sleeping requests on this vnode */
	if (lock) {
		do {
			nlock = lock->l_next;
			if ((lock->l_flock.l_pid == pid ||
			    pid == IGN_PID) &&
			    lock->l_flock.l_sysid == sysid) {
				CANCEL_WAKEUP(lock);
			}
			lock = nlock;
		} while (lock->l_vnode == vp);
	}

	SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);

	/*
	 * Unlink matching active locks, deferring wakeup/free until the
	 * whole list has been walked (they are stacked via l_stack).
	 */
	if (lock) {
		do {
			nlock = lock->l_next;
			if ((lock->l_flock.l_pid == pid ||
			    pid == IGN_PID) &&
			    lock->l_flock.l_sysid == sysid) {
				flk_delete_active_lock(lock, 0);
				STACK_PUSH(link_stack, lock, l_stack);
			}
			lock = nlock;
		} while (lock->l_vnode == vp);
	}

	/* now wake waiters on, and free, each deleted lock */
	while ((lock = STACK_TOP(link_stack)) != NULL) {
		STACK_POP(link_stack, l_stack);
		flk_wakeup(lock, 1);
		flk_free_lock(lock);
	}

	CHECK_SLEEPING_LOCKS(gp);
	CHECK_ACTIVE_LOCKS(gp);
	CHECK_OWNER_LOCKS(gp, pid, sysid, vp);
	mutex_exit(&gp->gp_mutex);
}
/* ONC_PLUS EXTRACT END */


/*
 * Called from 'fs' read and write routines for files that have mandatory
 * locking enabled.
26237c478bd9Sstevel@tonic-gate */ 26247c478bd9Sstevel@tonic-gate 26257c478bd9Sstevel@tonic-gate int 26267c478bd9Sstevel@tonic-gate chklock( 26277c478bd9Sstevel@tonic-gate struct vnode *vp, 26287c478bd9Sstevel@tonic-gate int iomode, 26297c478bd9Sstevel@tonic-gate u_offset_t offset, 26307c478bd9Sstevel@tonic-gate ssize_t len, 26317c478bd9Sstevel@tonic-gate int fmode, 26327c478bd9Sstevel@tonic-gate caller_context_t *ct) 26337c478bd9Sstevel@tonic-gate { 26347c478bd9Sstevel@tonic-gate register int i; 26357c478bd9Sstevel@tonic-gate struct flock64 bf; 26367c478bd9Sstevel@tonic-gate int error = 0; 26377c478bd9Sstevel@tonic-gate 26387c478bd9Sstevel@tonic-gate bf.l_type = (iomode & FWRITE) ? F_WRLCK : F_RDLCK; 26397c478bd9Sstevel@tonic-gate bf.l_whence = 0; 26407c478bd9Sstevel@tonic-gate bf.l_start = offset; 26417c478bd9Sstevel@tonic-gate bf.l_len = len; 26427c478bd9Sstevel@tonic-gate if (ct == NULL) { 26437c478bd9Sstevel@tonic-gate bf.l_pid = curproc->p_pid; 26447c478bd9Sstevel@tonic-gate bf.l_sysid = 0; 26457c478bd9Sstevel@tonic-gate } else { 26467c478bd9Sstevel@tonic-gate bf.l_pid = ct->cc_pid; 26477c478bd9Sstevel@tonic-gate bf.l_sysid = ct->cc_sysid; 26487c478bd9Sstevel@tonic-gate } 26497c478bd9Sstevel@tonic-gate i = (fmode & (FNDELAY|FNONBLOCK)) ? INOFLCK : INOFLCK|SLPFLCK; 26507c478bd9Sstevel@tonic-gate if ((i = reclock(vp, &bf, i, 0, offset, NULL)) != 0 || 26517c478bd9Sstevel@tonic-gate bf.l_type != F_UNLCK) 26527c478bd9Sstevel@tonic-gate error = i ? i : EAGAIN; 26537c478bd9Sstevel@tonic-gate return (error); 26547c478bd9Sstevel@tonic-gate } 26557c478bd9Sstevel@tonic-gate 26567c478bd9Sstevel@tonic-gate /* ONC_PLUS EXTRACT START */ 26577c478bd9Sstevel@tonic-gate /* 26587c478bd9Sstevel@tonic-gate * convoff - converts the given data (start, whence) to the 26597c478bd9Sstevel@tonic-gate * given whence. 
 */
int
convoff(vp, lckdat, whence, offset)
	struct vnode	*vp;
	struct flock64	*lckdat;
	int		whence;
	offset_t	offset;
{
	int 		error;
	struct vattr 	vattr;

	/*
	 * Whence value 2 (SEEK_END-relative) needs the current file size;
	 * fetch it only when either side is end-relative.
	 */
	if ((lckdat->l_whence == 2) || (whence == 2)) {
		vattr.va_mask = AT_SIZE;
		if (error = VOP_GETATTR(vp, &vattr, 0, CRED(), NULL))
			return (error);
	}

	/* first normalize l_start to an absolute file offset */
	switch (lckdat->l_whence) {
	case 1:
		lckdat->l_start += offset;
		break;
	case 2:
		lckdat->l_start += vattr.va_size;
		/* FALLTHRU */
	case 0:
		break;
	default:
		return (EINVAL);
	}

	if (lckdat->l_start < 0)
		return (EINVAL);

	/* then re-express it relative to the requested whence */
	switch (whence) {
	case 1:
		lckdat->l_start -= offset;
		break;
	case 2:
		lckdat->l_start -= vattr.va_size;
		/* FALLTHRU */
	case 0:
		break;
	default:
		return (EINVAL);
	}

	lckdat->l_whence = (short)whence;
	return (0);
}
/* ONC_PLUS EXTRACT END */


/* proc_graph function definitions */

/*
 * Function checks for deadlock due to the new 'lock'. If deadlock found
 * edges of this lock are freed and returned.
 *
 * Works in three phases, all under flock_lock:
 *   1. mirror the lock's out-edges and in-edges as refcounted edges in
 *      the process graph (pgraph);
 *   2. depth-first search the process graph from this lock's owner; a
 *      vertex reached that has been visited but not yet finished means
 *      a cycle, i.e. deadlock;
 *   3. on deadlock, undo phase 1: remove the lock edges and drop the
 *      process-edge references added for them, then return 1.
 */

static int
flk_check_deadlock(lock_descriptor_t *lock)
{
	proc_vertex_t	*start_vertex, *pvertex;
	proc_vertex_t *dvertex;
	proc_edge_t *pep, *ppep;
	edge_t	*ep, *nep;
	proc_vertex_t *process_stack;

	STACK_INIT(process_stack);

	mutex_enter(&flock_lock);
	start_vertex = flk_get_proc_vertex(lock);
	ASSERT(start_vertex != NULL);

	/* construct the edges from this process to other processes */

	ep = FIRST_ADJ(lock);
	while (ep != HEAD(lock)) {
		proc_vertex_t *adj_proc;

		adj_proc = flk_get_proc_vertex(ep->to_vertex);
		/* reuse an existing proc edge if one already points there */
		for (pep = start_vertex->edge; pep != NULL; pep = pep->next) {
			if (pep->to_proc == adj_proc) {
				ASSERT(pep->refcount);
				pep->refcount++;
				break;
			}
		}
		if (pep == NULL) {
			pep = flk_get_proc_edge();
			pep->to_proc = adj_proc;
			pep->refcount = 1;
			adj_proc->incount++;
			pep->next = start_vertex->edge;
			start_vertex->edge = pep;
		}
		ep = NEXT_ADJ(ep);
	}

	/* likewise mirror the in-edges: other processes -> this one */
	ep = FIRST_IN(lock);

	while (ep != HEAD(lock)) {
		proc_vertex_t *in_proc;

		in_proc = flk_get_proc_vertex(ep->from_vertex);

		for (pep = in_proc->edge; pep != NULL; pep = pep->next) {
			if (pep->to_proc == start_vertex) {
				ASSERT(pep->refcount);
				pep->refcount++;
				break;
			}
		}
		if (pep == NULL) {
			pep = flk_get_proc_edge();
			pep->to_proc = start_vertex;
			pep->refcount = 1;
			start_vertex->incount++;
			pep->next = in_proc->edge;
			in_proc->edge = pep;
		}
		ep = NEXT_IN(ep);
	}

	/* nothing waits on this process, so no cycle can involve it */
	if (start_vertex->incount == 0) {
		mutex_exit(&flock_lock);
		return (0);
	}

	flk_proc_graph_uncolor();

	/* iterative DFS; p_sedge remembers each vertex's scan position */
	start_vertex->p_sedge = start_vertex->edge;

	STACK_PUSH(process_stack, start_vertex, p_stack);

	while ((pvertex = STACK_TOP(process_stack)) != NULL) {
		for (pep = pvertex->p_sedge; pep != NULL; pep = pep->next) {
			dvertex = pep->to_proc;
			if (!PROC_ARRIVED(dvertex)) {
				/* descend into an unvisited vertex */
				STACK_PUSH(process_stack, dvertex, p_stack);
				dvertex->p_sedge = dvertex->edge;
				PROC_ARRIVE(pvertex);
				pvertex->p_sedge = pep->next;
				break;
			}
			/* arrived but not departed: back edge -> cycle */
			if (!PROC_DEPARTED(dvertex))
				goto deadlock;
		}
		if (pep == NULL) {
			/* all successors done; finish this vertex */
			PROC_DEPART(pvertex);
			STACK_POP(process_stack, p_stack);
		}
	}
	mutex_exit(&flock_lock);
	return (0);

deadlock:

	/* we remove all lock edges and proc edges */

	ep = FIRST_ADJ(lock);
	while (ep != HEAD(lock)) {
		proc_vertex_t *adj_proc;
		adj_proc = flk_get_proc_vertex(ep->to_vertex);
		nep = NEXT_ADJ(ep);
		IN_LIST_REMOVE(ep);
		ADJ_LIST_REMOVE(ep);
		flk_free_edge(ep);
		/* drop the proc-edge reference this lock edge held */
		ppep = start_vertex->edge;
		for (pep = start_vertex->edge; pep != NULL; ppep = pep,
		    pep = ppep->next) {
			if (pep->to_proc == adj_proc) {
				pep->refcount--;
				if (pep->refcount == 0) {
					if (pep == ppep) {
						start_vertex->edge = pep->next;
					} else {
						ppep->next = pep->next;
					}
					adj_proc->incount--;
					flk_proc_release(adj_proc);
					flk_free_proc_edge(pep);
				}
				break;
			}
		}
		ep = nep;
	}
	ep = FIRST_IN(lock);
	while (ep != HEAD(lock)) {
		proc_vertex_t *in_proc;
		in_proc = flk_get_proc_vertex(ep->from_vertex);
		nep = NEXT_IN(ep);
		IN_LIST_REMOVE(ep);
		ADJ_LIST_REMOVE(ep);
		flk_free_edge(ep);
		ppep = in_proc->edge;
		for (pep = in_proc->edge; pep != NULL; ppep = pep,
		    pep = ppep->next) {
			if (pep->to_proc == start_vertex) {
				pep->refcount--;
				if (pep->refcount == 0) {
					if (pep == ppep) {
						in_proc->edge = pep->next;
					} else {
						ppep->next = pep->next;
					}
					start_vertex->incount--;
					flk_proc_release(in_proc);
					flk_free_proc_edge(pep);
				}
				break;
			}
		}
		ep = nep;
	}
	flk_proc_release(start_vertex);
	mutex_exit(&flock_lock);
	return (1);
}

/*
 * Get a proc vertex. If lock's pvertex value gets a correct proc vertex
 * from the list we return that, otherwise we allocate one. If necessary,
 * we grow the list of vertices also.
 */

static proc_vertex_t *
flk_get_proc_vertex(lock_descriptor_t *lock)
{
	int i;
	proc_vertex_t	*pv;
	proc_vertex_t **palloc;

	ASSERT(MUTEX_HELD(&flock_lock));

	/* fast path: the cached index may still name this lock's owner */
	if (lock->pvertex != -1) {
		ASSERT(lock->pvertex >= 0);
		pv = pgraph.proc[lock->pvertex];
		if (pv != NULL && PROC_SAME_OWNER(lock, pv)) {
			return (pv);
		}
	}

	/* slow path: linear scan of the vertex table for the owner */
	for (i = 0; i < pgraph.gcount; i++) {
		pv = pgraph.proc[i];
		if (pv != NULL && PROC_SAME_OWNER(lock, pv)) {
			lock->pvertex = pv->index = i;	/* refresh cache */
			return (pv);
		}
	}

	/* no vertex for this (pid, sysid) yet; allocate one */
	pv = kmem_zalloc(sizeof (struct proc_vertex), KM_SLEEP);
	pv->pid = lock->l_flock.l_pid;
	pv->sysid = lock->l_flock.l_sysid;
	flk_proc_vertex_allocs++;

	/* reuse a free slot in the table if one exists */
	if (pgraph.free != 0) {
		for (i = 0; i < pgraph.gcount; i++) {
			if (pgraph.proc[i] == NULL) {
				pgraph.proc[i] = pv;
				lock->pvertex = pv->index = i;
				pgraph.free--;
				return (pv);
			}
		}
	}

	/* table is full: grow it by PROC_CHUNK slots and copy it over */
	palloc = kmem_zalloc((pgraph.gcount + PROC_CHUNK) *
	    sizeof (proc_vertex_t *), KM_SLEEP);

	if (pgraph.proc) {
		bcopy(pgraph.proc, palloc,
		    pgraph.gcount * sizeof (proc_vertex_t *));

		kmem_free(pgraph.proc,
		    pgraph.gcount * sizeof (proc_vertex_t *));
	}
	pgraph.proc = palloc;
	pgraph.free += (PROC_CHUNK - 1);	/* one new slot used below */
	pv->index = lock->pvertex = pgraph.gcount;
	pgraph.gcount += PROC_CHUNK;
	pgraph.proc[pv->index] = pv;
	return (pv);
}

/*
 * Allocate a proc edge (zeroed; sleeps for memory).
 */

static proc_edge_t *
flk_get_proc_edge()
{
	proc_edge_t *pep;

	pep = kmem_zalloc(sizeof (proc_edge_t), KM_SLEEP);
	flk_proc_edge_allocs++;	/* statistics counter, unlocked */
	return (pep);
}

/*
 * Free the proc edge. Called whenever its reference count goes to zero.
 */

static void
flk_free_proc_edge(proc_edge_t *pep)
{
	ASSERT(pep->refcount == 0);
	kmem_free((void *)pep, sizeof (proc_edge_t));
	flk_proc_edge_frees++;	/* statistics counter, unlocked */
}

/*
 * Color the graph explicitly done only when the mark value hits max value.
 * Normally "uncoloring" is just bumping pgraph.mark; the full reset of
 * every vertex's arrive/depart times happens only on mark wraparound.
 */

static void
flk_proc_graph_uncolor()
{
	int i;

	if (pgraph.mark == UINT_MAX) {
		for (i = 0; i < pgraph.gcount; i++)
			if (pgraph.proc[i] != NULL) {
				pgraph.proc[i]->atime = 0;
				pgraph.proc[i]->dtime = 0;
			}
		pgraph.mark = 1;
	} else {
		pgraph.mark++;
	}
}

/*
 * Release the proc vertex iff both there are no in edges and out edges
 */

static void
flk_proc_release(proc_vertex_t *proc)
{
	ASSERT(MUTEX_HELD(&flock_lock));
	if (proc->edge == NULL && proc->incount == 0) {
		pgraph.proc[proc->index] = NULL;
		pgraph.free++;
		kmem_free(proc, sizeof (proc_vertex_t));
		flk_proc_vertex_frees++;
	}
}

/*
 * Updates process graph to reflect change in a lock_graph.
 * Note: We should call this function only after we have a correctly
 * recomputed lock graph. Otherwise we might miss a deadlock detection.
 * eg: in function flk_relation() we call this function after flk_recompute_
 * dependencies() otherwise if a process tries to lock a vnode hashed
 * into another graph it might sleep for ever.
 *
 * delete != 0 drops one reference on the from->to proc edge (freeing it
 * and possibly the vertices at refcount zero); delete == 0 adds one,
 * creating the edge if it does not exist yet.
 */

static void
flk_update_proc_graph(edge_t *ep, int delete)
{
	proc_vertex_t *toproc, *fromproc;
	proc_edge_t *pep, *prevpep;

	mutex_enter(&flock_lock);
	toproc = flk_get_proc_vertex(ep->to_vertex);
	fromproc = flk_get_proc_vertex(ep->from_vertex);

	if (!delete)
		goto add;
	pep = prevpep = fromproc->edge;

	ASSERT(pep != NULL);
	while (pep != NULL) {
		if (pep->to_proc == toproc) {
			ASSERT(pep->refcount > 0);
			pep->refcount--;
			if (pep->refcount == 0) {
				/* unlink: head case vs. interior case */
				if (pep == prevpep) {
					fromproc->edge = pep->next;
				} else {
					prevpep->next = pep->next;
				}
				toproc->incount--;
				flk_proc_release(toproc);
				flk_free_proc_edge(pep);
			}
			break;
		}
		prevpep = pep;
		pep = pep->next;
	}
	/* fromproc may now have no edges left; reclaim it if so */
	flk_proc_release(fromproc);
	mutex_exit(&flock_lock);
	return;
add:

	pep = fromproc->edge;

	/* bump refcount if the edge already exists */
	while (pep != NULL) {
		if (pep->to_proc == toproc) {
			ASSERT(pep->refcount > 0);
			pep->refcount++;
			break;
		}
		pep = pep->next;
	}
	if (pep == NULL) {
		/* first reference: create and link a new proc edge */
		pep = flk_get_proc_edge();
		pep->to_proc = toproc;
		pep->refcount = 1;
		toproc->incount++;
		pep->next = fromproc->edge;
		fromproc->edge = pep;
	}
	mutex_exit(&flock_lock);
}

/* ONC_PLUS EXTRACT START */
/*
 * Set the control status for
lock manager requests.
 *
 */

/*
 * PSARC case 1997/292
 *
 * Requires: "nlmid" must be >= 1 and <= clconf_maximum_nodeid().
 * Effects: Set the state of the NLM server identified by "nlmid"
 *   in the NLM registry to state "nlm_state."
 *   Raises exception no_such_nlm if "nlmid" doesn't identify a known
 *   NLM server to this LLM.
 *   Note that when this routine is called with NLM_SHUTTING_DOWN there
 *   may be locks requests that have gotten started but not finished.  In
 *   particular, there may be blocking requests that are in the callback code
 *   before sleeping (so they're not holding the lock for the graph).  If
 *   such a thread reacquires the graph's lock (to go to sleep) after
 *   NLM state in the NLM registry is set to a non-up value,
 *   it will notice the status and bail out.  If the request gets
 *   granted before the thread can check the NLM registry, let it
 *   continue normally.  It will get flushed when we are called with NLM_DOWN.
 *
 * Modifies: nlm_reg_obj (global)
 * Arguments:
 *    nlmid	(IN):    id uniquely identifying an NLM server
 *    nlm_state (IN):    NLM server state to change "nlmid" to
 */
void
cl_flk_set_nlm_status(int nlmid, flk_nlm_status_t nlm_state)
{
	/*
	 * Check to see if node is booted as a cluster. If not, return.
	 */
	if ((cluster_bootflags & CLUSTER_BOOTED) == 0) {
		return;
	}

	/*
	 * Check for development/debugging.  It is possible to boot a node
	 * in non-cluster mode, and then run a special script, currently
	 * available only to developers, to bring up the node as part of a
	 * cluster.  The problem is that running such a script does not
	 * result in the routine flk_init() being called and hence global array
	 * nlm_reg_status is NULL.  The NLM thinks it's in cluster mode,
	 * but the LLM needs to do an additional check to see if the global
	 * array has been created or not.  If nlm_reg_status is NULL, then
	 * return, else continue.
	 */
	if (nlm_reg_status == NULL) {
		return;
	}

	/*
	 * NOTE(review): the block comment above says nlmid must be >= 1,
	 * but this ASSERT also admits nlmid == 0 -- confirm which is the
	 * intended contract.
	 */
	ASSERT(nlmid <= nlm_status_size && nlmid >= 0);
	mutex_enter(&nlm_reg_lock);

	if (FLK_REGISTRY_IS_NLM_UNKNOWN(nlm_reg_status, nlmid)) {
		/*
		 * If the NLM server "nlmid" is unknown in the NLM registry,
		 * add it to the registry in the nlm shutting down state.
		 */
		FLK_REGISTRY_CHANGE_NLM_STATE(nlm_reg_status, nlmid,
		    FLK_NLM_SHUTTING_DOWN);
	} else {
		/*
		 * Change the state of the NLM server identified by "nlmid"
		 * in the NLM registry to the argument "nlm_state."
		 */
		FLK_REGISTRY_CHANGE_NLM_STATE(nlm_reg_status, nlmid,
		    nlm_state);
	}

	/*
	 * The reason we must register the NLM server that is shutting down
	 * with an LLM that doesn't already know about it (never sent a lock
	 * request) is to handle correctly a race between shutdown and a new
	 * lock request.  Suppose that a shutdown request from the NLM server
	 * invokes this routine at the LLM, and a thread is spawned to
	 * service the request.  Now suppose a new lock request is in
	 * progress and has already passed the first line of defense in
	 * reclock(), which denies new locks requests from NLM servers
	 * that are not in the NLM_UP state.  After the current routine
	 * is invoked for both phases of shutdown, the routine will return,
	 * having done nothing, and the lock request will proceed and
	 * probably be granted.  The problem is that the shutdown was ignored
	 * by the lock request because there was no record of that NLM server
	 * shutting down.  We will be in the peculiar position of thinking
	 * that we've shutdown the NLM server and all locks at all LLMs have
	 * been discarded, but in fact there's still one lock held.
	 * The solution is to record the existence of NLM server and change
	 * its state immediately to NLM_SHUTTING_DOWN.  The lock request in
	 * progress may proceed because the next phase NLM_DOWN will catch
	 * this lock and discard it.
	 */
	mutex_exit(&nlm_reg_lock);

	switch (nlm_state) {
	case FLK_NLM_UP:
		/*
		 * Change the NLM state of all locks still held on behalf of
		 * the NLM server identified by "nlmid" to NLM_UP.
		 */
		cl_flk_change_nlm_state_all_locks(nlmid, FLK_NLM_UP);
		break;

	case FLK_NLM_SHUTTING_DOWN:
		/*
		 * Wake up all sleeping locks for the NLM server identified
		 * by "nlmid." Note that eventually all woken threads will
		 * have their lock requests cancelled and descriptors
		 * removed from the sleeping lock list.  Note that the NLM
		 * server state associated with each lock descriptor is
		 * changed to FLK_NLM_SHUTTING_DOWN.
		 */
		cl_flk_wakeup_sleeping_nlm_locks(nlmid);
		break;

	case FLK_NLM_DOWN:
		/*
		 * Discard all active, granted locks for this NLM server
		 * identified by "nlmid."
		 */
		cl_flk_unlock_nlm_granted(nlmid);
		break;

	default:
		panic("cl_set_nlm_status: bad status (%d)", nlm_state);
	}
}

/*
 * Set the control status for lock manager requests.
 *
 * Note that when this routine is called with FLK_WAKEUP_SLEEPERS, there
 * may be locks requests that have gotten started but not finished.
In
 * particular, there may be blocking requests that are in the callback code
 * before sleeping (so they're not holding the lock for the graph).  If
 * such a thread reacquires the graph's lock (to go to sleep) after
 * flk_lockmgr_status is set to a non-up value, it will notice the status
 * and bail out.  If the request gets granted before the thread can check
 * flk_lockmgr_status, let it continue normally.  It will get flushed when
 * we are called with FLK_LOCKMGR_DOWN.
 */

void
flk_set_lockmgr_status(flk_lockmgr_status_t status)
{
	int i;
	graph_t *gp;
	struct flock_globals *fg;

	fg = flk_get_globals();
	ASSERT(fg != NULL);

	/* Publish the new global status first, under flock_lock. */
	mutex_enter(&flock_lock);
	fg->flk_lockmgr_status = status;
	mutex_exit(&flock_lock);

	/*
	 * If the lock manager is coming back up, all that's needed is to
	 * propagate this information to the graphs.  If the lock manager
	 * is going down, additional action is required, and each graph's
	 * copy of the state is updated atomically with this other action.
	 */
	switch (status) {
	case FLK_LOCKMGR_UP:
		for (i = 0; i < HASH_SIZE; i++) {
			mutex_enter(&flock_lock);
			gp = lock_graph[i];
			mutex_exit(&flock_lock);
			if (gp == NULL)
				continue;
			/*
			 * Each graph's copy is updated under that graph's
			 * own mutex so waiters observe it consistently.
			 */
			mutex_enter(&gp->gp_mutex);
			fg->lockmgr_status[i] = status;
			mutex_exit(&gp->gp_mutex);
		}
		break;
	case FLK_WAKEUP_SLEEPERS:
		wakeup_sleeping_lockmgr_locks(fg);
		break;
	case FLK_LOCKMGR_DOWN:
		unlock_lockmgr_granted(fg);
		break;
	default:
		panic("flk_set_lockmgr_status: bad status (%d)", status);
		break;
	}
}

/*
 * This routine returns all the locks that are active or sleeping and are
 * associated with a particular set of identifiers.  If lock_state != 0, then
 * only locks that match the lock_state are returned. If lock_state == 0, then
 * all locks are returned. If pid == NOPID, the pid is ignored.  If
 * use_sysid is FALSE, then the sysid is ignored.  If vp is NULL, then the
 * vnode pointer is ignored.
 *
 * A list containing the vnode pointer and an flock structure
 * describing the lock is returned.  Each element in the list is
 * dynamically allocated and must be freed by the caller.  The
 * last item in the list is denoted by a NULL value in the ll_next
 * field.
 *
 * The vnode pointers returned are held.  The caller is responsible
 * for releasing these.  Note that the returned list is only a snapshot of
 * the current lock information, and that it is a snapshot of a moving
 * target (only one graph is locked at a time).
 */

locklist_t *
get_lock_list(int list_type, int lock_state, int sysid, boolean_t use_sysid,
	pid_t pid, const vnode_t *vp, zoneid_t zoneid)
{
	lock_descriptor_t	*lock;
	lock_descriptor_t	*graph_head;
	locklist_t		listhead;	/* stack dummy list head */
	locklist_t		*llheadp;
	locklist_t		*llp;
	locklist_t		*lltp;		/* tail of list being built */
	graph_t			*gp;
	int			i;
	int			first_index;	/* graph index */
	int			num_indexes;	/* graph index */

	ASSERT((list_type == FLK_ACTIVE_STATE) ||
	    (list_type == FLK_SLEEPING_STATE));

	/*
	 * Get a pointer to something to use as a list head while building
	 * the rest of the list.  The dummy node itself is never returned;
	 * only listhead.ll_next is handed back to the caller.
	 */
	llheadp = &listhead;
	lltp = llheadp;
	llheadp->ll_next = (locklist_t *)NULL;

	/*
	 * Figure out which graphs we want to look at.  A non-NULL vp
	 * restricts the scan to the single graph the vnode hashes to.
	 */
	if (vp == NULL) {
		first_index = 0;
		num_indexes = HASH_SIZE;
	} else {
		first_index = HASH_INDEX(vp);
		num_indexes = 1;
	}

	for (i = first_index; i < first_index + num_indexes; i++) {
		mutex_enter(&flock_lock);
		gp = lock_graph[i];
		mutex_exit(&flock_lock);
		if (gp == NULL) {
			continue;
		}

		mutex_enter(&gp->gp_mutex);
		graph_head = (list_type == FLK_ACTIVE_STATE) ?
		    ACTIVE_HEAD(gp) : SLEEPING_HEAD(gp);
		for (lock = graph_head->l_next;
		    lock != graph_head;
		    lock = lock->l_next) {
			/* Skip locks that fail any requested filter. */
			if (use_sysid && lock->l_flock.l_sysid != sysid)
				continue;
			if (pid != NOPID && lock->l_flock.l_pid != pid)
				continue;
			if (vp != NULL && lock->l_vnode != vp)
				continue;
			if (lock_state && !(lock_state & lock->l_state))
				continue;
			if (zoneid != lock->l_zoneid && zoneid != ALL_ZONES)
				continue;
			/*
			 * A matching lock was found.  Allocate
			 * space for a new locklist entry and fill
			 * it in.  The vnode is held here; the caller
			 * must VN_RELE it (e.g. via flk_free_locklist).
			 */
			llp = kmem_alloc(sizeof (locklist_t), KM_SLEEP);
			lltp->ll_next = llp;
			VN_HOLD(lock->l_vnode);
			llp->ll_vp = lock->l_vnode;
			create_flock(lock, &(llp->ll_flock));
			llp->ll_next = (locklist_t *)NULL;
			lltp = llp;
		}
		mutex_exit(&gp->gp_mutex);
	}

	llp = llheadp->ll_next;
	return (llp);
}

/*
 * These two functions are simply interfaces to get_lock_list.
They return 33647c478bd9Sstevel@tonic-gate * a list of sleeping or active locks for the given sysid and pid. See 33657c478bd9Sstevel@tonic-gate * get_lock_list for details. 33667c478bd9Sstevel@tonic-gate * 33677c478bd9Sstevel@tonic-gate * In either case we don't particularly care to specify the zone of interest; 33687c478bd9Sstevel@tonic-gate * the sysid-space is global across zones, so the sysid will map to exactly one 33697c478bd9Sstevel@tonic-gate * zone, and we'll return information for that zone. 33707c478bd9Sstevel@tonic-gate */ 33717c478bd9Sstevel@tonic-gate 33727c478bd9Sstevel@tonic-gate locklist_t * 33737c478bd9Sstevel@tonic-gate flk_get_sleeping_locks(int sysid, pid_t pid) 33747c478bd9Sstevel@tonic-gate { 33757c478bd9Sstevel@tonic-gate return (get_lock_list(FLK_SLEEPING_STATE, 0, sysid, B_TRUE, pid, NULL, 33767c478bd9Sstevel@tonic-gate ALL_ZONES)); 33777c478bd9Sstevel@tonic-gate } 33787c478bd9Sstevel@tonic-gate 33797c478bd9Sstevel@tonic-gate locklist_t * 33807c478bd9Sstevel@tonic-gate flk_get_active_locks(int sysid, pid_t pid) 33817c478bd9Sstevel@tonic-gate { 33827c478bd9Sstevel@tonic-gate return (get_lock_list(FLK_ACTIVE_STATE, 0, sysid, B_TRUE, pid, NULL, 33837c478bd9Sstevel@tonic-gate ALL_ZONES)); 33847c478bd9Sstevel@tonic-gate } 33857c478bd9Sstevel@tonic-gate 33867c478bd9Sstevel@tonic-gate /* 33877c478bd9Sstevel@tonic-gate * Another interface to get_lock_list. This one returns all the active 33887c478bd9Sstevel@tonic-gate * locks for a given vnode. Again, see get_lock_list for details. 33897c478bd9Sstevel@tonic-gate * 33907c478bd9Sstevel@tonic-gate * We don't need to specify which zone's locks we're interested in. The matter 33917c478bd9Sstevel@tonic-gate * would only be interesting if the vnode belonged to NFS, and NFS vnodes can't 33927c478bd9Sstevel@tonic-gate * be used by multiple zones, so the list of locks will all be from the right 33937c478bd9Sstevel@tonic-gate * zone. 
33947c478bd9Sstevel@tonic-gate */ 33957c478bd9Sstevel@tonic-gate 33967c478bd9Sstevel@tonic-gate locklist_t * 33977c478bd9Sstevel@tonic-gate flk_active_locks_for_vp(const vnode_t *vp) 33987c478bd9Sstevel@tonic-gate { 33997c478bd9Sstevel@tonic-gate return (get_lock_list(FLK_ACTIVE_STATE, 0, 0, B_FALSE, NOPID, vp, 34007c478bd9Sstevel@tonic-gate ALL_ZONES)); 34017c478bd9Sstevel@tonic-gate } 34027c478bd9Sstevel@tonic-gate 34037c478bd9Sstevel@tonic-gate /* 34047c478bd9Sstevel@tonic-gate * Another interface to get_lock_list. This one returns all the active 34057c478bd9Sstevel@tonic-gate * nbmand locks for a given vnode. Again, see get_lock_list for details. 34067c478bd9Sstevel@tonic-gate * 34077c478bd9Sstevel@tonic-gate * See the comment for flk_active_locks_for_vp() for why we don't care to 34087c478bd9Sstevel@tonic-gate * specify the particular zone of interest. 34097c478bd9Sstevel@tonic-gate */ 34107c478bd9Sstevel@tonic-gate locklist_t * 34117c478bd9Sstevel@tonic-gate flk_active_nbmand_locks_for_vp(const vnode_t *vp) 34127c478bd9Sstevel@tonic-gate { 34137c478bd9Sstevel@tonic-gate return (get_lock_list(FLK_ACTIVE_STATE, NBMAND_LOCK, 0, B_FALSE, 34147c478bd9Sstevel@tonic-gate NOPID, vp, ALL_ZONES)); 34157c478bd9Sstevel@tonic-gate } 34167c478bd9Sstevel@tonic-gate 34177c478bd9Sstevel@tonic-gate /* 34187c478bd9Sstevel@tonic-gate * Another interface to get_lock_list. This one returns all the active 34197c478bd9Sstevel@tonic-gate * nbmand locks for a given pid. Again, see get_lock_list for details. 34207c478bd9Sstevel@tonic-gate * 34217c478bd9Sstevel@tonic-gate * The zone doesn't need to be specified here; the locks held by a 34227c478bd9Sstevel@tonic-gate * particular process will either be local (ie, non-NFS) or from the zone 34237c478bd9Sstevel@tonic-gate * the process is executing in. 
This is because other parts of the system
 * ensure that an NFS vnode can't be used in a zone other than that in
 * which it was opened.
 */
locklist_t *
flk_active_nbmand_locks(pid_t pid)
{
	return (get_lock_list(FLK_ACTIVE_STATE, NBMAND_LOCK, 0, B_FALSE,
	    pid, NULL, ALL_ZONES));
}

/*
 * Free up all entries in the locklist.
 * Releases the vnode hold taken by get_lock_list and frees each
 * list element; the list pointer is invalid after this returns.
 */
void
flk_free_locklist(locklist_t *llp)
{
	locklist_t *next_llp;

	while (llp) {
		next_llp = llp->ll_next;
		VN_RELE(llp->ll_vp);
		kmem_free(llp, sizeof (*llp));
		llp = next_llp;
	}
}

static void
cl_flk_change_nlm_state_all_locks(int nlmid, flk_nlm_status_t nlm_state)
{
	/*
	 * For each graph "lg" in the hash table lock_graph do
	 * a. Get the list of sleeping locks
	 * b. For each lock descriptor in the list do
	 *	i.
 If the requested lock is an NLM server request AND
	 *		the nlmid is the same as the routine argument then
	 *		change the lock descriptor's state field to
	 *		"nlm_state."
	 * c. Get the list of active locks
	 * d. For each lock descriptor in the list do
	 *	i. If the requested lock is an NLM server request AND
	 *		the nlmid is the same as the routine argument then
	 *		change the lock descriptor's state field to
	 *		"nlm_state."
	 */

	int i;
	graph_t *gp;			/* lock graph */
	lock_descriptor_t *lock;	/* lock */
	lock_descriptor_t *nlock = NULL; /* next lock */
	int lock_nlmid;

	for (i = 0; i < HASH_SIZE; i++) {
		mutex_enter(&flock_lock);
		gp = lock_graph[i];
		mutex_exit(&flock_lock);
		if (gp == NULL) {
			continue;
		}

		/* Get list of sleeping locks in current lock graph. */
		mutex_enter(&gp->gp_mutex);
		for (lock = SLEEPING_HEAD(gp)->l_next;
		    lock != SLEEPING_HEAD(gp);
		    lock = nlock) {
			/* Capture next before touching this descriptor. */
			nlock = lock->l_next;
			/* get NLM id */
			lock_nlmid = GETNLMID(lock->l_flock.l_sysid);

			/*
			 * If NLM server request AND nlmid of lock matches
			 * nlmid of argument, then set the NLM state of the
			 * lock to "nlm_state."
			 */
			if (IS_LOCKMGR(lock) && nlmid == lock_nlmid) {
				SET_NLM_STATE(lock, nlm_state);
			}
		}

		/* Get list of active locks in current lock graph. */
		for (lock = ACTIVE_HEAD(gp)->l_next;
		    lock != ACTIVE_HEAD(gp);
		    lock = nlock) {
			nlock = lock->l_next;
			/* get NLM id */
			lock_nlmid = GETNLMID(lock->l_flock.l_sysid);

			/*
			 * If NLM server request AND nlmid of lock matches
			 * nlmid of argument, then set the NLM state of the
			 * lock to "nlm_state."
			 */
			if (IS_LOCKMGR(lock) && nlmid == lock_nlmid) {
				ASSERT(IS_ACTIVE(lock));
				SET_NLM_STATE(lock, nlm_state);
			}
		}
		mutex_exit(&gp->gp_mutex);
	}
}

/*
 * Requires:	"nlmid" >= 1 and <= clconf_maximum_nodeid().
 * Effects:	Find all sleeping lock manager requests _only_ for the NLM
 *		server identified by "nlmid." Poke those lock requests.
 */
static void
cl_flk_wakeup_sleeping_nlm_locks(int nlmid)
{
	lock_descriptor_t *lock;
	lock_descriptor_t *nlock = NULL; /* next lock */
	int i;
	graph_t *gp;
	int lock_nlmid;

	for (i = 0; i < HASH_SIZE; i++) {
		mutex_enter(&flock_lock);
		gp = lock_graph[i];
		mutex_exit(&flock_lock);
		if (gp == NULL) {
			continue;
		}

		mutex_enter(&gp->gp_mutex);
		for (lock = SLEEPING_HEAD(gp)->l_next;
		    lock != SLEEPING_HEAD(gp);
		    lock = nlock) {
			nlock = lock->l_next;
			/*
			 * If NLM server
request _and_ nlmid of lock matches
			 * nlmid of argument, then set the NLM state of the
			 * lock to NLM_SHUTTING_DOWN, and wake up sleeping
			 * request.
			 */
			if (IS_LOCKMGR(lock)) {
				/* get NLM id */
				lock_nlmid =
				    GETNLMID(lock->l_flock.l_sysid);
				if (nlmid == lock_nlmid) {
					SET_NLM_STATE(lock,
					    FLK_NLM_SHUTTING_DOWN);
					INTERRUPT_WAKEUP(lock);
				}
			}
		}
		mutex_exit(&gp->gp_mutex);
	}
}

/*
 * Requires:	"nlmid" >= 1 and <= clconf_maximum_nodeid()
 * Effects:	Find all active (granted) lock manager locks _only_ for the
 *		NLM server identified by "nlmid" and release them.
 */
static void
cl_flk_unlock_nlm_granted(int nlmid)
{
	lock_descriptor_t *lock;
	lock_descriptor_t *nlock = NULL; /* next lock */
	int i;
	graph_t *gp;
	int lock_nlmid;

	for (i = 0; i < HASH_SIZE; i++) {
		mutex_enter(&flock_lock);
		gp = lock_graph[i];
		mutex_exit(&flock_lock);
		if (gp == NULL) {
			continue;
		}

		mutex_enter(&gp->gp_mutex);
		for (lock = ACTIVE_HEAD(gp)->l_next;
		    lock != ACTIVE_HEAD(gp);
		    lock = nlock) {
			/* Capture next first; "lock" may be freed below. */
			nlock = lock->l_next;
			ASSERT(IS_ACTIVE(lock));

			/*
			 * If it's an NLM server request _and_ nlmid of
			 * the lock matches nlmid of argument, then
			 * remove the active lock from the list, wake up
			 * blocked threads, and free the storage for the lock.
			 * Note that there's no need to mark the NLM state
			 * of this lock to NLM_DOWN because the lock will
			 * be deleted anyway and its storage freed.
			 */
			if (IS_LOCKMGR(lock)) {
				/* get NLM id */
				lock_nlmid = GETNLMID(lock->l_flock.l_sysid);
				if (nlmid == lock_nlmid) {
					flk_delete_active_lock(lock, 0);
					flk_wakeup(lock, 1);
					flk_free_lock(lock);
				}
			}
		}
		mutex_exit(&gp->gp_mutex);
	}
}

/*
 * Find all sleeping lock manager requests and poke them.
 * Only locks belonging to the caller's zone are woken.
 */
static void
wakeup_sleeping_lockmgr_locks(struct flock_globals *fg)
{
	lock_descriptor_t *lock;
	lock_descriptor_t *nlock = NULL; /* next lock */
	int i;
	graph_t *gp;
	zoneid_t zoneid = getzoneid();

	for (i = 0; i < HASH_SIZE; i++) {
		mutex_enter(&flock_lock);
		gp = lock_graph[i];
		mutex_exit(&flock_lock);
		if (gp == NULL) {
			continue;
		}

		mutex_enter(&gp->gp_mutex);
		/* Mark this graph's status while its mutex is held. */
		fg->lockmgr_status[i] = FLK_WAKEUP_SLEEPERS;
		for (lock = SLEEPING_HEAD(gp)->l_next;
		    lock != SLEEPING_HEAD(gp);
		    lock = nlock) {
			nlock = lock->l_next;
			if (IS_LOCKMGR(lock) && lock->l_zoneid == zoneid) {
				INTERRUPT_WAKEUP(lock);
			}
		}
		mutex_exit(&gp->gp_mutex);
	}
}


/*
 * Find all active (granted) lock manager locks and release them.
 * Only locks belonging to the caller's zone are released.
 */
static void
unlock_lockmgr_granted(struct flock_globals *fg)
{
	lock_descriptor_t *lock;
	lock_descriptor_t *nlock = NULL; /* next lock */
	int i;
	graph_t *gp;
	zoneid_t zoneid = getzoneid();

	for (i = 0; i < HASH_SIZE; i++) {
		mutex_enter(&flock_lock);
		gp = lock_graph[i];
		mutex_exit(&flock_lock);
		if (gp == NULL) {
			continue;
		}

		mutex_enter(&gp->gp_mutex);
		/* Mark this graph's status while its mutex is held. */
		fg->lockmgr_status[i] = FLK_LOCKMGR_DOWN;
		for (lock = ACTIVE_HEAD(gp)->l_next;
		    lock != ACTIVE_HEAD(gp);
		    lock = nlock) {
			nlock = lock->l_next;
			if (IS_LOCKMGR(lock) && lock->l_zoneid == zoneid) {
				ASSERT(IS_ACTIVE(lock));
36877c478bd9Sstevel@tonic-gate flk_delete_active_lock(lock, 0); 36887c478bd9Sstevel@tonic-gate flk_wakeup(lock, 1); 36897c478bd9Sstevel@tonic-gate flk_free_lock(lock); 36907c478bd9Sstevel@tonic-gate } 36917c478bd9Sstevel@tonic-gate } 36927c478bd9Sstevel@tonic-gate mutex_exit(&gp->gp_mutex); 36937c478bd9Sstevel@tonic-gate } 36947c478bd9Sstevel@tonic-gate } 36957c478bd9Sstevel@tonic-gate /* ONC_PLUS EXTRACT END */ 36967c478bd9Sstevel@tonic-gate 36977c478bd9Sstevel@tonic-gate 36987c478bd9Sstevel@tonic-gate /* 36997c478bd9Sstevel@tonic-gate * Wait until a lock is granted, cancelled, or interrupted. 37007c478bd9Sstevel@tonic-gate */ 37017c478bd9Sstevel@tonic-gate 37027c478bd9Sstevel@tonic-gate static void 37037c478bd9Sstevel@tonic-gate wait_for_lock(lock_descriptor_t *request) 37047c478bd9Sstevel@tonic-gate { 37057c478bd9Sstevel@tonic-gate graph_t *gp = request->l_graph; 37067c478bd9Sstevel@tonic-gate 37077c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&gp->gp_mutex)); 37087c478bd9Sstevel@tonic-gate 37097c478bd9Sstevel@tonic-gate while (!(IS_GRANTED(request)) && !(IS_CANCELLED(request)) && 37107c478bd9Sstevel@tonic-gate !(IS_INTERRUPTED(request))) { 37117c478bd9Sstevel@tonic-gate if (!cv_wait_sig(&request->l_cv, &gp->gp_mutex)) { 37127c478bd9Sstevel@tonic-gate flk_set_state(request, FLK_INTERRUPTED_STATE); 37137c478bd9Sstevel@tonic-gate request->l_state |= INTERRUPTED_LOCK; 37147c478bd9Sstevel@tonic-gate } 37157c478bd9Sstevel@tonic-gate } 37167c478bd9Sstevel@tonic-gate } 37177c478bd9Sstevel@tonic-gate 37187c478bd9Sstevel@tonic-gate /* ONC_PLUS EXTRACT START */ 37197c478bd9Sstevel@tonic-gate /* 37207c478bd9Sstevel@tonic-gate * Create an flock structure from the existing lock information 37217c478bd9Sstevel@tonic-gate * 37227c478bd9Sstevel@tonic-gate * This routine is used to create flock structures for the lock manager 3723*da6c28aaSamw * to use in a reclaim request. 
Since the lock was originated on this 37247c478bd9Sstevel@tonic-gate * host, it must be conforming to UNIX semantics, so no checking is 37257c478bd9Sstevel@tonic-gate * done to make sure it falls within the lower half of the 32-bit range. 37267c478bd9Sstevel@tonic-gate */ 37277c478bd9Sstevel@tonic-gate 37287c478bd9Sstevel@tonic-gate static void 37297c478bd9Sstevel@tonic-gate create_flock(lock_descriptor_t *lp, flock64_t *flp) 37307c478bd9Sstevel@tonic-gate { 37317c478bd9Sstevel@tonic-gate ASSERT(lp->l_end == MAX_U_OFFSET_T || lp->l_end <= MAXEND); 37327c478bd9Sstevel@tonic-gate ASSERT(lp->l_end >= lp->l_start); 37337c478bd9Sstevel@tonic-gate 37347c478bd9Sstevel@tonic-gate flp->l_type = lp->l_type; 37357c478bd9Sstevel@tonic-gate flp->l_whence = 0; 37367c478bd9Sstevel@tonic-gate flp->l_start = lp->l_start; 37377c478bd9Sstevel@tonic-gate flp->l_len = (lp->l_end == MAX_U_OFFSET_T) ? 0 : 37387c478bd9Sstevel@tonic-gate (lp->l_end - lp->l_start + 1); 37397c478bd9Sstevel@tonic-gate flp->l_sysid = lp->l_flock.l_sysid; 37407c478bd9Sstevel@tonic-gate flp->l_pid = lp->l_flock.l_pid; 37417c478bd9Sstevel@tonic-gate } 37427c478bd9Sstevel@tonic-gate 37437c478bd9Sstevel@tonic-gate /* 37447c478bd9Sstevel@tonic-gate * Convert flock_t data describing a lock range into unsigned long starting 37457c478bd9Sstevel@tonic-gate * and ending points, which are put into lock_request. Returns 0 or an 37467c478bd9Sstevel@tonic-gate * errno value. 37477c478bd9Sstevel@tonic-gate * Large Files: max is passed by the caller and we return EOVERFLOW 37487c478bd9Sstevel@tonic-gate * as defined by LFS API. 
37497c478bd9Sstevel@tonic-gate */ 37507c478bd9Sstevel@tonic-gate 37517c478bd9Sstevel@tonic-gate int 37527c478bd9Sstevel@tonic-gate flk_convert_lock_data(vnode_t *vp, flock64_t *flp, 37537c478bd9Sstevel@tonic-gate u_offset_t *start, u_offset_t *end, offset_t offset) 37547c478bd9Sstevel@tonic-gate { 37557c478bd9Sstevel@tonic-gate struct vattr vattr; 37567c478bd9Sstevel@tonic-gate int error; 37577c478bd9Sstevel@tonic-gate 37587c478bd9Sstevel@tonic-gate /* 37597c478bd9Sstevel@tonic-gate * Determine the starting point of the request 37607c478bd9Sstevel@tonic-gate */ 37617c478bd9Sstevel@tonic-gate switch (flp->l_whence) { 37627c478bd9Sstevel@tonic-gate case 0: /* SEEK_SET */ 37637c478bd9Sstevel@tonic-gate *start = (u_offset_t)flp->l_start; 37647c478bd9Sstevel@tonic-gate break; 37657c478bd9Sstevel@tonic-gate case 1: /* SEEK_CUR */ 37667c478bd9Sstevel@tonic-gate *start = (u_offset_t)(flp->l_start + offset); 37677c478bd9Sstevel@tonic-gate break; 37687c478bd9Sstevel@tonic-gate case 2: /* SEEK_END */ 37697c478bd9Sstevel@tonic-gate vattr.va_mask = AT_SIZE; 3770*da6c28aaSamw if (error = VOP_GETATTR(vp, &vattr, 0, CRED(), NULL)) 37717c478bd9Sstevel@tonic-gate return (error); 37727c478bd9Sstevel@tonic-gate *start = (u_offset_t)(flp->l_start + vattr.va_size); 37737c478bd9Sstevel@tonic-gate break; 37747c478bd9Sstevel@tonic-gate default: 37757c478bd9Sstevel@tonic-gate return (EINVAL); 37767c478bd9Sstevel@tonic-gate } 37777c478bd9Sstevel@tonic-gate 37787c478bd9Sstevel@tonic-gate /* 37797c478bd9Sstevel@tonic-gate * Determine the range covered by the request. 
37807c478bd9Sstevel@tonic-gate */ 37817c478bd9Sstevel@tonic-gate if (flp->l_len == 0) 37827c478bd9Sstevel@tonic-gate *end = MAX_U_OFFSET_T; 37837c478bd9Sstevel@tonic-gate else if ((offset_t)flp->l_len > 0) { 37847c478bd9Sstevel@tonic-gate *end = (u_offset_t)(*start + (flp->l_len - 1)); 37857c478bd9Sstevel@tonic-gate } else { 37867c478bd9Sstevel@tonic-gate /* 37877c478bd9Sstevel@tonic-gate * Negative length; why do we even allow this ? 37887c478bd9Sstevel@tonic-gate * Because this allows easy specification of 37897c478bd9Sstevel@tonic-gate * the last n bytes of the file. 37907c478bd9Sstevel@tonic-gate */ 37917c478bd9Sstevel@tonic-gate *end = *start; 37927c478bd9Sstevel@tonic-gate *start += (u_offset_t)flp->l_len; 37937c478bd9Sstevel@tonic-gate (*start)++; 37947c478bd9Sstevel@tonic-gate } 37957c478bd9Sstevel@tonic-gate return (0); 37967c478bd9Sstevel@tonic-gate } 37977c478bd9Sstevel@tonic-gate 37987c478bd9Sstevel@tonic-gate /* 37997c478bd9Sstevel@tonic-gate * Check the validity of lock data. This can used by the NFS 38007c478bd9Sstevel@tonic-gate * frlock routines to check data before contacting the server. The 38017c478bd9Sstevel@tonic-gate * server must support semantics that aren't as restrictive as 38027c478bd9Sstevel@tonic-gate * the UNIX API, so the NFS client is required to check. 38037c478bd9Sstevel@tonic-gate * The maximum is now passed in by the caller. 38047c478bd9Sstevel@tonic-gate */ 38057c478bd9Sstevel@tonic-gate 38067c478bd9Sstevel@tonic-gate int 38077c478bd9Sstevel@tonic-gate flk_check_lock_data(u_offset_t start, u_offset_t end, offset_t max) 38087c478bd9Sstevel@tonic-gate { 38097c478bd9Sstevel@tonic-gate /* 38107c478bd9Sstevel@tonic-gate * The end (length) for local locking should never be greater 38117c478bd9Sstevel@tonic-gate * than MAXEND. However, the representation for 38127c478bd9Sstevel@tonic-gate * the entire file is MAX_U_OFFSET_T. 
38137c478bd9Sstevel@tonic-gate */ 38147c478bd9Sstevel@tonic-gate if ((start > max) || 38157c478bd9Sstevel@tonic-gate ((end > max) && (end != MAX_U_OFFSET_T))) { 38167c478bd9Sstevel@tonic-gate return (EINVAL); 38177c478bd9Sstevel@tonic-gate } 38187c478bd9Sstevel@tonic-gate if (start > end) { 38197c478bd9Sstevel@tonic-gate return (EINVAL); 38207c478bd9Sstevel@tonic-gate } 38217c478bd9Sstevel@tonic-gate return (0); 38227c478bd9Sstevel@tonic-gate } 38237c478bd9Sstevel@tonic-gate 38247c478bd9Sstevel@tonic-gate /* 38257c478bd9Sstevel@tonic-gate * Fill in request->l_flock with information about the lock blocking the 38267c478bd9Sstevel@tonic-gate * request. The complexity here is that lock manager requests are allowed 38277c478bd9Sstevel@tonic-gate * to see into the upper part of the 32-bit address range, whereas local 38287c478bd9Sstevel@tonic-gate * requests are only allowed to see signed values. 38297c478bd9Sstevel@tonic-gate * 38307c478bd9Sstevel@tonic-gate * What should be done when "blocker" is a lock manager lock that uses the 38317c478bd9Sstevel@tonic-gate * upper portion of the 32-bit range, but "request" is local? Since the 38327c478bd9Sstevel@tonic-gate * request has already been determined to have been blocked by the blocker, 38337c478bd9Sstevel@tonic-gate * at least some portion of "blocker" must be in the range of the request, 38347c478bd9Sstevel@tonic-gate * or the request extends to the end of file. For the first case, the 38357c478bd9Sstevel@tonic-gate * portion in the lower range is returned with the indication that it goes 38367c478bd9Sstevel@tonic-gate * "to EOF." For the second case, the last byte of the lower range is 38377c478bd9Sstevel@tonic-gate * returned with the indication that it goes "to EOF." 
38387c478bd9Sstevel@tonic-gate */ 38397c478bd9Sstevel@tonic-gate 38407c478bd9Sstevel@tonic-gate static void 38417c478bd9Sstevel@tonic-gate report_blocker(lock_descriptor_t *blocker, lock_descriptor_t *request) 38427c478bd9Sstevel@tonic-gate { 38437c478bd9Sstevel@tonic-gate flock64_t *flrp; /* l_flock portion of request */ 38447c478bd9Sstevel@tonic-gate 38457c478bd9Sstevel@tonic-gate ASSERT(blocker != NULL); 38467c478bd9Sstevel@tonic-gate 38477c478bd9Sstevel@tonic-gate flrp = &request->l_flock; 38487c478bd9Sstevel@tonic-gate flrp->l_whence = 0; 38497c478bd9Sstevel@tonic-gate flrp->l_type = blocker->l_type; 38507c478bd9Sstevel@tonic-gate flrp->l_pid = blocker->l_flock.l_pid; 38517c478bd9Sstevel@tonic-gate flrp->l_sysid = blocker->l_flock.l_sysid; 38527c478bd9Sstevel@tonic-gate 38537c478bd9Sstevel@tonic-gate if (IS_LOCKMGR(request)) { 38547c478bd9Sstevel@tonic-gate flrp->l_start = blocker->l_start; 38557c478bd9Sstevel@tonic-gate if (blocker->l_end == MAX_U_OFFSET_T) 38567c478bd9Sstevel@tonic-gate flrp->l_len = 0; 38577c478bd9Sstevel@tonic-gate else 38587c478bd9Sstevel@tonic-gate flrp->l_len = blocker->l_end - blocker->l_start + 1; 38597c478bd9Sstevel@tonic-gate } else { 38607c478bd9Sstevel@tonic-gate if (blocker->l_start > MAXEND) { 38617c478bd9Sstevel@tonic-gate flrp->l_start = MAXEND; 38627c478bd9Sstevel@tonic-gate flrp->l_len = 0; 38637c478bd9Sstevel@tonic-gate } else { 38647c478bd9Sstevel@tonic-gate flrp->l_start = blocker->l_start; 38657c478bd9Sstevel@tonic-gate if (blocker->l_end == MAX_U_OFFSET_T) 38667c478bd9Sstevel@tonic-gate flrp->l_len = 0; 38677c478bd9Sstevel@tonic-gate else 38687c478bd9Sstevel@tonic-gate flrp->l_len = blocker->l_end - 38697c478bd9Sstevel@tonic-gate blocker->l_start + 1; 38707c478bd9Sstevel@tonic-gate } 38717c478bd9Sstevel@tonic-gate } 38727c478bd9Sstevel@tonic-gate } 38737c478bd9Sstevel@tonic-gate /* ONC_PLUS EXTRACT END */ 38747c478bd9Sstevel@tonic-gate 38757c478bd9Sstevel@tonic-gate /* 38767c478bd9Sstevel@tonic-gate * PSARC case 
1997/292 38777c478bd9Sstevel@tonic-gate */ 38787c478bd9Sstevel@tonic-gate /* 38797c478bd9Sstevel@tonic-gate * This is the public routine exported by flock.h. 38807c478bd9Sstevel@tonic-gate */ 38817c478bd9Sstevel@tonic-gate void 38827c478bd9Sstevel@tonic-gate cl_flk_change_nlm_state_to_unknown(int nlmid) 38837c478bd9Sstevel@tonic-gate { 38847c478bd9Sstevel@tonic-gate /* 38857c478bd9Sstevel@tonic-gate * Check to see if node is booted as a cluster. If not, return. 38867c478bd9Sstevel@tonic-gate */ 38877c478bd9Sstevel@tonic-gate if ((cluster_bootflags & CLUSTER_BOOTED) == 0) { 38887c478bd9Sstevel@tonic-gate return; 38897c478bd9Sstevel@tonic-gate } 38907c478bd9Sstevel@tonic-gate 38917c478bd9Sstevel@tonic-gate /* 38927c478bd9Sstevel@tonic-gate * See comment in cl_flk_set_nlm_status(). 38937c478bd9Sstevel@tonic-gate */ 38947c478bd9Sstevel@tonic-gate if (nlm_reg_status == NULL) { 38957c478bd9Sstevel@tonic-gate return; 38967c478bd9Sstevel@tonic-gate } 38977c478bd9Sstevel@tonic-gate 38987c478bd9Sstevel@tonic-gate /* 38997c478bd9Sstevel@tonic-gate * protect NLM registry state with a mutex. 39007c478bd9Sstevel@tonic-gate */ 39017c478bd9Sstevel@tonic-gate ASSERT(nlmid <= nlm_status_size && nlmid >= 0); 39027c478bd9Sstevel@tonic-gate mutex_enter(&nlm_reg_lock); 39037c478bd9Sstevel@tonic-gate FLK_REGISTRY_CHANGE_NLM_STATE(nlm_reg_status, nlmid, FLK_NLM_UNKNOWN); 39047c478bd9Sstevel@tonic-gate mutex_exit(&nlm_reg_lock); 39057c478bd9Sstevel@tonic-gate } 39067c478bd9Sstevel@tonic-gate 39077c478bd9Sstevel@tonic-gate /* 39087c478bd9Sstevel@tonic-gate * Return non-zero if the given I/O request conflicts with an active NBMAND 39097c478bd9Sstevel@tonic-gate * lock. 39107c478bd9Sstevel@tonic-gate * If svmand is non-zero, it means look at all active locks, not just NBMAND 39117c478bd9Sstevel@tonic-gate * locks. 
39127c478bd9Sstevel@tonic-gate */ 39137c478bd9Sstevel@tonic-gate 39147c478bd9Sstevel@tonic-gate int 39157c478bd9Sstevel@tonic-gate nbl_lock_conflict(vnode_t *vp, nbl_op_t op, u_offset_t offset, 3916*da6c28aaSamw ssize_t length, int svmand, caller_context_t *ct) 39177c478bd9Sstevel@tonic-gate { 39187c478bd9Sstevel@tonic-gate int conflict = 0; 39197c478bd9Sstevel@tonic-gate graph_t *gp; 39207c478bd9Sstevel@tonic-gate lock_descriptor_t *lock; 3921*da6c28aaSamw pid_t pid; 3922*da6c28aaSamw int sysid; 3923*da6c28aaSamw 3924*da6c28aaSamw if (ct == NULL) { 3925*da6c28aaSamw pid = curproc->p_pid; 3926*da6c28aaSamw sysid = 0; 3927*da6c28aaSamw } else { 3928*da6c28aaSamw pid = ct->cc_pid; 3929*da6c28aaSamw sysid = ct->cc_sysid; 3930*da6c28aaSamw } 39317c478bd9Sstevel@tonic-gate 39327c478bd9Sstevel@tonic-gate mutex_enter(&flock_lock); 39337c478bd9Sstevel@tonic-gate gp = lock_graph[HASH_INDEX(vp)]; 39347c478bd9Sstevel@tonic-gate mutex_exit(&flock_lock); 39357c478bd9Sstevel@tonic-gate if (gp == NULL) 39367c478bd9Sstevel@tonic-gate return (0); 39377c478bd9Sstevel@tonic-gate 39387c478bd9Sstevel@tonic-gate mutex_enter(&gp->gp_mutex); 39397c478bd9Sstevel@tonic-gate SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp); 39407c478bd9Sstevel@tonic-gate 39417c478bd9Sstevel@tonic-gate for (; lock && lock->l_vnode == vp; lock = lock->l_next) { 39427c478bd9Sstevel@tonic-gate if ((svmand || (lock->l_state & NBMAND_LOCK)) && 3943*da6c28aaSamw (lock->l_flock.l_sysid != sysid || 3944*da6c28aaSamw lock->l_flock.l_pid != pid) && 39457c478bd9Sstevel@tonic-gate lock_blocks_io(op, offset, length, 39467c478bd9Sstevel@tonic-gate lock->l_type, lock->l_start, lock->l_end)) { 39477c478bd9Sstevel@tonic-gate conflict = 1; 39487c478bd9Sstevel@tonic-gate break; 39497c478bd9Sstevel@tonic-gate } 39507c478bd9Sstevel@tonic-gate } 39517c478bd9Sstevel@tonic-gate mutex_exit(&gp->gp_mutex); 39527c478bd9Sstevel@tonic-gate 39537c478bd9Sstevel@tonic-gate return (conflict); 39547c478bd9Sstevel@tonic-gate } 

/*
 * Return non-zero if the given I/O request conflicts with the given lock.
 *
 * A read request never conflicts with a read lock; otherwise the two
 * conflict exactly when [offset, offset+length) overlaps
 * [lock_start, lock_end].
 */

static int
lock_blocks_io(nbl_op_t op, u_offset_t offset, ssize_t length,
	    int lock_type, u_offset_t lock_start, u_offset_t lock_end)
{
	ASSERT(op == NBL_READ || op == NBL_WRITE || op == NBL_READWRITE);
	ASSERT(lock_type == F_RDLCK || lock_type == F_WRLCK);

	if (op == NBL_READ && lock_type == F_RDLCK)
		return (0);

	if (offset <= lock_start && lock_start < offset + length)
		return (1);
	if (lock_start <= offset && offset <= lock_end)
		return (1);

	return (0);
}

#ifdef DEBUG
/*
 * Debug-only consistency check of the active queue: every active lock
 * must be unblocked and non-barrier, all of its in-edges must come from
 * sleeping (blocked) locks, and no two active locks on the same vnode
 * may block each other.
 */
static void
check_active_locks(graph_t *gp)
{
	lock_descriptor_t *lock, *lock1;
	edge_t	*ep;

	for (lock = ACTIVE_HEAD(gp)->l_next; lock != ACTIVE_HEAD(gp);
	    lock = lock->l_next) {
		ASSERT(IS_ACTIVE(lock));
		ASSERT(NOT_BLOCKED(lock));
		ASSERT(!IS_BARRIER(lock));

		ep = FIRST_IN(lock);

		while (ep != HEAD(lock)) {
			ASSERT(IS_SLEEPING(ep->from_vertex));
			ASSERT(!NOT_BLOCKED(ep->from_vertex));
			ep = NEXT_IN(ep);
		}

		for (lock1 = lock->l_next; lock1 != ACTIVE_HEAD(gp);
		    lock1 = lock1->l_next) {
			if (lock1->l_vnode == lock->l_vnode) {
				if (BLOCKS(lock1, lock)) {
					cmn_err(CE_PANIC,
					    "active lock %p blocks %p",
					    (void *)lock1, (void *)lock);
				} else if (BLOCKS(lock, lock1)) {
					cmn_err(CE_PANIC,
					    "active lock %p blocks %p",
					    (void *)lock, (void *)lock1);
				}
			}
		}
	}
}

/*
 * Effect: This function checks to see if the transition from 'old_state' to
 * 'new_state' is a valid one.  It returns 0 if the transition is valid
 * and 1 if it is not.
 * For a map of valid transitions, see sys/flock_impl.h
 */
static int
check_lock_transition(int old_state, int new_state)
{
	switch (old_state) {
	case FLK_INITIAL_STATE:
		if ((new_state == FLK_START_STATE) ||
		    (new_state == FLK_SLEEPING_STATE) ||
		    (new_state == FLK_ACTIVE_STATE) ||
		    (new_state == FLK_DEAD_STATE)) {
			return (0);
		} else {
			return (1);
		}
	case FLK_START_STATE:
		if ((new_state == FLK_ACTIVE_STATE) ||
		    (new_state == FLK_DEAD_STATE)) {
			return (0);
		} else {
			return (1);
		}
	case FLK_ACTIVE_STATE:
		if (new_state == FLK_DEAD_STATE) {
			return (0);
		} else {
			return (1);
		}
	case FLK_SLEEPING_STATE:
		if ((new_state == FLK_GRANTED_STATE) ||
		    (new_state == FLK_INTERRUPTED_STATE) ||
		    (new_state == FLK_CANCELLED_STATE)) {
			return (0);
		} else {
			return (1);
		}
	case FLK_GRANTED_STATE:
		if ((new_state == FLK_START_STATE) ||
		    (new_state == FLK_INTERRUPTED_STATE) ||
		    (new_state == FLK_CANCELLED_STATE)) {
			return (0);
		} else {
			return (1);
		}
	case FLK_CANCELLED_STATE:
		if ((new_state == FLK_INTERRUPTED_STATE) ||
		    (new_state == FLK_DEAD_STATE)) {
			return (0);
		} else {
			return (1);
		}
	case FLK_INTERRUPTED_STATE:
		if (new_state == FLK_DEAD_STATE) {
			return (0);
		} else {
			return (1);
		}
	case FLK_DEAD_STATE:
		/* May be set more than once */
		if (new_state == FLK_DEAD_STATE) {
			return (0);
		} else {
			return (1);
		}
	default:
		return (1);
	}
}

/*
 * Debug-only consistency check of the sleeping queue: for every sleeping
 * lock, any lock (sleeping or active, same vnode) that blocks it must be
 * reachable via a path in the graph, and every out-edge must point at a
 * lock that actually blocks it.
 */
static void
check_sleeping_locks(graph_t *gp)
{
	lock_descriptor_t *lock1, *lock2;
	edge_t *ep;
	for (lock1 = SLEEPING_HEAD(gp)->l_next; lock1 != SLEEPING_HEAD(gp);
	    lock1 = lock1->l_next) {
		ASSERT(!IS_BARRIER(lock1));
		for (lock2 = lock1->l_next; lock2 != SLEEPING_HEAD(gp);
		    lock2 = lock2->l_next) {
			if (lock1->l_vnode == lock2->l_vnode) {
				if (BLOCKS(lock2, lock1)) {
					ASSERT(!IS_GRANTED(lock1));
					ASSERT(!NOT_BLOCKED(lock1));
					path(lock1, lock2);
				}
			}
		}

		for (lock2 = ACTIVE_HEAD(gp)->l_next; lock2 != ACTIVE_HEAD(gp);
		    lock2 = lock2->l_next) {
			ASSERT(!IS_BARRIER(lock1));
			if (lock1->l_vnode == lock2->l_vnode) {
				if (BLOCKS(lock2, lock1)) {
					ASSERT(!IS_GRANTED(lock1));
					ASSERT(!NOT_BLOCKED(lock1));
					path(lock1, lock2);
				}
			}
		}
		ep = FIRST_ADJ(lock1);
		while (ep != HEAD(lock1)) {
			ASSERT(BLOCKS(ep->to_vertex, lock1));
			ep = NEXT_ADJ(ep);
		}
	}
}

/*
 * Return 1 if lock2 is reachable from lock1 by a path of length >= 2
 * (i.e. through at least one intermediate vertex), 0 otherwise.
 * Implemented as an iterative depth-first search using the graph's
 * coloring and the l_dstack stack links.  If 'no_path' is set, asserts
 * that lock1 has no direct edge to lock2.
 */
static int
level_two_path(lock_descriptor_t *lock1, lock_descriptor_t *lock2, int no_path)
{
	edge_t	*ep;
	lock_descriptor_t *vertex;
	lock_descriptor_t *vertex_stack;

	STACK_INIT(vertex_stack);

	flk_graph_uncolor(lock1->l_graph);
	/* Seed the stack with all direct successors of lock1. */
	ep = FIRST_ADJ(lock1);
	ASSERT(ep != HEAD(lock1));
	while (ep != HEAD(lock1)) {
		if (no_path)
			ASSERT(ep->to_vertex != lock2);
		STACK_PUSH(vertex_stack, ep->to_vertex, l_dstack);
		COLOR(ep->to_vertex);
		ep = NEXT_ADJ(ep);
	}

	while ((vertex = STACK_TOP(vertex_stack)) != NULL) {
		STACK_POP(vertex_stack, l_dstack);
		for (ep = FIRST_ADJ(vertex); ep != HEAD(vertex);
		    ep = NEXT_ADJ(ep)) {
			if (COLORED(ep->to_vertex))
				continue;
			COLOR(ep->to_vertex);
			if (ep->to_vertex == lock2)
				return (1);

			STACK_PUSH(vertex_stack, ep->to_vertex, l_dstack);
		}
	}
	return (0);
}

/*
 * Debug-only check that no lock owned by (pid, sysid) remains on vp in
 * either the active or the sleeping queue; panics if one is found.
 */
static void
check_owner_locks(graph_t *gp, pid_t pid, int sysid, vnode_t *vp)
{
	lock_descriptor_t *lock;

	SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);

	if (lock) {
		while (lock != ACTIVE_HEAD(gp) && (lock->l_vnode == vp)) {
			if (lock->l_flock.l_pid == pid &&
			    lock->l_flock.l_sysid == sysid)
				cmn_err(CE_PANIC,
				    "owner pid %d's lock %p in active queue",
				    pid, (void *)lock);
			lock = lock->l_next;
		}
	}
	SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);

	if (lock) {
		while (lock != SLEEPING_HEAD(gp) && (lock->l_vnode == vp)) {
			if (lock->l_flock.l_pid == pid &&
			    lock->l_flock.l_sysid == sysid)
				cmn_err(CE_PANIC,
				    "owner pid %d's lock %p in sleep queue",
				    pid, (void *)lock);
			lock = lock->l_next;
		}
	}
}

/*
 * Return 1 if lock1 has a direct edge to lock2, 0 otherwise.
 */
static int
level_one_path(lock_descriptor_t *lock1, lock_descriptor_t *lock2)
{
	edge_t *ep = FIRST_ADJ(lock1);

	while (ep != HEAD(lock1)) {
		if (ep->to_vertex == lock2)
			return (1);
		else
			ep = NEXT_ADJ(ep);
	}
	return (0);
}

/*
 * Return 1 if there is no multi-edge path from lock1 to lock2
 * (also asserting there is no direct edge), 0 if there is one.
 */
static int
no_path(lock_descriptor_t *lock1, lock_descriptor_t *lock2)
{
	return (!level_two_path(lock1, lock2, 1));
}
42137c478bd9Sstevel@tonic-gate 42147c478bd9Sstevel@tonic-gate static void 42157c478bd9Sstevel@tonic-gate path(lock_descriptor_t *lock1, lock_descriptor_t *lock2) 42167c478bd9Sstevel@tonic-gate { 42177c478bd9Sstevel@tonic-gate if (level_one_path(lock1, lock2)) { 42187c478bd9Sstevel@tonic-gate if (level_two_path(lock1, lock2, 0) != 0) { 42197c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, 42207c478bd9Sstevel@tonic-gate "one edge one path from lock1 %p lock2 %p", 42217c478bd9Sstevel@tonic-gate (void *)lock1, (void *)lock2); 42227c478bd9Sstevel@tonic-gate } 42237c478bd9Sstevel@tonic-gate } else if (no_path(lock1, lock2)) { 42247c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, 42257c478bd9Sstevel@tonic-gate "No path from lock1 %p to lock2 %p", 42267c478bd9Sstevel@tonic-gate (void *)lock1, (void *)lock2); 42277c478bd9Sstevel@tonic-gate } 42287c478bd9Sstevel@tonic-gate } 42297c478bd9Sstevel@tonic-gate #endif /* DEBUG */ 4230