19454b2d8SWarner Losh /*- 2dfdcada3SDoug Rabson * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ 3dfdcada3SDoug Rabson * Authors: Doug Rabson <dfr@rabson.org> 4dfdcada3SDoug Rabson * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org> 5dfdcada3SDoug Rabson * 6dfdcada3SDoug Rabson * Redistribution and use in source and binary forms, with or without 7dfdcada3SDoug Rabson * modification, are permitted provided that the following conditions 8dfdcada3SDoug Rabson * are met: 9dfdcada3SDoug Rabson * 1. Redistributions of source code must retain the above copyright 10dfdcada3SDoug Rabson * notice, this list of conditions and the following disclaimer. 11dfdcada3SDoug Rabson * 2. Redistributions in binary form must reproduce the above copyright 12dfdcada3SDoug Rabson * notice, this list of conditions and the following disclaimer in the 13dfdcada3SDoug Rabson * documentation and/or other materials provided with the distribution. 14dfdcada3SDoug Rabson * 15dfdcada3SDoug Rabson * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16dfdcada3SDoug Rabson * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17dfdcada3SDoug Rabson * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18dfdcada3SDoug Rabson * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19dfdcada3SDoug Rabson * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20dfdcada3SDoug Rabson * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21dfdcada3SDoug Rabson * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22dfdcada3SDoug Rabson * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23dfdcada3SDoug Rabson * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24dfdcada3SDoug Rabson * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25dfdcada3SDoug Rabson * SUCH DAMAGE. 26dfdcada3SDoug Rabson */ 27dfdcada3SDoug Rabson /*- 2892dc7331SDavid Greenman * Copyright (c) 1982, 1986, 1989, 1993 2992dc7331SDavid Greenman * The Regents of the University of California. All rights reserved. 3092dc7331SDavid Greenman * 3192dc7331SDavid Greenman * This code is derived from software contributed to Berkeley by 3292dc7331SDavid Greenman * Scooter Morris at Genentech Inc. 3392dc7331SDavid Greenman * 3492dc7331SDavid Greenman * Redistribution and use in source and binary forms, with or without 3592dc7331SDavid Greenman * modification, are permitted provided that the following conditions 3692dc7331SDavid Greenman * are met: 3792dc7331SDavid Greenman * 1. Redistributions of source code must retain the above copyright 3892dc7331SDavid Greenman * notice, this list of conditions and the following disclaimer. 3992dc7331SDavid Greenman * 2. Redistributions in binary form must reproduce the above copyright 4092dc7331SDavid Greenman * notice, this list of conditions and the following disclaimer in the 4192dc7331SDavid Greenman * documentation and/or other materials provided with the distribution. 4292dc7331SDavid Greenman * 4. Neither the name of the University nor the names of its contributors 4392dc7331SDavid Greenman * may be used to endorse or promote products derived from this software 4492dc7331SDavid Greenman * without specific prior written permission. 4592dc7331SDavid Greenman * 4692dc7331SDavid Greenman * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 4792dc7331SDavid Greenman * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 4892dc7331SDavid Greenman * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 4992dc7331SDavid Greenman * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 5092dc7331SDavid Greenman * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 5192dc7331SDavid Greenman * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 5292dc7331SDavid Greenman * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 5392dc7331SDavid Greenman * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 5492dc7331SDavid Greenman * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 5592dc7331SDavid Greenman * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 5692dc7331SDavid Greenman * SUCH DAMAGE. 5792dc7331SDavid Greenman * 5892dc7331SDavid Greenman * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94 5992dc7331SDavid Greenman */ 6092dc7331SDavid Greenman 61677b542eSDavid E. O'Brien #include <sys/cdefs.h> 62677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 63677b542eSDavid E. O'Brien 643f2076daSEivind Eklund #include "opt_debug_lockf.h" 653f2076daSEivind Eklund 6692dc7331SDavid Greenman #include <sys/param.h> 6792dc7331SDavid Greenman #include <sys/systm.h> 68dfdcada3SDoug Rabson #include <sys/hash.h> 691c5bb3eaSPeter Wemm #include <sys/kernel.h> 70104a9b7eSAlexander Kabaev #include <sys/limits.h> 711cd52ec3SBruce Evans #include <sys/lock.h> 727f52a691SPoul-Henning Kamp #include <sys/mount.h> 73fb919e4dSMark Murray #include <sys/mutex.h> 7492dc7331SDavid Greenman #include <sys/proc.h> 75dfdcada3SDoug Rabson #include <sys/sx.h> 76b71fec07SBruce Evans #include <sys/unistd.h> 7792dc7331SDavid Greenman #include <sys/vnode.h> 7892dc7331SDavid Greenman #include <sys/malloc.h> 7992dc7331SDavid Greenman #include <sys/fcntl.h> 8092dc7331SDavid Greenman #include <sys/lockf.h> 81dfdcada3SDoug Rabson #include <sys/taskqueue.h> 8292dc7331SDavid Greenman 8392dc7331SDavid Greenman #ifdef LOCKF_DEBUG 84996c772fSJohn Dyson #include <sys/sysctl.h> 85a8687b6dSBruce Evans 86a8687b6dSBruce Evans #include <ufs/ufs/quota.h> 87a8687b6dSBruce Evans #include <ufs/ufs/inode.h> 88a8687b6dSBruce Evans 89dfdcada3SDoug Rabson static int lockf_debug = 0; /* control debug output */ 907f725eacSBruce Evans SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, ""); 9192dc7331SDavid Greenman #endif 9292dc7331SDavid Greenman 93603c8667SAlfred Perlstein MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures"); 9455166637SPoul-Henning Kamp 95dfdcada3SDoug Rabson struct owner_edge; 96dfdcada3SDoug Rabson struct owner_vertex; 97dfdcada3SDoug Rabson struct owner_vertex_list; 98dfdcada3SDoug Rabson struct owner_graph; 99dfdcada3SDoug Rabson 100dfdcada3SDoug Rabson #define NOLOCKF (struct lockf_entry *)0 10192dc7331SDavid Greenman #define SELF 0x1 10292dc7331SDavid Greenman #define OTHERS 0x2 103dfdcada3SDoug Rabson static void lf_init(void *); 104dfdcada3SDoug Rabson static int lf_hash_owner(caddr_t, struct flock *, int); 105dfdcada3SDoug Rabson static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *, 106dfdcada3SDoug Rabson int); 107dfdcada3SDoug Rabson static struct lockf_entry * 108dfdcada3SDoug Rabson lf_alloc_lock(struct lock_owner *); 109dfdcada3SDoug Rabson static void lf_free_lock(struct lockf_entry *); 110dfdcada3SDoug Rabson static int lf_clearlock(struct lockf *, struct lockf_entry *); 111dfdcada3SDoug Rabson static int lf_overlaps(struct lockf_entry *, struct lockf_entry *); 112dfdcada3SDoug Rabson static int lf_blocks(struct lockf_entry *, struct lockf_entry *); 113dfdcada3SDoug Rabson static void lf_free_edge(struct lockf_edge *); 114dfdcada3SDoug Rabson static struct lockf_edge * 115dfdcada3SDoug Rabson lf_alloc_edge(void); 116dfdcada3SDoug Rabson static void lf_alloc_vertex(struct lockf_entry *); 117dfdcada3SDoug Rabson static int lf_add_edge(struct lockf_entry *, struct lockf_entry *); 118dfdcada3SDoug Rabson static void lf_remove_edge(struct lockf_edge *); 119dfdcada3SDoug Rabson static void lf_remove_outgoing(struct lockf_entry *); 120dfdcada3SDoug Rabson static void lf_remove_incoming(struct lockf_entry *); 121dfdcada3SDoug Rabson static int lf_add_outgoing(struct lockf *, struct lockf_entry *); 122dfdcada3SDoug Rabson static int lf_add_incoming(struct lockf *, struct lockf_entry *); 123dfdcada3SDoug Rabson static int lf_findoverlap(struct lockf_entry **, struct lockf_entry *, 124dfdcada3SDoug Rabson int); 125dfdcada3SDoug Rabson static struct lockf_entry * 126dfdcada3SDoug Rabson lf_getblock(struct lockf *, struct lockf_entry *); 127dfdcada3SDoug Rabson static int lf_getlock(struct lockf *, struct lockf_entry *, struct flock *); 128dfdcada3SDoug Rabson static void lf_insert_lock(struct lockf *, struct lockf_entry *); 129dfdcada3SDoug Rabson static void lf_wakeup_lock(struct lockf *, struct lockf_entry *); 130dfdcada3SDoug Rabson static void lf_update_dependancies(struct lockf *, struct lockf_entry *, 131dfdcada3SDoug Rabson int all, struct lockf_entry_list *); 132dfdcada3SDoug Rabson static void lf_set_start(struct lockf *, struct lockf_entry *, off_t, 133dfdcada3SDoug Rabson struct lockf_entry_list*); 134dfdcada3SDoug Rabson static void lf_set_end(struct lockf *, struct lockf_entry *, off_t, 135dfdcada3SDoug Rabson struct lockf_entry_list*); 136dfdcada3SDoug Rabson static int lf_setlock(struct lockf *, struct lockf_entry *, 137dfdcada3SDoug Rabson struct vnode *, void **cookiep); 138dfdcada3SDoug Rabson static int lf_cancel(struct lockf *, struct lockf_entry *, void *); 139dfdcada3SDoug Rabson static void lf_split(struct lockf *, struct lockf_entry *, 140dfdcada3SDoug Rabson struct lockf_entry *, struct lockf_entry_list *); 141013e6650SJeff Roberson #ifdef LOCKF_DEBUG 142dfdcada3SDoug Rabson static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y, 143dfdcada3SDoug Rabson struct owner_vertex_list *path); 144dfdcada3SDoug Rabson static void graph_check(struct owner_graph *g, int checkorder); 145dfdcada3SDoug Rabson static void graph_print_vertices(struct owner_vertex_list *set); 146013e6650SJeff Roberson #endif 147dfdcada3SDoug Rabson static int graph_delta_forward(struct owner_graph *g, 148dfdcada3SDoug Rabson struct owner_vertex *x, struct owner_vertex *y, 149dfdcada3SDoug Rabson struct owner_vertex_list *delta); 150dfdcada3SDoug Rabson static int graph_delta_backward(struct owner_graph *g, 151dfdcada3SDoug Rabson struct owner_vertex *x, struct owner_vertex *y, 152dfdcada3SDoug Rabson struct owner_vertex_list *delta); 153dfdcada3SDoug Rabson static int graph_add_indices(int *indices, int n, 154dfdcada3SDoug Rabson struct owner_vertex_list *set); 155dfdcada3SDoug Rabson static int graph_assign_indices(struct owner_graph *g, int *indices, 156dfdcada3SDoug Rabson int nextunused, struct owner_vertex_list *set); 157dfdcada3SDoug Rabson static int graph_add_edge(struct owner_graph *g, 158dfdcada3SDoug Rabson struct owner_vertex *x, struct owner_vertex *y); 159dfdcada3SDoug Rabson static void graph_remove_edge(struct owner_graph *g, 160dfdcada3SDoug Rabson struct owner_vertex *x, struct owner_vertex *y); 161dfdcada3SDoug Rabson static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g, 162dfdcada3SDoug Rabson struct lock_owner *lo); 163dfdcada3SDoug Rabson static void graph_free_vertex(struct owner_graph *g, 164dfdcada3SDoug Rabson struct owner_vertex *v); 165dfdcada3SDoug Rabson static struct owner_graph * graph_init(struct owner_graph *g); 166dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 167dfdcada3SDoug Rabson static void lf_print(char *, struct lockf_entry *); 168dfdcada3SDoug Rabson static void lf_printlist(char *, struct lockf_entry *); 169dfdcada3SDoug Rabson static void lf_print_owner(struct lock_owner *); 170dfdcada3SDoug Rabson #endif 171dfdcada3SDoug Rabson 172dfdcada3SDoug Rabson /* 173dfdcada3SDoug Rabson * This structure is used to keep track of both local and remote lock 174dfdcada3SDoug Rabson * owners. The lf_owner field of the struct lockf_entry points back at 175dfdcada3SDoug Rabson * the lock owner structure. Each possible lock owner (local proc for 176dfdcada3SDoug Rabson * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid> 177dfdcada3SDoug Rabson * pair for remote locks) is represented by a unique instance of 178dfdcada3SDoug Rabson * struct lock_owner. 179dfdcada3SDoug Rabson * 180dfdcada3SDoug Rabson * If a lock owner has a lock that blocks some other lock or a lock 181dfdcada3SDoug Rabson * that is waiting for some other lock, it also has a vertex in the 182dfdcada3SDoug Rabson * owner_graph below. 183dfdcada3SDoug Rabson * 184dfdcada3SDoug Rabson * Locks: 185dfdcada3SDoug Rabson * (s) locked by state->ls_lock 186dfdcada3SDoug Rabson * (S) locked by lf_lock_states_lock 187dfdcada3SDoug Rabson * (l) locked by lf_lock_owners_lock 188dfdcada3SDoug Rabson * (g) locked by lf_owner_graph_lock 189dfdcada3SDoug Rabson * (c) const until freeing 190dfdcada3SDoug Rabson */ 191dfdcada3SDoug Rabson #define LOCK_OWNER_HASH_SIZE 256 192dfdcada3SDoug Rabson 193dfdcada3SDoug Rabson struct lock_owner { 194dfdcada3SDoug Rabson LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */ 195dfdcada3SDoug Rabson int lo_refs; /* (l) Number of locks referring to this */ 196dfdcada3SDoug Rabson int lo_flags; /* (c) Flags passwd to lf_advlock */ 197dfdcada3SDoug Rabson caddr_t lo_id; /* (c) Id value passed to lf_advlock */ 198dfdcada3SDoug Rabson pid_t lo_pid; /* (c) Process Id of the lock owner */ 199dfdcada3SDoug Rabson int lo_sysid; /* (c) System Id of the lock owner */ 200dfdcada3SDoug Rabson struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */ 201dfdcada3SDoug Rabson }; 202dfdcada3SDoug Rabson 203dfdcada3SDoug Rabson LIST_HEAD(lock_owner_list, lock_owner); 204dfdcada3SDoug Rabson 205dfdcada3SDoug Rabson static struct sx lf_lock_states_lock; 206dfdcada3SDoug Rabson static struct lockf_list lf_lock_states; /* (S) */ 207dfdcada3SDoug Rabson static struct sx lf_lock_owners_lock; 208dfdcada3SDoug Rabson static struct lock_owner_list lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */ 209dfdcada3SDoug Rabson 210dfdcada3SDoug Rabson /* 211dfdcada3SDoug Rabson * Structures for deadlock detection. 212dfdcada3SDoug Rabson * 213dfdcada3SDoug Rabson * We have two types of directed graph, the first is the set of locks, 214dfdcada3SDoug Rabson * both active and pending on a vnode. Within this graph, active locks 215dfdcada3SDoug Rabson * are terminal nodes in the graph (i.e. have no out-going 216dfdcada3SDoug Rabson * edges). Pending locks have out-going edges to each blocking active 217dfdcada3SDoug Rabson * lock that prevents the lock from being granted and also to each 218dfdcada3SDoug Rabson * older pending lock that would block them if it was active. The 219dfdcada3SDoug Rabson * graph for each vnode is naturally acyclic; new edges are only ever 220dfdcada3SDoug Rabson * added to or from new nodes (either new pending locks which only add 221dfdcada3SDoug Rabson * out-going edges or new active locks which only add in-coming edges) 222dfdcada3SDoug Rabson * therefore they cannot create loops in the lock graph. 223dfdcada3SDoug Rabson * 224dfdcada3SDoug Rabson * The second graph is a global graph of lock owners. Each lock owner 225dfdcada3SDoug Rabson * is a vertex in that graph and an edge is added to the graph 226dfdcada3SDoug Rabson * whenever an edge is added to a vnode graph, with end points 227dfdcada3SDoug Rabson * corresponding to owner of the new pending lock and the owner of the 228dfdcada3SDoug Rabson * lock upon which it waits. In order to prevent deadlock, we only add 229dfdcada3SDoug Rabson * an edge to this graph if the new edge would not create a cycle. 230dfdcada3SDoug Rabson * 231dfdcada3SDoug Rabson * The lock owner graph is topologically sorted, i.e. if a node has 232dfdcada3SDoug Rabson * any outgoing edges, then it has an order strictly less than any 233dfdcada3SDoug Rabson * node to which it has an outgoing edge. We preserve this ordering 234dfdcada3SDoug Rabson * (and detect cycles) on edge insertion using Algorithm PK from the 235dfdcada3SDoug Rabson * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic 236dfdcada3SDoug Rabson * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article 237dfdcada3SDoug Rabson * No. 1.7) 238dfdcada3SDoug Rabson */ 239dfdcada3SDoug Rabson struct owner_vertex; 240dfdcada3SDoug Rabson 241dfdcada3SDoug Rabson struct owner_edge { 242dfdcada3SDoug Rabson LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */ 243dfdcada3SDoug Rabson LIST_ENTRY(owner_edge) e_inlink; /* (g) link to's in-edge list */ 244dfdcada3SDoug Rabson int e_refs; /* (g) number of times added */ 245dfdcada3SDoug Rabson struct owner_vertex *e_from; /* (c) out-going from here */ 246dfdcada3SDoug Rabson struct owner_vertex *e_to; /* (c) in-coming to here */ 247dfdcada3SDoug Rabson }; 248dfdcada3SDoug Rabson LIST_HEAD(owner_edge_list, owner_edge); 249dfdcada3SDoug Rabson 250dfdcada3SDoug Rabson struct owner_vertex { 251dfdcada3SDoug Rabson TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */ 252dfdcada3SDoug Rabson uint32_t v_gen; /* (g) workspace for edge insertion */ 253dfdcada3SDoug Rabson int v_order; /* (g) order of vertex in graph */ 254dfdcada3SDoug Rabson struct owner_edge_list v_outedges;/* (g) list of out-edges */ 255dfdcada3SDoug Rabson struct owner_edge_list v_inedges; /* (g) list of in-edges */ 256dfdcada3SDoug Rabson struct lock_owner *v_owner; /* (c) corresponding lock owner */ 257dfdcada3SDoug Rabson }; 258dfdcada3SDoug Rabson TAILQ_HEAD(owner_vertex_list, owner_vertex); 259dfdcada3SDoug Rabson 260dfdcada3SDoug Rabson struct owner_graph { 261dfdcada3SDoug Rabson struct owner_vertex** g_vertices; /* (g) pointers to vertices */ 262dfdcada3SDoug Rabson int g_size; /* (g) number of vertices */ 263dfdcada3SDoug Rabson int g_space; /* (g) space allocated for vertices */ 264dfdcada3SDoug Rabson int *g_indexbuf; /* (g) workspace for loop detection */ 265dfdcada3SDoug Rabson uint32_t g_gen; /* (g) increment when re-ordering */ 266dfdcada3SDoug Rabson }; 267dfdcada3SDoug Rabson 268dfdcada3SDoug Rabson static struct sx lf_owner_graph_lock; 269dfdcada3SDoug Rabson static struct owner_graph lf_owner_graph; 270dfdcada3SDoug Rabson 271dfdcada3SDoug Rabson /* 272dfdcada3SDoug Rabson * Initialise various structures and locks. 273dfdcada3SDoug Rabson */ 274dfdcada3SDoug Rabson static void 275dfdcada3SDoug Rabson lf_init(void *dummy) 276dfdcada3SDoug Rabson { 277dfdcada3SDoug Rabson int i; 278dfdcada3SDoug Rabson 279dfdcada3SDoug Rabson sx_init(&lf_lock_states_lock, "lock states lock"); 280dfdcada3SDoug Rabson LIST_INIT(&lf_lock_states); 281dfdcada3SDoug Rabson 282dfdcada3SDoug Rabson sx_init(&lf_lock_owners_lock, "lock owners lock"); 283dfdcada3SDoug Rabson for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) 284dfdcada3SDoug Rabson LIST_INIT(&lf_lock_owners[i]); 285dfdcada3SDoug Rabson 286dfdcada3SDoug Rabson sx_init(&lf_owner_graph_lock, "owner graph lock"); 287dfdcada3SDoug Rabson graph_init(&lf_owner_graph); 288dfdcada3SDoug Rabson } 289dfdcada3SDoug Rabson SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL); 290dfdcada3SDoug Rabson 291dfdcada3SDoug Rabson /* 292dfdcada3SDoug Rabson * Generate a hash value for a lock owner. 293dfdcada3SDoug Rabson */ 294dfdcada3SDoug Rabson static int 295dfdcada3SDoug Rabson lf_hash_owner(caddr_t id, struct flock *fl, int flags) 296dfdcada3SDoug Rabson { 297dfdcada3SDoug Rabson uint32_t h; 298dfdcada3SDoug Rabson 299dfdcada3SDoug Rabson if (flags & F_REMOTE) { 300dfdcada3SDoug Rabson h = HASHSTEP(0, fl->l_pid); 301dfdcada3SDoug Rabson h = HASHSTEP(h, fl->l_sysid); 302dfdcada3SDoug Rabson } else if (flags & F_FLOCK) { 303dfdcada3SDoug Rabson h = ((uintptr_t) id) >> 7; 304dfdcada3SDoug Rabson } else { 305dfdcada3SDoug Rabson struct proc *p = (struct proc *) id; 306dfdcada3SDoug Rabson h = HASHSTEP(0, p->p_pid); 307dfdcada3SDoug Rabson h = HASHSTEP(h, 0); 308dfdcada3SDoug Rabson } 309dfdcada3SDoug Rabson 310dfdcada3SDoug Rabson return (h % LOCK_OWNER_HASH_SIZE); 311dfdcada3SDoug Rabson } 312dfdcada3SDoug Rabson 313dfdcada3SDoug Rabson /* 314dfdcada3SDoug Rabson * Return true if a lock owner matches the details passed to 315dfdcada3SDoug Rabson * lf_advlock. 316dfdcada3SDoug Rabson */ 317dfdcada3SDoug Rabson static int 318dfdcada3SDoug Rabson lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl, 319dfdcada3SDoug Rabson int flags) 320dfdcada3SDoug Rabson { 321dfdcada3SDoug Rabson if (flags & F_REMOTE) { 322dfdcada3SDoug Rabson return lo->lo_pid == fl->l_pid 323dfdcada3SDoug Rabson && lo->lo_sysid == fl->l_sysid; 324dfdcada3SDoug Rabson } else { 325dfdcada3SDoug Rabson return lo->lo_id == id; 326dfdcada3SDoug Rabson } 327dfdcada3SDoug Rabson } 328dfdcada3SDoug Rabson 329dfdcada3SDoug Rabson static struct lockf_entry * 330dfdcada3SDoug Rabson lf_alloc_lock(struct lock_owner *lo) 331dfdcada3SDoug Rabson { 332dfdcada3SDoug Rabson struct lockf_entry *lf; 333dfdcada3SDoug Rabson 334dfdcada3SDoug Rabson lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO); 335dfdcada3SDoug Rabson 336dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 337dfdcada3SDoug Rabson if (lockf_debug & 4) 338dfdcada3SDoug Rabson printf("Allocated lock %p\n", lf); 339dfdcada3SDoug Rabson #endif 340dfdcada3SDoug Rabson if (lo) { 341dfdcada3SDoug Rabson sx_xlock(&lf_lock_owners_lock); 342dfdcada3SDoug Rabson lo->lo_refs++; 343dfdcada3SDoug Rabson sx_xunlock(&lf_lock_owners_lock); 344dfdcada3SDoug Rabson lf->lf_owner = lo; 345dfdcada3SDoug Rabson } 346dfdcada3SDoug Rabson 347dfdcada3SDoug Rabson return (lf); 348dfdcada3SDoug Rabson } 349dfdcada3SDoug Rabson 350dfdcada3SDoug Rabson static void 351dfdcada3SDoug Rabson lf_free_lock(struct lockf_entry *lock) 352dfdcada3SDoug Rabson { 353dfdcada3SDoug Rabson /* 354dfdcada3SDoug Rabson * Adjust the lock_owner reference count and 355dfdcada3SDoug Rabson * reclaim the entry if this is the last lock 356dfdcada3SDoug Rabson * for that owner. 357dfdcada3SDoug Rabson */ 358dfdcada3SDoug Rabson struct lock_owner *lo = lock->lf_owner; 359dfdcada3SDoug Rabson if (lo) { 360dfdcada3SDoug Rabson KASSERT(LIST_EMPTY(&lock->lf_outedges), 361dfdcada3SDoug Rabson ("freeing lock with dependancies")); 362dfdcada3SDoug Rabson KASSERT(LIST_EMPTY(&lock->lf_inedges), 363dfdcada3SDoug Rabson ("freeing lock with dependants")); 364dfdcada3SDoug Rabson sx_xlock(&lf_lock_owners_lock); 365dfdcada3SDoug Rabson KASSERT(lo->lo_refs > 0, ("lock owner refcount")); 366dfdcada3SDoug Rabson lo->lo_refs--; 367dfdcada3SDoug Rabson if (lo->lo_refs == 0) { 368dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 369dfdcada3SDoug Rabson if (lockf_debug & 1) 370dfdcada3SDoug Rabson printf("lf_free_lock: freeing lock owner %p\n", 371dfdcada3SDoug Rabson lo); 372dfdcada3SDoug Rabson #endif 373dfdcada3SDoug Rabson if (lo->lo_vertex) { 374dfdcada3SDoug Rabson sx_xlock(&lf_owner_graph_lock); 375dfdcada3SDoug Rabson graph_free_vertex(&lf_owner_graph, 376dfdcada3SDoug Rabson lo->lo_vertex); 377dfdcada3SDoug Rabson sx_xunlock(&lf_owner_graph_lock); 378dfdcada3SDoug Rabson } 379dfdcada3SDoug Rabson LIST_REMOVE(lo, lo_link); 380dfdcada3SDoug Rabson free(lo, M_LOCKF); 381dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 382dfdcada3SDoug Rabson if (lockf_debug & 4) 383dfdcada3SDoug Rabson printf("Freed lock owner %p\n", lo); 384dfdcada3SDoug Rabson #endif 385dfdcada3SDoug Rabson } 386dfdcada3SDoug Rabson sx_unlock(&lf_lock_owners_lock); 387dfdcada3SDoug Rabson } 388dfdcada3SDoug Rabson if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) { 389dfdcada3SDoug Rabson vrele(lock->lf_vnode); 390dfdcada3SDoug Rabson lock->lf_vnode = NULL; 391dfdcada3SDoug Rabson } 392dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 393dfdcada3SDoug Rabson if (lockf_debug & 4) 394dfdcada3SDoug Rabson printf("Freed lock %p\n", lock); 395dfdcada3SDoug Rabson #endif 396dfdcada3SDoug Rabson free(lock, M_LOCKF); 397dfdcada3SDoug Rabson } 39892dc7331SDavid Greenman 39992dc7331SDavid Greenman /* 40092dc7331SDavid Greenman * Advisory record locking support 40192dc7331SDavid Greenman */ 40292dc7331SDavid Greenman int 403dfdcada3SDoug Rabson lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep, 404dfdcada3SDoug Rabson u_quad_t size) 40592dc7331SDavid Greenman { 406dfdcada3SDoug Rabson struct lockf *state, *freestate = NULL; 407bc02f1d9SJeff Roberson struct flock *fl = ap->a_fl; 408dfdcada3SDoug Rabson struct lockf_entry *lock; 409bc02f1d9SJeff Roberson struct vnode *vp = ap->a_vp; 410dfdcada3SDoug Rabson caddr_t id = ap->a_id; 411dfdcada3SDoug Rabson int flags = ap->a_flags; 412dfdcada3SDoug Rabson int hash; 413dfdcada3SDoug Rabson struct lock_owner *lo; 414c4778eedSAndrey A. Chernov off_t start, end, oadd; 41592dc7331SDavid Greenman int error; 41692dc7331SDavid Greenman 41792dc7331SDavid Greenman /* 418dfdcada3SDoug Rabson * Handle the F_UNLKSYS case first - no need to mess about 419dfdcada3SDoug Rabson * creating a lock owner for this one. 420dfdcada3SDoug Rabson */ 421dfdcada3SDoug Rabson if (ap->a_op == F_UNLCKSYS) { 422dfdcada3SDoug Rabson lf_clearremotesys(fl->l_sysid); 423dfdcada3SDoug Rabson return (0); 424dfdcada3SDoug Rabson } 425dfdcada3SDoug Rabson 426dfdcada3SDoug Rabson /* 42792dc7331SDavid Greenman * Convert the flock structure into a start and end. 42892dc7331SDavid Greenman */ 42992dc7331SDavid Greenman switch (fl->l_whence) { 43092dc7331SDavid Greenman 43192dc7331SDavid Greenman case SEEK_SET: 43292dc7331SDavid Greenman case SEEK_CUR: 43392dc7331SDavid Greenman /* 43492dc7331SDavid Greenman * Caller is responsible for adding any necessary offset 43592dc7331SDavid Greenman * when SEEK_CUR is used. 43692dc7331SDavid Greenman */ 43792dc7331SDavid Greenman start = fl->l_start; 43892dc7331SDavid Greenman break; 43992dc7331SDavid Greenman 44092dc7331SDavid Greenman case SEEK_END: 441c8e76343SAndrey A. Chernov if (size > OFF_MAX || 442bc02f1d9SJeff Roberson (fl->l_start > 0 && size > OFF_MAX - fl->l_start)) 443bc02f1d9SJeff Roberson return (EOVERFLOW); 44492dc7331SDavid Greenman start = size + fl->l_start; 44592dc7331SDavid Greenman break; 44692dc7331SDavid Greenman 44792dc7331SDavid Greenman default: 448bc02f1d9SJeff Roberson return (EINVAL); 44992dc7331SDavid Greenman } 450bc02f1d9SJeff Roberson if (start < 0) 451bc02f1d9SJeff Roberson return (EINVAL); 452f510e1c2SAndrey A. Chernov if (fl->l_len < 0) { 453bc02f1d9SJeff Roberson if (start == 0) 454bc02f1d9SJeff Roberson return (EINVAL); 455f510e1c2SAndrey A. Chernov end = start - 1; 45662be011eSAndrey A. Chernov start += fl->l_len; 457bc02f1d9SJeff Roberson if (start < 0) 458bc02f1d9SJeff Roberson return (EINVAL); 459dfdcada3SDoug Rabson } else if (fl->l_len == 0) { 460dfdcada3SDoug Rabson end = OFF_MAX; 461dfdcada3SDoug Rabson } else { 462c4778eedSAndrey A. Chernov oadd = fl->l_len - 1; 463bc02f1d9SJeff Roberson if (oadd > OFF_MAX - start) 464bc02f1d9SJeff Roberson return (EOVERFLOW); 46569cc1d0dSAndrey A. Chernov end = start + oadd; 466a88bd8aaSBruce Evans } 467a88bd8aaSBruce Evans /* 468a88bd8aaSBruce Evans * Avoid the common case of unlocking when inode has no locks. 469a88bd8aaSBruce Evans */ 470dfdcada3SDoug Rabson if ((*statep) == NULL || LIST_EMPTY(&(*statep)->ls_active)) { 471a88bd8aaSBruce Evans if (ap->a_op != F_SETLK) { 472a88bd8aaSBruce Evans fl->l_type = F_UNLCK; 473bc02f1d9SJeff Roberson return (0); 474a88bd8aaSBruce Evans } 475a88bd8aaSBruce Evans } 476dfdcada3SDoug Rabson 47792dc7331SDavid Greenman /* 478dfdcada3SDoug Rabson * Map our arguments to an existing lock owner or create one 479dfdcada3SDoug Rabson * if this is the first time we have seen this owner. 480bc02f1d9SJeff Roberson */ 481dfdcada3SDoug Rabson hash = lf_hash_owner(id, fl, flags); 482dfdcada3SDoug Rabson sx_xlock(&lf_lock_owners_lock); 483dfdcada3SDoug Rabson LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link) 484dfdcada3SDoug Rabson if (lf_owner_matches(lo, id, fl, flags)) 485dfdcada3SDoug Rabson break; 486dfdcada3SDoug Rabson if (!lo) { 487dfdcada3SDoug Rabson /* 488dfdcada3SDoug Rabson * We initialise the lock with a reference 489dfdcada3SDoug Rabson * count which matches the new lockf_entry 490dfdcada3SDoug Rabson * structure created below. 491dfdcada3SDoug Rabson */ 492dfdcada3SDoug Rabson lo = malloc(sizeof(struct lock_owner), M_LOCKF, 493dfdcada3SDoug Rabson M_WAITOK|M_ZERO); 494dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 495dfdcada3SDoug Rabson if (lockf_debug & 4) 496dfdcada3SDoug Rabson printf("Allocated lock owner %p\n", lo); 497dfdcada3SDoug Rabson #endif 498dfdcada3SDoug Rabson 499dfdcada3SDoug Rabson lo->lo_refs = 1; 500dfdcada3SDoug Rabson lo->lo_flags = flags; 501dfdcada3SDoug Rabson lo->lo_id = id; 502dfdcada3SDoug Rabson if (flags & F_REMOTE) { 503dfdcada3SDoug Rabson lo->lo_pid = fl->l_pid; 504dfdcada3SDoug Rabson lo->lo_sysid = fl->l_sysid; 505dfdcada3SDoug Rabson } else if (flags & F_FLOCK) { 506dfdcada3SDoug Rabson lo->lo_pid = -1; 507dfdcada3SDoug Rabson lo->lo_sysid = 0; 508dfdcada3SDoug Rabson } else { 509dfdcada3SDoug Rabson struct proc *p = (struct proc *) id; 510dfdcada3SDoug Rabson lo->lo_pid = p->p_pid; 511dfdcada3SDoug Rabson lo->lo_sysid = 0; 512004e08beSKonstantin Belousov } 513dfdcada3SDoug Rabson lo->lo_vertex = NULL; 514dfdcada3SDoug Rabson 515dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 516dfdcada3SDoug Rabson if (lockf_debug & 1) { 517dfdcada3SDoug Rabson printf("lf_advlockasync: new lock owner %p ", lo); 518dfdcada3SDoug Rabson lf_print_owner(lo); 519dfdcada3SDoug Rabson printf("\n"); 520dfdcada3SDoug Rabson } 521dfdcada3SDoug Rabson #endif 522dfdcada3SDoug Rabson 523dfdcada3SDoug Rabson LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link); 524dfdcada3SDoug Rabson } else { 525bc02f1d9SJeff Roberson /* 526dfdcada3SDoug Rabson * We have seen this lock owner before, increase its 527dfdcada3SDoug Rabson * reference count to account for the new lockf_entry 528dfdcada3SDoug Rabson * structure we create below. 52992dc7331SDavid Greenman */ 530dfdcada3SDoug Rabson lo->lo_refs++; 531dfdcada3SDoug Rabson } 532dfdcada3SDoug Rabson sx_xunlock(&lf_lock_owners_lock); 533dfdcada3SDoug Rabson 534dfdcada3SDoug Rabson /* 535dfdcada3SDoug Rabson * Create the lockf structure. We initialise the lf_owner 536dfdcada3SDoug Rabson * field here instead of in lf_alloc_lock() to avoid paying 537dfdcada3SDoug Rabson * the lf_lock_owners_lock tax twice. 538dfdcada3SDoug Rabson */ 539dfdcada3SDoug Rabson lock = lf_alloc_lock(NULL); 54092dc7331SDavid Greenman lock->lf_start = start; 54192dc7331SDavid Greenman lock->lf_end = end; 542dfdcada3SDoug Rabson lock->lf_owner = lo; 543dfdcada3SDoug Rabson lock->lf_vnode = vp; 544dfdcada3SDoug Rabson if (flags & F_REMOTE) { 545dfdcada3SDoug Rabson /* 546dfdcada3SDoug Rabson * For remote locks, the caller may release its ref to 547dfdcada3SDoug Rabson * the vnode at any time - we have to ref it here to 548dfdcada3SDoug Rabson * prevent it from being recycled unexpectedly. 549dfdcada3SDoug Rabson */ 550dfdcada3SDoug Rabson vref(vp); 551dfdcada3SDoug Rabson } 552dfdcada3SDoug Rabson 55359aff5fcSAlfred Perlstein /* 55459aff5fcSAlfred Perlstein * XXX The problem is that VTOI is ufs specific, so it will 55559aff5fcSAlfred Perlstein * break LOCKF_DEBUG for all other FS's other than UFS because 55659aff5fcSAlfred Perlstein * it casts the vnode->data ptr to struct inode *. 55759aff5fcSAlfred Perlstein */ 55859aff5fcSAlfred Perlstein /* lock->lf_inode = VTOI(ap->a_vp); */ 55959aff5fcSAlfred Perlstein lock->lf_inode = (struct inode *)0; 56092dc7331SDavid Greenman lock->lf_type = fl->l_type; 561dfdcada3SDoug Rabson LIST_INIT(&lock->lf_outedges); 562dfdcada3SDoug Rabson LIST_INIT(&lock->lf_inedges); 563dfdcada3SDoug Rabson lock->lf_async_task = ap->a_task; 56492dc7331SDavid Greenman lock->lf_flags = ap->a_flags; 565dfdcada3SDoug Rabson 56692dc7331SDavid Greenman /* 567dfdcada3SDoug Rabson * Do the requested operation. First find our state structure 568dfdcada3SDoug Rabson * and create a new one if necessary - the caller's *statep 569dfdcada3SDoug Rabson * variable and the state's ls_threads count is protected by 570dfdcada3SDoug Rabson * the vnode interlock. 57192dc7331SDavid Greenman */ 572bc02f1d9SJeff Roberson VI_LOCK(vp); 573dfdcada3SDoug Rabson 574dfdcada3SDoug Rabson /* 575dfdcada3SDoug Rabson * Allocate a state structure if necessary. 576dfdcada3SDoug Rabson */ 577dfdcada3SDoug Rabson state = *statep; 578dfdcada3SDoug Rabson if (state == NULL) { 579dfdcada3SDoug Rabson struct lockf *ls; 580dfdcada3SDoug Rabson 581dfdcada3SDoug Rabson VI_UNLOCK(vp); 582dfdcada3SDoug Rabson 583dfdcada3SDoug Rabson ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO); 584dfdcada3SDoug Rabson sx_init(&ls->ls_lock, "ls_lock"); 585dfdcada3SDoug Rabson LIST_INIT(&ls->ls_active); 586dfdcada3SDoug Rabson LIST_INIT(&ls->ls_pending); 58760cdfde0SDoug Rabson ls->ls_threads = 1; 588dfdcada3SDoug Rabson 589dfdcada3SDoug Rabson sx_xlock(&lf_lock_states_lock); 590dfdcada3SDoug Rabson LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link); 591dfdcada3SDoug Rabson sx_xunlock(&lf_lock_states_lock); 592dfdcada3SDoug Rabson 593dfdcada3SDoug Rabson /* 594dfdcada3SDoug Rabson * Cope if we lost a race with some other thread while 595dfdcada3SDoug Rabson * trying to allocate memory. 596dfdcada3SDoug Rabson */ 597dfdcada3SDoug Rabson VI_LOCK(vp); 598dfdcada3SDoug Rabson if ((*statep) == NULL) { 59960cdfde0SDoug Rabson state = *statep = ls; 60060cdfde0SDoug Rabson VI_UNLOCK(vp); 601dfdcada3SDoug Rabson } else { 60260cdfde0SDoug Rabson state = *statep; 60360cdfde0SDoug Rabson state->ls_threads++; 60460cdfde0SDoug Rabson VI_UNLOCK(vp); 60560cdfde0SDoug Rabson 606dfdcada3SDoug Rabson sx_xlock(&lf_lock_states_lock); 607dfdcada3SDoug Rabson LIST_REMOVE(ls, ls_link); 608dfdcada3SDoug Rabson sx_xunlock(&lf_lock_states_lock); 609dfdcada3SDoug Rabson sx_destroy(&ls->ls_lock); 610dfdcada3SDoug Rabson free(ls, M_LOCKF); 611dfdcada3SDoug Rabson } 61260cdfde0SDoug Rabson } else { 613dfdcada3SDoug Rabson state->ls_threads++; 614dfdcada3SDoug Rabson VI_UNLOCK(vp); 61560cdfde0SDoug Rabson } 616dfdcada3SDoug Rabson 617dfdcada3SDoug Rabson sx_xlock(&state->ls_lock); 61892dc7331SDavid Greenman switch(ap->a_op) { 61992dc7331SDavid Greenman case F_SETLK: 620dfdcada3SDoug Rabson error = lf_setlock(state, lock, vp, ap->a_cookiep); 621bc02f1d9SJeff Roberson break; 62292dc7331SDavid Greenman 62392dc7331SDavid Greenman case F_UNLCK: 624dfdcada3SDoug Rabson error = lf_clearlock(state, lock); 625dfdcada3SDoug Rabson lf_free_lock(lock); 626bc02f1d9SJeff Roberson break; 62792dc7331SDavid Greenman 62892dc7331SDavid Greenman case F_GETLK: 629dfdcada3SDoug Rabson error = lf_getlock(state, lock, fl); 630dfdcada3SDoug Rabson lf_free_lock(lock); 631dfdcada3SDoug Rabson break; 632dfdcada3SDoug Rabson 633dfdcada3SDoug Rabson case F_CANCEL: 634dfdcada3SDoug Rabson if (ap->a_cookiep) 635dfdcada3SDoug Rabson error = lf_cancel(state, lock, *ap->a_cookiep); 636dfdcada3SDoug Rabson else 637dfdcada3SDoug Rabson error = EINVAL; 638dfdcada3SDoug Rabson lf_free_lock(lock); 639bc02f1d9SJeff Roberson break; 64092dc7331SDavid Greenman 64192dc7331SDavid Greenman default: 642dfdcada3SDoug Rabson lf_free_lock(lock); 643013e6650SJeff Roberson error = EINVAL; 644bc02f1d9SJeff Roberson break; 64592dc7331SDavid Greenman } 646dfdcada3SDoug Rabson 647dfdcada3SDoug Rabson #ifdef INVARIANTS 648dfdcada3SDoug Rabson /* 649dfdcada3SDoug Rabson * Check for some can't happen stuff. In this case, the active 650dfdcada3SDoug Rabson * lock list becoming disordered or containing mutually 651dfdcada3SDoug Rabson * blocking locks. We also check the pending list for locks 652dfdcada3SDoug Rabson * which should be active (i.e. have no out-going edges). 653dfdcada3SDoug Rabson */ 654dfdcada3SDoug Rabson LIST_FOREACH(lock, &state->ls_active, lf_link) { 655dfdcada3SDoug Rabson struct lockf_entry *lf; 656dfdcada3SDoug Rabson if (LIST_NEXT(lock, lf_link)) 657dfdcada3SDoug Rabson KASSERT((lock->lf_start 658dfdcada3SDoug Rabson <= LIST_NEXT(lock, lf_link)->lf_start), 659dfdcada3SDoug Rabson ("locks disordered")); 660dfdcada3SDoug Rabson LIST_FOREACH(lf, &state->ls_active, lf_link) { 661dfdcada3SDoug Rabson if (lock == lf) 662dfdcada3SDoug Rabson break; 663dfdcada3SDoug Rabson KASSERT(!lf_blocks(lock, lf), 664dfdcada3SDoug Rabson ("two conflicting active locks")); 665dfdcada3SDoug Rabson if (lock->lf_owner == lf->lf_owner) 666dfdcada3SDoug Rabson KASSERT(!lf_overlaps(lock, lf), 667dfdcada3SDoug Rabson ("two overlapping locks from same owner")); 668dfdcada3SDoug Rabson } 669dfdcada3SDoug Rabson } 670dfdcada3SDoug Rabson LIST_FOREACH(lock, &state->ls_pending, lf_link) { 671dfdcada3SDoug Rabson KASSERT(!LIST_EMPTY(&lock->lf_outedges), 672dfdcada3SDoug Rabson ("pending lock which should be active")); 673dfdcada3SDoug Rabson } 674dfdcada3SDoug Rabson #endif 675dfdcada3SDoug Rabson sx_xunlock(&state->ls_lock); 676dfdcada3SDoug Rabson 677dfdcada3SDoug Rabson /* 678dfdcada3SDoug Rabson * If we have removed the last active lock on the vnode and 679dfdcada3SDoug Rabson * this is the last thread that was in-progress, we can free 680dfdcada3SDoug Rabson * the state structure. We update the caller's pointer inside 681dfdcada3SDoug Rabson * the vnode interlock but call free outside. 682dfdcada3SDoug Rabson * 683dfdcada3SDoug Rabson * XXX alternatively, keep the state structure around until 684dfdcada3SDoug Rabson * the filesystem recycles - requires a callback from the 685dfdcada3SDoug Rabson * filesystem. 686dfdcada3SDoug Rabson */ 687dfdcada3SDoug Rabson VI_LOCK(vp); 688dfdcada3SDoug Rabson 689dfdcada3SDoug Rabson state->ls_threads--; 690dfdcada3SDoug Rabson if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) { 691dfdcada3SDoug Rabson KASSERT(LIST_EMPTY(&state->ls_pending), 692dfdcada3SDoug Rabson ("freeing state with pending locks")); 693dfdcada3SDoug Rabson freestate = state; 694dfdcada3SDoug Rabson *statep = NULL; 695dfdcada3SDoug Rabson } 696dfdcada3SDoug Rabson 697bc02f1d9SJeff Roberson VI_UNLOCK(vp); 698dfdcada3SDoug Rabson 699dfdcada3SDoug Rabson if (freestate) { 700dfdcada3SDoug Rabson sx_xlock(&lf_lock_states_lock); 701dfdcada3SDoug Rabson LIST_REMOVE(freestate, ls_link); 702dfdcada3SDoug Rabson sx_xunlock(&lf_lock_states_lock); 703dfdcada3SDoug Rabson sx_destroy(&freestate->ls_lock); 704dfdcada3SDoug Rabson free(freestate, M_LOCKF); 705004e08beSKonstantin Belousov } 706013e6650SJeff Roberson return (error); 70792dc7331SDavid Greenman } 70892dc7331SDavid Greenman 709dfdcada3SDoug Rabson int 710dfdcada3SDoug Rabson lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size) 711dfdcada3SDoug Rabson { 712dfdcada3SDoug Rabson struct vop_advlockasync_args a; 713dfdcada3SDoug Rabson 714dfdcada3SDoug Rabson a.a_vp = ap->a_vp; 715dfdcada3SDoug Rabson a.a_id = ap->a_id; 716dfdcada3SDoug Rabson a.a_op = ap->a_op; 717dfdcada3SDoug Rabson a.a_fl = ap->a_fl; 718dfdcada3SDoug Rabson a.a_flags = ap->a_flags; 719dfdcada3SDoug Rabson a.a_task = NULL; 720dfdcada3SDoug Rabson a.a_cookiep = NULL; 721dfdcada3SDoug Rabson 722dfdcada3SDoug Rabson return (lf_advlockasync(&a, statep, size)); 723dfdcada3SDoug Rabson } 724dfdcada3SDoug Rabson 725dfdcada3SDoug Rabson /* 726dfdcada3SDoug Rabson * Return non-zero if locks 'x' and 'y' overlap. 727dfdcada3SDoug Rabson */ 728dfdcada3SDoug Rabson static int 729dfdcada3SDoug Rabson lf_overlaps(struct lockf_entry *x, struct lockf_entry *y) 730dfdcada3SDoug Rabson { 731dfdcada3SDoug Rabson 732dfdcada3SDoug Rabson return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start); 733dfdcada3SDoug Rabson } 734dfdcada3SDoug Rabson 735dfdcada3SDoug Rabson /* 736dfdcada3SDoug Rabson * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa). 737dfdcada3SDoug Rabson */ 738dfdcada3SDoug Rabson static int 739dfdcada3SDoug Rabson lf_blocks(struct lockf_entry *x, struct lockf_entry *y) 740dfdcada3SDoug Rabson { 741dfdcada3SDoug Rabson 742dfdcada3SDoug Rabson return x->lf_owner != y->lf_owner 743dfdcada3SDoug Rabson && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK) 744dfdcada3SDoug Rabson && lf_overlaps(x, y); 745dfdcada3SDoug Rabson } 746dfdcada3SDoug Rabson 747dfdcada3SDoug Rabson /* 748dfdcada3SDoug Rabson * Allocate a lock edge from the free list 749dfdcada3SDoug Rabson */ 750dfdcada3SDoug Rabson static struct lockf_edge * 751dfdcada3SDoug Rabson lf_alloc_edge(void) 752dfdcada3SDoug Rabson { 753dfdcada3SDoug Rabson 754dfdcada3SDoug Rabson return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO)); 755dfdcada3SDoug Rabson } 756dfdcada3SDoug Rabson 757dfdcada3SDoug Rabson /* 758dfdcada3SDoug Rabson * Free a lock edge. 759dfdcada3SDoug Rabson */ 760dfdcada3SDoug Rabson static void 761dfdcada3SDoug Rabson lf_free_edge(struct lockf_edge *e) 762dfdcada3SDoug Rabson { 763dfdcada3SDoug Rabson 764dfdcada3SDoug Rabson free(e, M_LOCKF); 765dfdcada3SDoug Rabson } 766dfdcada3SDoug Rabson 767dfdcada3SDoug Rabson 768dfdcada3SDoug Rabson /* 769dfdcada3SDoug Rabson * Ensure that the lock's owner has a corresponding vertex in the 770dfdcada3SDoug Rabson * owner graph. 771dfdcada3SDoug Rabson */ 772dfdcada3SDoug Rabson static void 773dfdcada3SDoug Rabson lf_alloc_vertex(struct lockf_entry *lock) 774dfdcada3SDoug Rabson { 775dfdcada3SDoug Rabson struct owner_graph *g = &lf_owner_graph; 776dfdcada3SDoug Rabson 777dfdcada3SDoug Rabson if (!lock->lf_owner->lo_vertex) 778dfdcada3SDoug Rabson lock->lf_owner->lo_vertex = 779dfdcada3SDoug Rabson graph_alloc_vertex(g, lock->lf_owner); 780dfdcada3SDoug Rabson } 781dfdcada3SDoug Rabson 782dfdcada3SDoug Rabson /* 783dfdcada3SDoug Rabson * Attempt to record an edge from lock x to lock y. Return EDEADLK if 784dfdcada3SDoug Rabson * the new edge would cause a cycle in the owner graph. 785dfdcada3SDoug Rabson */ 786dfdcada3SDoug Rabson static int 787dfdcada3SDoug Rabson lf_add_edge(struct lockf_entry *x, struct lockf_entry *y) 788dfdcada3SDoug Rabson { 789dfdcada3SDoug Rabson struct owner_graph *g = &lf_owner_graph; 790dfdcada3SDoug Rabson struct lockf_edge *e; 791dfdcada3SDoug Rabson int error; 792dfdcada3SDoug Rabson 793dfdcada3SDoug Rabson #ifdef INVARIANTS 794dfdcada3SDoug Rabson LIST_FOREACH(e, &x->lf_outedges, le_outlink) 795dfdcada3SDoug Rabson KASSERT(e->le_to != y, ("adding lock edge twice")); 796dfdcada3SDoug Rabson #endif 797dfdcada3SDoug Rabson 798dfdcada3SDoug Rabson /* 799dfdcada3SDoug Rabson * Make sure the two owners have entries in the owner graph. 800dfdcada3SDoug Rabson */ 801dfdcada3SDoug Rabson lf_alloc_vertex(x); 802dfdcada3SDoug Rabson lf_alloc_vertex(y); 803dfdcada3SDoug Rabson 804dfdcada3SDoug Rabson error = graph_add_edge(g, x->lf_owner->lo_vertex, 805dfdcada3SDoug Rabson y->lf_owner->lo_vertex); 806dfdcada3SDoug Rabson if (error) 807dfdcada3SDoug Rabson return (error); 808dfdcada3SDoug Rabson 809dfdcada3SDoug Rabson e = lf_alloc_edge(); 810dfdcada3SDoug Rabson LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink); 811dfdcada3SDoug Rabson LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink); 812dfdcada3SDoug Rabson e->le_from = x; 813dfdcada3SDoug Rabson e->le_to = y; 814dfdcada3SDoug Rabson 815dfdcada3SDoug Rabson return (0); 816dfdcada3SDoug Rabson } 817dfdcada3SDoug Rabson 818dfdcada3SDoug Rabson /* 819dfdcada3SDoug Rabson * Remove an edge from the lock graph. 820dfdcada3SDoug Rabson */ 821dfdcada3SDoug Rabson static void 822dfdcada3SDoug Rabson lf_remove_edge(struct lockf_edge *e) 823dfdcada3SDoug Rabson { 824dfdcada3SDoug Rabson struct owner_graph *g = &lf_owner_graph; 825dfdcada3SDoug Rabson struct lockf_entry *x = e->le_from; 826dfdcada3SDoug Rabson struct lockf_entry *y = e->le_to; 827dfdcada3SDoug Rabson 828dfdcada3SDoug Rabson graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex); 829dfdcada3SDoug Rabson LIST_REMOVE(e, le_outlink); 830dfdcada3SDoug Rabson LIST_REMOVE(e, le_inlink); 831dfdcada3SDoug Rabson e->le_from = NULL; 832dfdcada3SDoug Rabson e->le_to = NULL; 833dfdcada3SDoug Rabson lf_free_edge(e); 834dfdcada3SDoug Rabson } 835dfdcada3SDoug Rabson 836dfdcada3SDoug Rabson /* 837dfdcada3SDoug Rabson * Remove all out-going edges from lock x. 838dfdcada3SDoug Rabson */ 839dfdcada3SDoug Rabson static void 840dfdcada3SDoug Rabson lf_remove_outgoing(struct lockf_entry *x) 841dfdcada3SDoug Rabson { 842dfdcada3SDoug Rabson struct lockf_edge *e; 843dfdcada3SDoug Rabson 844dfdcada3SDoug Rabson while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) { 845dfdcada3SDoug Rabson lf_remove_edge(e); 846dfdcada3SDoug Rabson } 847dfdcada3SDoug Rabson } 848dfdcada3SDoug Rabson 849dfdcada3SDoug Rabson /* 850dfdcada3SDoug Rabson * Remove all in-coming edges from lock x. 851dfdcada3SDoug Rabson */ 852dfdcada3SDoug Rabson static void 853dfdcada3SDoug Rabson lf_remove_incoming(struct lockf_entry *x) 854dfdcada3SDoug Rabson { 855dfdcada3SDoug Rabson struct lockf_edge *e; 856dfdcada3SDoug Rabson 857dfdcada3SDoug Rabson while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) { 858dfdcada3SDoug Rabson lf_remove_edge(e); 859dfdcada3SDoug Rabson } 860dfdcada3SDoug Rabson } 861dfdcada3SDoug Rabson 862dfdcada3SDoug Rabson /* 863dfdcada3SDoug Rabson * Walk the list of locks for the file and create an out-going edge 864dfdcada3SDoug Rabson * from lock to each blocking lock. 865dfdcada3SDoug Rabson */ 866dfdcada3SDoug Rabson static int 867dfdcada3SDoug Rabson lf_add_outgoing(struct lockf *state, struct lockf_entry *lock) 868dfdcada3SDoug Rabson { 869dfdcada3SDoug Rabson struct lockf_entry *overlap; 870dfdcada3SDoug Rabson int error; 871dfdcada3SDoug Rabson 872dfdcada3SDoug Rabson LIST_FOREACH(overlap, &state->ls_active, lf_link) { 873dfdcada3SDoug Rabson /* 874dfdcada3SDoug Rabson * We may assume that the active list is sorted by 875dfdcada3SDoug Rabson * lf_start. 876dfdcada3SDoug Rabson */ 877dfdcada3SDoug Rabson if (overlap->lf_start > lock->lf_end) 878dfdcada3SDoug Rabson break; 879dfdcada3SDoug Rabson if (!lf_blocks(lock, overlap)) 880dfdcada3SDoug Rabson continue; 881dfdcada3SDoug Rabson 882dfdcada3SDoug Rabson /* 883dfdcada3SDoug Rabson * We've found a blocking lock. Add the corresponding 884dfdcada3SDoug Rabson * edge to the graphs and see if it would cause a 885dfdcada3SDoug Rabson * deadlock. 886dfdcada3SDoug Rabson */ 887dfdcada3SDoug Rabson error = lf_add_edge(lock, overlap); 888dfdcada3SDoug Rabson 889dfdcada3SDoug Rabson /* 890dfdcada3SDoug Rabson * The only error that lf_add_edge returns is EDEADLK. 891dfdcada3SDoug Rabson * Remove any edges we added and return the error. 892dfdcada3SDoug Rabson */ 893dfdcada3SDoug Rabson if (error) { 894dfdcada3SDoug Rabson lf_remove_outgoing(lock); 895dfdcada3SDoug Rabson return (error); 896dfdcada3SDoug Rabson } 897dfdcada3SDoug Rabson } 898dfdcada3SDoug Rabson 899dfdcada3SDoug Rabson /* 900dfdcada3SDoug Rabson * We also need to add edges to sleeping locks that block 901dfdcada3SDoug Rabson * us. This ensures that lf_wakeup_lock cannot grant two 902dfdcada3SDoug Rabson * mutually blocking locks simultaneously and also enforces a 903dfdcada3SDoug Rabson * 'first come, first served' fairness model. Note that this 904dfdcada3SDoug Rabson * only happens if we are blocked by at least one active lock 905dfdcada3SDoug Rabson * due to the call to lf_getblock in lf_setlock below. 906dfdcada3SDoug Rabson */ 907dfdcada3SDoug Rabson LIST_FOREACH(overlap, &state->ls_pending, lf_link) { 908dfdcada3SDoug Rabson if (!lf_blocks(lock, overlap)) 909dfdcada3SDoug Rabson continue; 910dfdcada3SDoug Rabson /* 911dfdcada3SDoug Rabson * We've found a blocking lock. Add the corresponding 912dfdcada3SDoug Rabson * edge to the graphs and see if it would cause a 913dfdcada3SDoug Rabson * deadlock. 914dfdcada3SDoug Rabson */ 915dfdcada3SDoug Rabson error = lf_add_edge(lock, overlap); 916dfdcada3SDoug Rabson 917dfdcada3SDoug Rabson /* 918dfdcada3SDoug Rabson * The only error that lf_add_edge returns is EDEADLK. 919dfdcada3SDoug Rabson * Remove any edges we added and return the error. 920dfdcada3SDoug Rabson */ 921dfdcada3SDoug Rabson if (error) { 922dfdcada3SDoug Rabson lf_remove_outgoing(lock); 923dfdcada3SDoug Rabson return (error); 924dfdcada3SDoug Rabson } 925dfdcada3SDoug Rabson } 926dfdcada3SDoug Rabson 927dfdcada3SDoug Rabson return (0); 928dfdcada3SDoug Rabson } 929dfdcada3SDoug Rabson 930dfdcada3SDoug Rabson /* 931dfdcada3SDoug Rabson * Walk the list of pending locks for the file and create an in-coming 932dfdcada3SDoug Rabson * edge from lock to each blocking lock. 933dfdcada3SDoug Rabson */ 934dfdcada3SDoug Rabson static int 935dfdcada3SDoug Rabson lf_add_incoming(struct lockf *state, struct lockf_entry *lock) 936dfdcada3SDoug Rabson { 937dfdcada3SDoug Rabson struct lockf_entry *overlap; 938dfdcada3SDoug Rabson int error; 939dfdcada3SDoug Rabson 940dfdcada3SDoug Rabson LIST_FOREACH(overlap, &state->ls_pending, lf_link) { 941dfdcada3SDoug Rabson if (!lf_blocks(lock, overlap)) 942dfdcada3SDoug Rabson continue; 943dfdcada3SDoug Rabson 944dfdcada3SDoug Rabson /* 945dfdcada3SDoug Rabson * We've found a blocking lock. Add the corresponding 946dfdcada3SDoug Rabson * edge to the graphs and see if it would cause a 947dfdcada3SDoug Rabson * deadlock. 948dfdcada3SDoug Rabson */ 949dfdcada3SDoug Rabson error = lf_add_edge(overlap, lock); 950dfdcada3SDoug Rabson 951dfdcada3SDoug Rabson /* 952dfdcada3SDoug Rabson * The only error that lf_add_edge returns is EDEADLK. 953dfdcada3SDoug Rabson * Remove any edges we added and return the error. 954dfdcada3SDoug Rabson */ 955dfdcada3SDoug Rabson if (error) { 956dfdcada3SDoug Rabson lf_remove_incoming(lock); 957dfdcada3SDoug Rabson return (error); 958dfdcada3SDoug Rabson } 959dfdcada3SDoug Rabson } 960dfdcada3SDoug Rabson return (0); 961dfdcada3SDoug Rabson } 962dfdcada3SDoug Rabson 963dfdcada3SDoug Rabson /* 964dfdcada3SDoug Rabson * Insert lock into the active list, keeping list entries ordered by 965dfdcada3SDoug Rabson * increasing values of lf_start. 966dfdcada3SDoug Rabson */ 967dfdcada3SDoug Rabson static void 968dfdcada3SDoug Rabson lf_insert_lock(struct lockf *state, struct lockf_entry *lock) 969dfdcada3SDoug Rabson { 970dfdcada3SDoug Rabson struct lockf_entry *lf, *lfprev; 971dfdcada3SDoug Rabson 972dfdcada3SDoug Rabson if (LIST_EMPTY(&state->ls_active)) { 973dfdcada3SDoug Rabson LIST_INSERT_HEAD(&state->ls_active, lock, lf_link); 974dfdcada3SDoug Rabson return; 975dfdcada3SDoug Rabson } 976dfdcada3SDoug Rabson 977dfdcada3SDoug Rabson lfprev = NULL; 978dfdcada3SDoug Rabson LIST_FOREACH(lf, &state->ls_active, lf_link) { 979dfdcada3SDoug Rabson if (lf->lf_start > lock->lf_start) { 980dfdcada3SDoug Rabson LIST_INSERT_BEFORE(lf, lock, lf_link); 981dfdcada3SDoug Rabson return; 982dfdcada3SDoug Rabson } 983dfdcada3SDoug Rabson lfprev = lf; 984dfdcada3SDoug Rabson } 985dfdcada3SDoug Rabson LIST_INSERT_AFTER(lfprev, lock, lf_link); 986dfdcada3SDoug Rabson } 987dfdcada3SDoug Rabson 988dfdcada3SDoug Rabson /* 989dfdcada3SDoug Rabson * Wake up a sleeping lock and remove it from the pending list now 990dfdcada3SDoug Rabson * that all its dependancies have been resolved. The caller should 991dfdcada3SDoug Rabson * arrange for the lock to be added to the active list, adjusting any 992dfdcada3SDoug Rabson * existing locks for the same owner as needed. 993dfdcada3SDoug Rabson */ 994dfdcada3SDoug Rabson static void 995dfdcada3SDoug Rabson lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock) 996dfdcada3SDoug Rabson { 997dfdcada3SDoug Rabson 998dfdcada3SDoug Rabson /* 999dfdcada3SDoug Rabson * Remove from ls_pending list and wake up the caller 1000dfdcada3SDoug Rabson * or start the async notification, as appropriate. 1001dfdcada3SDoug Rabson */ 1002dfdcada3SDoug Rabson LIST_REMOVE(wakelock, lf_link); 1003dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 1004dfdcada3SDoug Rabson if (lockf_debug & 1) 1005dfdcada3SDoug Rabson lf_print("lf_wakeup_lock: awakening", wakelock); 1006dfdcada3SDoug Rabson #endif /* LOCKF_DEBUG */ 1007dfdcada3SDoug Rabson if (wakelock->lf_async_task) { 1008dfdcada3SDoug Rabson taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task); 1009dfdcada3SDoug Rabson } else { 1010dfdcada3SDoug Rabson wakeup(wakelock); 1011dfdcada3SDoug Rabson } 1012dfdcada3SDoug Rabson } 1013dfdcada3SDoug Rabson 1014dfdcada3SDoug Rabson /* 1015dfdcada3SDoug Rabson * Re-check all dependant locks and remove edges to locks that we no 1016dfdcada3SDoug Rabson * longer block. If 'all' is non-zero, the lock has been removed and 1017dfdcada3SDoug Rabson * we must remove all the dependancies, otherwise it has simply been 1018dfdcada3SDoug Rabson * reduced but remains active. Any pending locks which have been been 1019dfdcada3SDoug Rabson * unblocked are added to 'granted' 1020dfdcada3SDoug Rabson */ 1021dfdcada3SDoug Rabson static void 1022dfdcada3SDoug Rabson lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all, 1023dfdcada3SDoug Rabson struct lockf_entry_list *granted) 1024dfdcada3SDoug Rabson { 1025dfdcada3SDoug Rabson struct lockf_edge *e, *ne; 1026dfdcada3SDoug Rabson struct lockf_entry *deplock; 1027dfdcada3SDoug Rabson 1028dfdcada3SDoug Rabson LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) { 1029dfdcada3SDoug Rabson deplock = e->le_from; 1030dfdcada3SDoug Rabson if (all || !lf_blocks(lock, deplock)) { 1031dfdcada3SDoug Rabson sx_xlock(&lf_owner_graph_lock); 1032dfdcada3SDoug Rabson lf_remove_edge(e); 1033dfdcada3SDoug Rabson sx_xunlock(&lf_owner_graph_lock); 1034dfdcada3SDoug Rabson if (LIST_EMPTY(&deplock->lf_outedges)) { 1035dfdcada3SDoug Rabson lf_wakeup_lock(state, deplock); 1036dfdcada3SDoug Rabson LIST_INSERT_HEAD(granted, deplock, lf_link); 1037dfdcada3SDoug Rabson } 1038dfdcada3SDoug Rabson } 1039dfdcada3SDoug Rabson } 1040dfdcada3SDoug Rabson } 1041dfdcada3SDoug Rabson 1042dfdcada3SDoug Rabson /* 1043dfdcada3SDoug Rabson * Set the start of an existing active lock, updating dependancies and 1044dfdcada3SDoug Rabson * adding any newly woken locks to 'granted'. 1045dfdcada3SDoug Rabson */ 1046dfdcada3SDoug Rabson static void 1047dfdcada3SDoug Rabson lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start, 1048dfdcada3SDoug Rabson struct lockf_entry_list *granted) 1049dfdcada3SDoug Rabson { 1050dfdcada3SDoug Rabson 1051dfdcada3SDoug Rabson KASSERT(new_start >= lock->lf_start, ("can't increase lock")); 1052dfdcada3SDoug Rabson lock->lf_start = new_start; 1053dfdcada3SDoug Rabson LIST_REMOVE(lock, lf_link); 1054dfdcada3SDoug Rabson lf_insert_lock(state, lock); 1055dfdcada3SDoug Rabson lf_update_dependancies(state, lock, FALSE, granted); 1056dfdcada3SDoug Rabson } 1057dfdcada3SDoug Rabson 1058dfdcada3SDoug Rabson /* 1059dfdcada3SDoug Rabson * Set the end of an existing active lock, updating dependancies and 1060dfdcada3SDoug Rabson * adding any newly woken locks to 'granted'. 1061dfdcada3SDoug Rabson */ 1062dfdcada3SDoug Rabson static void 1063dfdcada3SDoug Rabson lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end, 1064dfdcada3SDoug Rabson struct lockf_entry_list *granted) 1065dfdcada3SDoug Rabson { 1066dfdcada3SDoug Rabson 1067dfdcada3SDoug Rabson KASSERT(new_end <= lock->lf_end, ("can't increase lock")); 1068dfdcada3SDoug Rabson lock->lf_end = new_end; 1069dfdcada3SDoug Rabson lf_update_dependancies(state, lock, FALSE, granted); 1070dfdcada3SDoug Rabson } 1071dfdcada3SDoug Rabson 1072dfdcada3SDoug Rabson /* 1073dfdcada3SDoug Rabson * Add a lock to the active list, updating or removing any current 1074dfdcada3SDoug Rabson * locks owned by the same owner and processing any pending locks that 1075dfdcada3SDoug Rabson * become unblocked as a result. This code is also used for unlock 1076dfdcada3SDoug Rabson * since the logic for updating existing locks is identical. 1077dfdcada3SDoug Rabson * 1078dfdcada3SDoug Rabson * As a result of processing the new lock, we may unblock existing 1079dfdcada3SDoug Rabson * pending locks as a result of downgrading/unlocking. We simply 1080dfdcada3SDoug Rabson * activate the newly granted locks by looping. 1081dfdcada3SDoug Rabson * 1082dfdcada3SDoug Rabson * Since the new lock already has its dependancies set up, we always 1083dfdcada3SDoug Rabson * add it to the list (unless its an unlock request). This may 1084dfdcada3SDoug Rabson * fragment the lock list in some pathological cases but its probably 1085dfdcada3SDoug Rabson * not a real problem. 1086dfdcada3SDoug Rabson */ 1087dfdcada3SDoug Rabson static void 1088dfdcada3SDoug Rabson lf_activate_lock(struct lockf *state, struct lockf_entry *lock) 1089dfdcada3SDoug Rabson { 1090dfdcada3SDoug Rabson struct lockf_entry *overlap, *lf; 1091dfdcada3SDoug Rabson struct lockf_entry_list granted; 1092dfdcada3SDoug Rabson int ovcase; 1093dfdcada3SDoug Rabson 1094dfdcada3SDoug Rabson LIST_INIT(&granted); 1095dfdcada3SDoug Rabson LIST_INSERT_HEAD(&granted, lock, lf_link); 1096dfdcada3SDoug Rabson 1097dfdcada3SDoug Rabson while (!LIST_EMPTY(&granted)) { 1098dfdcada3SDoug Rabson lock = LIST_FIRST(&granted); 1099dfdcada3SDoug Rabson LIST_REMOVE(lock, lf_link); 1100dfdcada3SDoug Rabson 1101dfdcada3SDoug Rabson /* 1102dfdcada3SDoug Rabson * Skip over locks owned by other processes. Handle 1103dfdcada3SDoug Rabson * any locks that overlap and are owned by ourselves. 1104dfdcada3SDoug Rabson */ 1105dfdcada3SDoug Rabson overlap = LIST_FIRST(&state->ls_active); 1106dfdcada3SDoug Rabson for (;;) { 1107dfdcada3SDoug Rabson ovcase = lf_findoverlap(&overlap, lock, SELF); 1108dfdcada3SDoug Rabson 1109dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 1110dfdcada3SDoug Rabson if (ovcase && (lockf_debug & 2)) { 1111dfdcada3SDoug Rabson printf("lf_setlock: overlap %d", ovcase); 1112dfdcada3SDoug Rabson lf_print("", overlap); 1113dfdcada3SDoug Rabson } 1114dfdcada3SDoug Rabson #endif 1115dfdcada3SDoug Rabson /* 1116dfdcada3SDoug Rabson * Six cases: 1117dfdcada3SDoug Rabson * 0) no overlap 1118dfdcada3SDoug Rabson * 1) overlap == lock 1119dfdcada3SDoug Rabson * 2) overlap contains lock 1120dfdcada3SDoug Rabson * 3) lock contains overlap 1121dfdcada3SDoug Rabson * 4) overlap starts before lock 1122dfdcada3SDoug Rabson * 5) overlap ends after lock 1123dfdcada3SDoug Rabson */ 1124dfdcada3SDoug Rabson switch (ovcase) { 1125dfdcada3SDoug Rabson case 0: /* no overlap */ 1126dfdcada3SDoug Rabson break; 1127dfdcada3SDoug Rabson 1128dfdcada3SDoug Rabson case 1: /* overlap == lock */ 1129dfdcada3SDoug Rabson /* 1130dfdcada3SDoug Rabson * We have already setup the 1131dfdcada3SDoug Rabson * dependants for the new lock, taking 1132dfdcada3SDoug Rabson * into account a possible downgrade 1133dfdcada3SDoug Rabson * or unlock. Remove the old lock. 1134dfdcada3SDoug Rabson */ 1135dfdcada3SDoug Rabson LIST_REMOVE(overlap, lf_link); 1136dfdcada3SDoug Rabson lf_update_dependancies(state, overlap, TRUE, 1137dfdcada3SDoug Rabson &granted); 1138dfdcada3SDoug Rabson lf_free_lock(overlap); 1139dfdcada3SDoug Rabson break; 1140dfdcada3SDoug Rabson 1141dfdcada3SDoug Rabson case 2: /* overlap contains lock */ 1142dfdcada3SDoug Rabson /* 1143dfdcada3SDoug Rabson * Just split the existing lock. 1144dfdcada3SDoug Rabson */ 1145dfdcada3SDoug Rabson lf_split(state, overlap, lock, &granted); 1146dfdcada3SDoug Rabson break; 1147dfdcada3SDoug Rabson 1148dfdcada3SDoug Rabson case 3: /* lock contains overlap */ 1149dfdcada3SDoug Rabson /* 1150dfdcada3SDoug Rabson * Delete the overlap and advance to 1151dfdcada3SDoug Rabson * the next entry in the list. 1152dfdcada3SDoug Rabson */ 1153dfdcada3SDoug Rabson lf = LIST_NEXT(overlap, lf_link); 1154dfdcada3SDoug Rabson LIST_REMOVE(overlap, lf_link); 1155dfdcada3SDoug Rabson lf_update_dependancies(state, overlap, TRUE, 1156dfdcada3SDoug Rabson &granted); 1157dfdcada3SDoug Rabson lf_free_lock(overlap); 1158dfdcada3SDoug Rabson overlap = lf; 1159dfdcada3SDoug Rabson continue; 1160dfdcada3SDoug Rabson 1161dfdcada3SDoug Rabson case 4: /* overlap starts before lock */ 1162dfdcada3SDoug Rabson /* 1163dfdcada3SDoug Rabson * Just update the overlap end and 1164dfdcada3SDoug Rabson * move on. 1165dfdcada3SDoug Rabson */ 1166dfdcada3SDoug Rabson lf_set_end(state, overlap, lock->lf_start - 1, 1167dfdcada3SDoug Rabson &granted); 1168dfdcada3SDoug Rabson overlap = LIST_NEXT(overlap, lf_link); 1169dfdcada3SDoug Rabson continue; 1170dfdcada3SDoug Rabson 1171dfdcada3SDoug Rabson case 5: /* overlap ends after lock */ 1172dfdcada3SDoug Rabson /* 1173dfdcada3SDoug Rabson * Change the start of overlap and 1174dfdcada3SDoug Rabson * re-insert. 1175dfdcada3SDoug Rabson */ 1176dfdcada3SDoug Rabson lf_set_start(state, overlap, lock->lf_end + 1, 1177dfdcada3SDoug Rabson &granted); 1178dfdcada3SDoug Rabson break; 1179dfdcada3SDoug Rabson } 1180dfdcada3SDoug Rabson break; 1181dfdcada3SDoug Rabson } 1182dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 1183dfdcada3SDoug Rabson if (lockf_debug & 1) { 1184dfdcada3SDoug Rabson if (lock->lf_type != F_UNLCK) 1185dfdcada3SDoug Rabson lf_print("lf_activate_lock: activated", lock); 1186dfdcada3SDoug Rabson else 1187dfdcada3SDoug Rabson lf_print("lf_activate_lock: unlocked", lock); 1188dfdcada3SDoug Rabson lf_printlist("lf_activate_lock", lock); 1189dfdcada3SDoug Rabson } 1190dfdcada3SDoug Rabson #endif /* LOCKF_DEBUG */ 1191dfdcada3SDoug Rabson if (lock->lf_type != F_UNLCK) 1192dfdcada3SDoug Rabson lf_insert_lock(state, lock); 1193dfdcada3SDoug Rabson } 1194dfdcada3SDoug Rabson } 1195dfdcada3SDoug Rabson 1196dfdcada3SDoug Rabson /* 1197dfdcada3SDoug Rabson * Cancel a pending lock request, either as a result of a signal or a 1198dfdcada3SDoug Rabson * cancel request for an async lock. 1199dfdcada3SDoug Rabson */ 1200dfdcada3SDoug Rabson static void 1201dfdcada3SDoug Rabson lf_cancel_lock(struct lockf *state, struct lockf_entry *lock) 1202dfdcada3SDoug Rabson { 1203dfdcada3SDoug Rabson struct lockf_entry_list granted; 1204dfdcada3SDoug Rabson 1205dfdcada3SDoug Rabson /* 1206dfdcada3SDoug Rabson * Note it is theoretically possible that cancelling this lock 1207dfdcada3SDoug Rabson * may allow some other pending lock to become 1208dfdcada3SDoug Rabson * active. Consider this case: 1209dfdcada3SDoug Rabson * 1210dfdcada3SDoug Rabson * Owner Action Result Dependancies 1211dfdcada3SDoug Rabson * 1212dfdcada3SDoug Rabson * A: lock [0..0] succeeds 1213dfdcada3SDoug Rabson * B: lock [2..2] succeeds 1214dfdcada3SDoug Rabson * C: lock [1..2] blocked C->B 1215dfdcada3SDoug Rabson * D: lock [0..1] blocked C->B,D->A,D->C 1216dfdcada3SDoug Rabson * A: unlock [0..0] C->B,D->C 1217dfdcada3SDoug Rabson * C: cancel [1..2] 1218dfdcada3SDoug Rabson */ 1219dfdcada3SDoug Rabson 1220dfdcada3SDoug Rabson LIST_REMOVE(lock, lf_link); 1221dfdcada3SDoug Rabson 1222dfdcada3SDoug Rabson /* 1223dfdcada3SDoug Rabson * Removing out-going edges is simple. 1224dfdcada3SDoug Rabson */ 1225dfdcada3SDoug Rabson sx_xlock(&lf_owner_graph_lock); 1226dfdcada3SDoug Rabson lf_remove_outgoing(lock); 1227dfdcada3SDoug Rabson sx_xunlock(&lf_owner_graph_lock); 1228dfdcada3SDoug Rabson 1229dfdcada3SDoug Rabson /* 1230dfdcada3SDoug Rabson * Removing in-coming edges may allow some other lock to 1231dfdcada3SDoug Rabson * become active - we use lf_update_dependancies to figure 1232dfdcada3SDoug Rabson * this out. 1233dfdcada3SDoug Rabson */ 1234dfdcada3SDoug Rabson LIST_INIT(&granted); 1235dfdcada3SDoug Rabson lf_update_dependancies(state, lock, TRUE, &granted); 1236dfdcada3SDoug Rabson lf_free_lock(lock); 1237dfdcada3SDoug Rabson 1238dfdcada3SDoug Rabson /* 1239dfdcada3SDoug Rabson * Feed any newly active locks to lf_activate_lock. 1240dfdcada3SDoug Rabson */ 1241dfdcada3SDoug Rabson while (!LIST_EMPTY(&granted)) { 1242dfdcada3SDoug Rabson lock = LIST_FIRST(&granted); 1243dfdcada3SDoug Rabson LIST_REMOVE(lock, lf_link); 1244dfdcada3SDoug Rabson lf_activate_lock(state, lock); 1245dfdcada3SDoug Rabson } 1246dfdcada3SDoug Rabson } 1247dfdcada3SDoug Rabson 124892dc7331SDavid Greenman /* 124992dc7331SDavid Greenman * Set a byte-range lock. 125092dc7331SDavid Greenman */ 125187b6de2bSPoul-Henning Kamp static int 1252dfdcada3SDoug Rabson lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp, 1253dfdcada3SDoug Rabson void **cookiep) 125492dc7331SDavid Greenman { 1255dfdcada3SDoug Rabson struct lockf_entry *block; 125692dc7331SDavid Greenman static char lockstr[] = "lockf"; 1257dfdcada3SDoug Rabson int priority, error; 125892dc7331SDavid Greenman 125992dc7331SDavid Greenman #ifdef LOCKF_DEBUG 126092dc7331SDavid Greenman if (lockf_debug & 1) 126192dc7331SDavid Greenman lf_print("lf_setlock", lock); 126292dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 126392dc7331SDavid Greenman 126492dc7331SDavid Greenman /* 126592dc7331SDavid Greenman * Set the priority 126692dc7331SDavid Greenman */ 126792dc7331SDavid Greenman priority = PLOCK; 126892dc7331SDavid Greenman if (lock->lf_type == F_WRLCK) 126992dc7331SDavid Greenman priority += 4; 127092dc7331SDavid Greenman priority |= PCATCH; 127192dc7331SDavid Greenman /* 127292dc7331SDavid Greenman * Scan lock list for this file looking for locks that would block us. 127392dc7331SDavid Greenman */ 1274dfdcada3SDoug Rabson while ((block = lf_getblock(state, lock))) { 127592dc7331SDavid Greenman /* 127692dc7331SDavid Greenman * Free the structure and return if nonblocking. 127792dc7331SDavid Greenman */ 1278dfdcada3SDoug Rabson if ((lock->lf_flags & F_WAIT) == 0 1279dfdcada3SDoug Rabson && lock->lf_async_task == NULL) { 1280dfdcada3SDoug Rabson lf_free_lock(lock); 1281dfdcada3SDoug Rabson error = EAGAIN; 1282dfdcada3SDoug Rabson goto out; 128392dc7331SDavid Greenman } 128492dc7331SDavid Greenman 1285dfdcada3SDoug Rabson /* 1286dfdcada3SDoug Rabson * We are blocked. Create edges to each blocking lock, 1287dfdcada3SDoug Rabson * checking for deadlock using the owner graph. For 1288dfdcada3SDoug Rabson * simplicity, we run deadlock detection for all 1289dfdcada3SDoug Rabson * locks, posix and otherwise. 1290dfdcada3SDoug Rabson */ 1291dfdcada3SDoug Rabson sx_xlock(&lf_owner_graph_lock); 1292dfdcada3SDoug Rabson error = lf_add_outgoing(state, lock); 1293dfdcada3SDoug Rabson sx_xunlock(&lf_owner_graph_lock); 1294dfdcada3SDoug Rabson 1295dfdcada3SDoug Rabson if (error) { 1296dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 1297dfdcada3SDoug Rabson if (lockf_debug & 1) 1298dfdcada3SDoug Rabson lf_print("lf_setlock: deadlock", lock); 1299dfdcada3SDoug Rabson #endif 1300dfdcada3SDoug Rabson lf_free_lock(lock); 1301dfdcada3SDoug Rabson goto out; 130292dc7331SDavid Greenman } 1303dfdcada3SDoug Rabson 130492dc7331SDavid Greenman /* 130592dc7331SDavid Greenman * For flock type locks, we must first remove 130692dc7331SDavid Greenman * any shared locks that we hold before we sleep 130792dc7331SDavid Greenman * waiting for an exclusive lock. 130892dc7331SDavid Greenman */ 130992dc7331SDavid Greenman if ((lock->lf_flags & F_FLOCK) && 131092dc7331SDavid Greenman lock->lf_type == F_WRLCK) { 131192dc7331SDavid Greenman lock->lf_type = F_UNLCK; 1312dfdcada3SDoug Rabson lf_activate_lock(state, lock); 131392dc7331SDavid Greenman lock->lf_type = F_WRLCK; 131492dc7331SDavid Greenman } 131592dc7331SDavid Greenman /* 1316dfdcada3SDoug Rabson * We have added edges to everything that blocks 1317dfdcada3SDoug Rabson * us. Sleep until they all go away. 131892dc7331SDavid Greenman */ 1319dfdcada3SDoug Rabson LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link); 132092dc7331SDavid Greenman #ifdef LOCKF_DEBUG 132192dc7331SDavid Greenman if (lockf_debug & 1) { 1322dfdcada3SDoug Rabson struct lockf_edge *e; 1323dfdcada3SDoug Rabson LIST_FOREACH(e, &lock->lf_outedges, le_outlink) { 1324dfdcada3SDoug Rabson lf_print("lf_setlock: blocking on", e->le_to); 1325dfdcada3SDoug Rabson lf_printlist("lf_setlock", e->le_to); 1326dfdcada3SDoug Rabson } 132792dc7331SDavid Greenman } 132892dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 1329dfdcada3SDoug Rabson 1330dfdcada3SDoug Rabson if ((lock->lf_flags & F_WAIT) == 0) { 1331dfdcada3SDoug Rabson /* 1332dfdcada3SDoug Rabson * The caller requested async notification - 1333dfdcada3SDoug Rabson * this callback happens when the blocking 1334dfdcada3SDoug Rabson * lock is released, allowing the caller to 1335dfdcada3SDoug Rabson * make another attempt to take the lock. 1336dfdcada3SDoug Rabson */ 1337dfdcada3SDoug Rabson *cookiep = (void *) lock; 1338dfdcada3SDoug Rabson error = EINPROGRESS; 1339dfdcada3SDoug Rabson goto out; 1340dfdcada3SDoug Rabson } 1341dfdcada3SDoug Rabson 1342dfdcada3SDoug Rabson error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0); 134392dc7331SDavid Greenman /* 13441168ab08SBruce Evans * We may have been awakened by a signal and/or by a 1345dfdcada3SDoug Rabson * debugger continuing us (in which cases we must 1346dfdcada3SDoug Rabson * remove our lock graph edges) and/or by another 1347dfdcada3SDoug Rabson * process releasing a lock (in which case our edges 1348dfdcada3SDoug Rabson * have already been removed and we have been moved to 1349dfdcada3SDoug Rabson * the active list). 1350dfdcada3SDoug Rabson * 1351dfdcada3SDoug Rabson * Note that it is possible to receive a signal after 1352dfdcada3SDoug Rabson * we were successfully woken (and moved to the active 1353dfdcada3SDoug Rabson * list) but before we resumed execution. In this 1354dfdcada3SDoug Rabson * case, our lf_outedges list will be clear. We 1355dfdcada3SDoug Rabson * pretend there was no error. 1356dfdcada3SDoug Rabson * 1357dfdcada3SDoug Rabson * Note also, if we have been sleeping long enough, we 1358dfdcada3SDoug Rabson * may now have incoming edges from some newer lock 1359dfdcada3SDoug Rabson * which is waiting behind us in the queue. 136092dc7331SDavid Greenman */ 1361dfdcada3SDoug Rabson if (LIST_EMPTY(&lock->lf_outedges)) { 1362dfdcada3SDoug Rabson error = 0; 1363dfdcada3SDoug Rabson } else { 1364dfdcada3SDoug Rabson lf_cancel_lock(state, lock); 1365dfdcada3SDoug Rabson goto out; 13661168ab08SBruce Evans } 1367dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 1368dfdcada3SDoug Rabson if (lockf_debug & 1) { 1369dfdcada3SDoug Rabson lf_print("lf_setlock: granted", lock); 1370dfdcada3SDoug Rabson } 1371dfdcada3SDoug Rabson #endif 1372dfdcada3SDoug Rabson goto out; 1373dfdcada3SDoug Rabson } 1374dfdcada3SDoug Rabson /* 1375dfdcada3SDoug Rabson * It looks like we are going to grant the lock. First add 1376dfdcada3SDoug Rabson * edges from any currently pending lock that the new lock 1377dfdcada3SDoug Rabson * would block. 1378dfdcada3SDoug Rabson */ 1379dfdcada3SDoug Rabson sx_xlock(&lf_owner_graph_lock); 1380dfdcada3SDoug Rabson error = lf_add_incoming(state, lock); 1381dfdcada3SDoug Rabson sx_xunlock(&lf_owner_graph_lock); 13821168ab08SBruce Evans if (error) { 1383dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 1384dfdcada3SDoug Rabson if (lockf_debug & 1) 1385dfdcada3SDoug Rabson lf_print("lf_setlock: deadlock", lock); 1386dfdcada3SDoug Rabson #endif 1387dfdcada3SDoug Rabson lf_free_lock(lock); 1388dfdcada3SDoug Rabson goto out; 138992dc7331SDavid Greenman } 1390dfdcada3SDoug Rabson 139192dc7331SDavid Greenman /* 139292dc7331SDavid Greenman * No blocks!! Add the lock. Note that we will 139392dc7331SDavid Greenman * downgrade or upgrade any overlapping locks this 139492dc7331SDavid Greenman * process already owns. 139592dc7331SDavid Greenman */ 1396dfdcada3SDoug Rabson lf_activate_lock(state, lock); 1397dfdcada3SDoug Rabson error = 0; 1398dfdcada3SDoug Rabson out: 1399dfdcada3SDoug Rabson return (error); 140092dc7331SDavid Greenman } 140192dc7331SDavid Greenman 140292dc7331SDavid Greenman /* 140392dc7331SDavid Greenman * Remove a byte-range lock on an inode. 140492dc7331SDavid Greenman * 140592dc7331SDavid Greenman * Generally, find the lock (or an overlap to that lock) 140692dc7331SDavid Greenman * and remove it (or shrink it), then wakeup anyone we can. 140792dc7331SDavid Greenman */ 140887b6de2bSPoul-Henning Kamp static int 1409dfdcada3SDoug Rabson lf_clearlock(struct lockf *state, struct lockf_entry *unlock) 141092dc7331SDavid Greenman { 1411dfdcada3SDoug Rabson struct lockf_entry *overlap; 141292dc7331SDavid Greenman 1413dfdcada3SDoug Rabson overlap = LIST_FIRST(&state->ls_active); 1414dfdcada3SDoug Rabson 1415dfdcada3SDoug Rabson if (overlap == NOLOCKF) 141692dc7331SDavid Greenman return (0); 141792dc7331SDavid Greenman #ifdef LOCKF_DEBUG 141892dc7331SDavid Greenman if (unlock->lf_type != F_UNLCK) 141992dc7331SDavid Greenman panic("lf_clearlock: bad type"); 142092dc7331SDavid Greenman if (lockf_debug & 1) 142192dc7331SDavid Greenman lf_print("lf_clearlock", unlock); 142292dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 142392dc7331SDavid Greenman 1424dfdcada3SDoug Rabson lf_activate_lock(state, unlock); 142592dc7331SDavid Greenman 142692dc7331SDavid Greenman return (0); 142792dc7331SDavid Greenman } 142892dc7331SDavid Greenman 142992dc7331SDavid Greenman /* 1430dfdcada3SDoug Rabson * Check whether there is a blocking lock, and if so return its 1431dfdcada3SDoug Rabson * details in '*fl'. 143292dc7331SDavid Greenman */ 143387b6de2bSPoul-Henning Kamp static int 1434dfdcada3SDoug Rabson lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl) 143592dc7331SDavid Greenman { 1436dfdcada3SDoug Rabson struct lockf_entry *block; 143792dc7331SDavid Greenman 143892dc7331SDavid Greenman #ifdef LOCKF_DEBUG 143992dc7331SDavid Greenman if (lockf_debug & 1) 144092dc7331SDavid Greenman lf_print("lf_getlock", lock); 144192dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 144292dc7331SDavid Greenman 1443dfdcada3SDoug Rabson if ((block = lf_getblock(state, lock))) { 144492dc7331SDavid Greenman fl->l_type = block->lf_type; 144592dc7331SDavid Greenman fl->l_whence = SEEK_SET; 144692dc7331SDavid Greenman fl->l_start = block->lf_start; 1447dfdcada3SDoug Rabson if (block->lf_end == OFF_MAX) 144892dc7331SDavid Greenman fl->l_len = 0; 144992dc7331SDavid Greenman else 145092dc7331SDavid Greenman fl->l_len = block->lf_end - block->lf_start + 1; 1451dfdcada3SDoug Rabson fl->l_pid = block->lf_owner->lo_pid; 1452dfdcada3SDoug Rabson fl->l_sysid = block->lf_owner->lo_sysid; 145392dc7331SDavid Greenman } else { 145492dc7331SDavid Greenman fl->l_type = F_UNLCK; 145592dc7331SDavid Greenman } 145692dc7331SDavid Greenman return (0); 145792dc7331SDavid Greenman } 145892dc7331SDavid Greenman 145992dc7331SDavid Greenman /* 1460dfdcada3SDoug Rabson * Cancel an async lock request. 1461dfdcada3SDoug Rabson */ 1462dfdcada3SDoug Rabson static int 1463dfdcada3SDoug Rabson lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie) 1464dfdcada3SDoug Rabson { 1465dfdcada3SDoug Rabson struct lockf_entry *reallock; 1466dfdcada3SDoug Rabson 1467dfdcada3SDoug Rabson /* 1468dfdcada3SDoug Rabson * We need to match this request with an existing lock 1469dfdcada3SDoug Rabson * request. 1470dfdcada3SDoug Rabson */ 1471dfdcada3SDoug Rabson LIST_FOREACH(reallock, &state->ls_pending, lf_link) { 1472dfdcada3SDoug Rabson if ((void *) reallock == cookie) { 1473dfdcada3SDoug Rabson /* 1474dfdcada3SDoug Rabson * Double-check that this lock looks right 1475dfdcada3SDoug Rabson * (maybe use a rolling ID for the cancel 1476dfdcada3SDoug Rabson * cookie instead?) 1477dfdcada3SDoug Rabson */ 1478dfdcada3SDoug Rabson if (!(reallock->lf_vnode == lock->lf_vnode 1479dfdcada3SDoug Rabson && reallock->lf_start == lock->lf_start 1480dfdcada3SDoug Rabson && reallock->lf_end == lock->lf_end)) { 1481dfdcada3SDoug Rabson return (ENOENT); 1482dfdcada3SDoug Rabson } 1483dfdcada3SDoug Rabson 1484dfdcada3SDoug Rabson /* 1485dfdcada3SDoug Rabson * Make sure this lock was async and then just 1486dfdcada3SDoug Rabson * remove it from its wait lists. 1487dfdcada3SDoug Rabson */ 1488dfdcada3SDoug Rabson if (!reallock->lf_async_task) { 1489dfdcada3SDoug Rabson return (ENOENT); 1490dfdcada3SDoug Rabson } 1491dfdcada3SDoug Rabson 1492dfdcada3SDoug Rabson /* 1493dfdcada3SDoug Rabson * Note that since any other thread must take 1494dfdcada3SDoug Rabson * state->ls_lock before it can possibly 1495dfdcada3SDoug Rabson * trigger the async callback, we are safe 1496dfdcada3SDoug Rabson * from a race with lf_wakeup_lock, i.e. we 1497dfdcada3SDoug Rabson * can free the lock (actually our caller does 1498dfdcada3SDoug Rabson * this). 1499dfdcada3SDoug Rabson */ 1500dfdcada3SDoug Rabson lf_cancel_lock(state, reallock); 1501dfdcada3SDoug Rabson return (0); 1502dfdcada3SDoug Rabson } 1503dfdcada3SDoug Rabson } 1504dfdcada3SDoug Rabson 1505dfdcada3SDoug Rabson /* 1506dfdcada3SDoug Rabson * We didn't find a matching lock - not much we can do here. 1507dfdcada3SDoug Rabson */ 1508dfdcada3SDoug Rabson return (ENOENT); 1509dfdcada3SDoug Rabson } 1510dfdcada3SDoug Rabson 1511dfdcada3SDoug Rabson /* 151292dc7331SDavid Greenman * Walk the list of locks for an inode and 151392dc7331SDavid Greenman * return the first blocking lock. 151492dc7331SDavid Greenman */ 1515dfdcada3SDoug Rabson static struct lockf_entry * 1516dfdcada3SDoug Rabson lf_getblock(struct lockf *state, struct lockf_entry *lock) 151792dc7331SDavid Greenman { 1518dfdcada3SDoug Rabson struct lockf_entry *overlap; 151992dc7331SDavid Greenman 1520dfdcada3SDoug Rabson LIST_FOREACH(overlap, &state->ls_active, lf_link) { 152192dc7331SDavid Greenman /* 1522dfdcada3SDoug Rabson * We may assume that the active list is sorted by 1523dfdcada3SDoug Rabson * lf_start. 152492dc7331SDavid Greenman */ 1525dfdcada3SDoug Rabson if (overlap->lf_start > lock->lf_end) 1526dfdcada3SDoug Rabson break; 1527dfdcada3SDoug Rabson if (!lf_blocks(lock, overlap)) 1528dfdcada3SDoug Rabson continue; 152992dc7331SDavid Greenman return (overlap); 153092dc7331SDavid Greenman } 153192dc7331SDavid Greenman return (NOLOCKF); 153292dc7331SDavid Greenman } 153392dc7331SDavid Greenman 153492dc7331SDavid Greenman /* 1535dfdcada3SDoug Rabson * Walk the list of locks for an inode to find an overlapping lock (if 1536dfdcada3SDoug Rabson * any) and return a classification of that overlap. 1537dfdcada3SDoug Rabson * 1538dfdcada3SDoug Rabson * Arguments: 1539dfdcada3SDoug Rabson * *overlap The place in the lock list to start looking 1540dfdcada3SDoug Rabson * lock The lock which is being tested 1541dfdcada3SDoug Rabson * type Pass 'SELF' to test only locks with the same 1542dfdcada3SDoug Rabson * owner as lock, or 'OTHER' to test only locks 1543dfdcada3SDoug Rabson * with a different owner 1544dfdcada3SDoug Rabson * 1545dfdcada3SDoug Rabson * Returns one of six values: 1546dfdcada3SDoug Rabson * 0) no overlap 1547dfdcada3SDoug Rabson * 1) overlap == lock 1548dfdcada3SDoug Rabson * 2) overlap contains lock 1549dfdcada3SDoug Rabson * 3) lock contains overlap 1550dfdcada3SDoug Rabson * 4) overlap starts before lock 1551dfdcada3SDoug Rabson * 5) overlap ends after lock 1552dfdcada3SDoug Rabson * 1553dfdcada3SDoug Rabson * If there is an overlapping lock, '*overlap' is set to point at the 1554dfdcada3SDoug Rabson * overlapping lock. 155592dc7331SDavid Greenman * 155692dc7331SDavid Greenman * NOTE: this returns only the FIRST overlapping lock. There 155792dc7331SDavid Greenman * may be more than one. 155892dc7331SDavid Greenman */ 155987b6de2bSPoul-Henning Kamp static int 1560dfdcada3SDoug Rabson lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type) 156192dc7331SDavid Greenman { 1562dfdcada3SDoug Rabson struct lockf_entry *lf; 156392dc7331SDavid Greenman off_t start, end; 1564dfdcada3SDoug Rabson int res; 156592dc7331SDavid Greenman 1566dfdcada3SDoug Rabson if ((*overlap) == NOLOCKF) { 156792dc7331SDavid Greenman return (0); 1568dfdcada3SDoug Rabson } 156992dc7331SDavid Greenman #ifdef LOCKF_DEBUG 157092dc7331SDavid Greenman if (lockf_debug & 2) 157192dc7331SDavid Greenman lf_print("lf_findoverlap: looking for overlap in", lock); 157292dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 157392dc7331SDavid Greenman start = lock->lf_start; 157492dc7331SDavid Greenman end = lock->lf_end; 1575dfdcada3SDoug Rabson res = 0; 1576dfdcada3SDoug Rabson while (*overlap) { 1577dfdcada3SDoug Rabson lf = *overlap; 1578dfdcada3SDoug Rabson if (lf->lf_start > end) 1579dfdcada3SDoug Rabson break; 1580dfdcada3SDoug Rabson if (((type & SELF) && lf->lf_owner != lock->lf_owner) || 1581dfdcada3SDoug Rabson ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) { 1582dfdcada3SDoug Rabson *overlap = LIST_NEXT(lf, lf_link); 158392dc7331SDavid Greenman continue; 158492dc7331SDavid Greenman } 158592dc7331SDavid Greenman #ifdef LOCKF_DEBUG 158692dc7331SDavid Greenman if (lockf_debug & 2) 158792dc7331SDavid Greenman lf_print("\tchecking", lf); 158892dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 158992dc7331SDavid Greenman /* 159092dc7331SDavid Greenman * OK, check for overlap 159192dc7331SDavid Greenman * 159292dc7331SDavid Greenman * Six cases: 159392dc7331SDavid Greenman * 0) no overlap 159492dc7331SDavid Greenman * 1) overlap == lock 159592dc7331SDavid Greenman * 2) overlap contains lock 159692dc7331SDavid Greenman * 3) lock contains overlap 159792dc7331SDavid Greenman * 4) overlap starts before lock 159892dc7331SDavid Greenman * 5) overlap ends after lock 159992dc7331SDavid Greenman */ 1600dfdcada3SDoug Rabson if (start > lf->lf_end) { 160192dc7331SDavid Greenman /* Case 0 */ 160292dc7331SDavid Greenman #ifdef LOCKF_DEBUG 160392dc7331SDavid Greenman if (lockf_debug & 2) 160492dc7331SDavid Greenman printf("no overlap\n"); 160592dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 1606dfdcada3SDoug Rabson *overlap = LIST_NEXT(lf, lf_link); 160792dc7331SDavid Greenman continue; 160892dc7331SDavid Greenman } 1609dfdcada3SDoug Rabson if (lf->lf_start == start && lf->lf_end == end) { 161092dc7331SDavid Greenman /* Case 1 */ 161192dc7331SDavid Greenman #ifdef LOCKF_DEBUG 161292dc7331SDavid Greenman if (lockf_debug & 2) 161392dc7331SDavid Greenman printf("overlap == lock\n"); 161492dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 1615dfdcada3SDoug Rabson res = 1; 1616dfdcada3SDoug Rabson break; 161792dc7331SDavid Greenman } 1618dfdcada3SDoug Rabson if (lf->lf_start <= start && lf->lf_end >= end) { 161992dc7331SDavid Greenman /* Case 2 */ 162092dc7331SDavid Greenman #ifdef LOCKF_DEBUG 162192dc7331SDavid Greenman if (lockf_debug & 2) 162292dc7331SDavid Greenman printf("overlap contains lock\n"); 162392dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 1624dfdcada3SDoug Rabson res = 2; 1625dfdcada3SDoug Rabson break; 162692dc7331SDavid Greenman } 1627dfdcada3SDoug Rabson if (start <= lf->lf_start && end >= lf->lf_end) { 162892dc7331SDavid Greenman /* Case 3 */ 162992dc7331SDavid Greenman #ifdef LOCKF_DEBUG 163092dc7331SDavid Greenman if (lockf_debug & 2) 163192dc7331SDavid Greenman printf("lock contains overlap\n"); 163292dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 1633dfdcada3SDoug Rabson res = 3; 1634dfdcada3SDoug Rabson break; 163592dc7331SDavid Greenman } 1636dfdcada3SDoug Rabson if (lf->lf_start < start && lf->lf_end >= start) { 163792dc7331SDavid Greenman /* Case 4 */ 163892dc7331SDavid Greenman #ifdef LOCKF_DEBUG 163992dc7331SDavid Greenman if (lockf_debug & 2) 164092dc7331SDavid Greenman printf("overlap starts before lock\n"); 164192dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 1642dfdcada3SDoug Rabson res = 4; 1643dfdcada3SDoug Rabson break; 164492dc7331SDavid Greenman } 1645dfdcada3SDoug Rabson if (lf->lf_start > start && lf->lf_end > end) { 164692dc7331SDavid Greenman /* Case 5 */ 164792dc7331SDavid Greenman #ifdef LOCKF_DEBUG 164892dc7331SDavid Greenman if (lockf_debug & 2) 164992dc7331SDavid Greenman printf("overlap ends after lock\n"); 165092dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 1651dfdcada3SDoug Rabson res = 5; 1652dfdcada3SDoug Rabson break; 165392dc7331SDavid Greenman } 165492dc7331SDavid Greenman panic("lf_findoverlap: default"); 165592dc7331SDavid Greenman } 1656dfdcada3SDoug Rabson return (res); 165792dc7331SDavid Greenman } 165892dc7331SDavid Greenman 165992dc7331SDavid Greenman /* 1660dfdcada3SDoug Rabson * Split an the existing 'lock1', based on the extent of the lock 1661dfdcada3SDoug Rabson * described by 'lock2'. The existing lock should cover 'lock2' 1662dfdcada3SDoug Rabson * entirely. 1663dfdcada3SDoug Rabson * 1664dfdcada3SDoug Rabson * Any pending locks which have been been unblocked are added to 1665dfdcada3SDoug Rabson * 'granted' 166692dc7331SDavid Greenman */ 166787b6de2bSPoul-Henning Kamp static void 1668dfdcada3SDoug Rabson lf_split(struct lockf *state, struct lockf_entry *lock1, 1669dfdcada3SDoug Rabson struct lockf_entry *lock2, struct lockf_entry_list *granted) 167092dc7331SDavid Greenman { 1671dfdcada3SDoug Rabson struct lockf_entry *splitlock; 167292dc7331SDavid Greenman 167392dc7331SDavid Greenman #ifdef LOCKF_DEBUG 167492dc7331SDavid Greenman if (lockf_debug & 2) { 167592dc7331SDavid Greenman lf_print("lf_split", lock1); 167692dc7331SDavid Greenman lf_print("splitting from", lock2); 167792dc7331SDavid Greenman } 167892dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 167992dc7331SDavid Greenman /* 1680dfdcada3SDoug Rabson * Check to see if we don't need to split at all. 168192dc7331SDavid Greenman */ 168292dc7331SDavid Greenman if (lock1->lf_start == lock2->lf_start) { 1683dfdcada3SDoug Rabson lf_set_start(state, lock1, lock2->lf_end + 1, granted); 168492dc7331SDavid Greenman return; 168592dc7331SDavid Greenman } 168692dc7331SDavid Greenman if (lock1->lf_end == lock2->lf_end) { 1687dfdcada3SDoug Rabson lf_set_end(state, lock1, lock2->lf_start - 1, granted); 168892dc7331SDavid Greenman return; 168992dc7331SDavid Greenman } 169092dc7331SDavid Greenman /* 169192dc7331SDavid Greenman * Make a new lock consisting of the last part of 1692dfdcada3SDoug Rabson * the encompassing lock. 169392dc7331SDavid Greenman */ 1694dfdcada3SDoug Rabson splitlock = lf_alloc_lock(lock1->lf_owner); 1695dfdcada3SDoug Rabson memcpy(splitlock, lock1, sizeof *splitlock); 1696dfdcada3SDoug Rabson if (splitlock->lf_flags & F_REMOTE) 1697dfdcada3SDoug Rabson vref(splitlock->lf_vnode); 1698dfdcada3SDoug Rabson 1699dfdcada3SDoug Rabson /* 1700dfdcada3SDoug Rabson * This cannot cause a deadlock since any edges we would add 1701dfdcada3SDoug Rabson * to splitlock already exist in lock1. We must be sure to add 1702dfdcada3SDoug Rabson * necessary dependancies to splitlock before we reduce lock1 1703dfdcada3SDoug Rabson * otherwise we may accidentally grant a pending lock that 1704dfdcada3SDoug Rabson * was blocked by the tail end of lock1. 1705dfdcada3SDoug Rabson */ 170692dc7331SDavid Greenman splitlock->lf_start = lock2->lf_end + 1; 1707dfdcada3SDoug Rabson LIST_INIT(&splitlock->lf_outedges); 1708dfdcada3SDoug Rabson LIST_INIT(&splitlock->lf_inedges); 1709dfdcada3SDoug Rabson sx_xlock(&lf_owner_graph_lock); 1710dfdcada3SDoug Rabson lf_add_incoming(state, splitlock); 1711dfdcada3SDoug Rabson sx_xunlock(&lf_owner_graph_lock); 1712dfdcada3SDoug Rabson 1713dfdcada3SDoug Rabson lf_set_end(state, lock1, lock2->lf_start - 1, granted); 1714dfdcada3SDoug Rabson 171592dc7331SDavid Greenman /* 171692dc7331SDavid Greenman * OK, now link it in 171792dc7331SDavid Greenman */ 1718dfdcada3SDoug Rabson lf_insert_lock(state, splitlock); 1719dfdcada3SDoug Rabson } 1720dfdcada3SDoug Rabson 1721dfdcada3SDoug Rabson struct clearlock { 1722dfdcada3SDoug Rabson STAILQ_ENTRY(clearlock) link; 1723dfdcada3SDoug Rabson struct vnode *vp; 1724dfdcada3SDoug Rabson struct flock fl; 1725dfdcada3SDoug Rabson }; 1726dfdcada3SDoug Rabson STAILQ_HEAD(clearlocklist, clearlock); 1727dfdcada3SDoug Rabson 1728dfdcada3SDoug Rabson void 1729dfdcada3SDoug Rabson lf_clearremotesys(int sysid) 1730dfdcada3SDoug Rabson { 1731dfdcada3SDoug Rabson struct lockf *ls; 1732dfdcada3SDoug Rabson struct lockf_entry *lf; 1733dfdcada3SDoug Rabson struct clearlock *cl; 1734dfdcada3SDoug Rabson struct clearlocklist locks; 1735dfdcada3SDoug Rabson 1736dfdcada3SDoug Rabson KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS")); 1737dfdcada3SDoug Rabson 1738dfdcada3SDoug Rabson /* 1739dfdcada3SDoug Rabson * In order to keep the locking simple, we iterate over the 1740dfdcada3SDoug Rabson * active lock lists to build a list of locks that need 1741dfdcada3SDoug Rabson * releasing. We then call VOP_ADVLOCK for each one in turn. 1742dfdcada3SDoug Rabson * 1743dfdcada3SDoug Rabson * We take an extra reference to the vnode for the duration to 1744dfdcada3SDoug Rabson * make sure it doesn't go away before we are finished. 1745dfdcada3SDoug Rabson */ 1746dfdcada3SDoug Rabson STAILQ_INIT(&locks); 1747dfdcada3SDoug Rabson sx_xlock(&lf_lock_states_lock); 1748dfdcada3SDoug Rabson LIST_FOREACH(ls, &lf_lock_states, ls_link) { 1749dfdcada3SDoug Rabson sx_xlock(&ls->ls_lock); 1750dfdcada3SDoug Rabson LIST_FOREACH(lf, &ls->ls_active, lf_link) { 1751dfdcada3SDoug Rabson if (lf->lf_owner->lo_sysid != sysid) 1752dfdcada3SDoug Rabson continue; 1753dfdcada3SDoug Rabson 1754dfdcada3SDoug Rabson cl = malloc(sizeof(struct clearlock), M_LOCKF, 1755dfdcada3SDoug Rabson M_WAITOK); 1756dfdcada3SDoug Rabson cl->vp = lf->lf_vnode; 1757dfdcada3SDoug Rabson vref(cl->vp); 1758dfdcada3SDoug Rabson cl->fl.l_start = lf->lf_start; 1759dfdcada3SDoug Rabson if (lf->lf_end == OFF_MAX) 1760dfdcada3SDoug Rabson cl->fl.l_len = 0; 1761dfdcada3SDoug Rabson else 1762dfdcada3SDoug Rabson cl->fl.l_len = 1763dfdcada3SDoug Rabson lf->lf_end - lf->lf_start + 1; 1764dfdcada3SDoug Rabson cl->fl.l_whence = SEEK_SET; 1765dfdcada3SDoug Rabson cl->fl.l_type = F_UNLCK; 1766dfdcada3SDoug Rabson cl->fl.l_pid = lf->lf_owner->lo_pid; 1767dfdcada3SDoug Rabson cl->fl.l_sysid = sysid; 1768dfdcada3SDoug Rabson STAILQ_INSERT_TAIL(&locks, cl, link); 1769dfdcada3SDoug Rabson } 1770dfdcada3SDoug Rabson sx_xunlock(&ls->ls_lock); 1771dfdcada3SDoug Rabson } 1772dfdcada3SDoug Rabson sx_xunlock(&lf_lock_states_lock); 1773dfdcada3SDoug Rabson 1774dfdcada3SDoug Rabson while ((cl = STAILQ_FIRST(&locks)) != NULL) { 1775dfdcada3SDoug Rabson STAILQ_REMOVE_HEAD(&locks, link); 1776dfdcada3SDoug Rabson VOP_ADVLOCK(cl->vp, 0, F_UNLCK, &cl->fl, F_REMOTE); 1777dfdcada3SDoug Rabson vrele(cl->vp); 1778dfdcada3SDoug Rabson free(cl, M_LOCKF); 1779dfdcada3SDoug Rabson } 1780dfdcada3SDoug Rabson } 1781dfdcada3SDoug Rabson 1782dfdcada3SDoug Rabson int 1783dfdcada3SDoug Rabson lf_countlocks(int sysid) 1784dfdcada3SDoug Rabson { 1785dfdcada3SDoug Rabson int i; 1786dfdcada3SDoug Rabson struct lock_owner *lo; 1787dfdcada3SDoug Rabson int count; 1788dfdcada3SDoug Rabson 1789dfdcada3SDoug Rabson count = 0; 1790dfdcada3SDoug Rabson sx_xlock(&lf_lock_owners_lock); 1791dfdcada3SDoug Rabson for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) 1792dfdcada3SDoug Rabson LIST_FOREACH(lo, &lf_lock_owners[i], lo_link) 1793dfdcada3SDoug Rabson if (lo->lo_sysid == sysid) 1794dfdcada3SDoug Rabson count += lo->lo_refs; 1795dfdcada3SDoug Rabson sx_xunlock(&lf_lock_owners_lock); 1796dfdcada3SDoug Rabson 1797dfdcada3SDoug Rabson return (count); 1798dfdcada3SDoug Rabson } 1799dfdcada3SDoug Rabson 1800dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 1801dfdcada3SDoug Rabson 1802dfdcada3SDoug Rabson /* 1803dfdcada3SDoug Rabson * Return non-zero if y is reachable from x using a brute force 1804dfdcada3SDoug Rabson * search. If reachable and path is non-null, return the route taken 1805dfdcada3SDoug Rabson * in path. 1806dfdcada3SDoug Rabson */ 1807dfdcada3SDoug Rabson static int 1808dfdcada3SDoug Rabson graph_reaches(struct owner_vertex *x, struct owner_vertex *y, 1809dfdcada3SDoug Rabson struct owner_vertex_list *path) 1810dfdcada3SDoug Rabson { 1811dfdcada3SDoug Rabson struct owner_edge *e; 1812dfdcada3SDoug Rabson 1813dfdcada3SDoug Rabson if (x == y) { 1814dfdcada3SDoug Rabson if (path) 1815dfdcada3SDoug Rabson TAILQ_INSERT_HEAD(path, x, v_link); 1816dfdcada3SDoug Rabson return 1; 1817dfdcada3SDoug Rabson } 1818dfdcada3SDoug Rabson 1819dfdcada3SDoug Rabson LIST_FOREACH(e, &x->v_outedges, e_outlink) { 1820dfdcada3SDoug Rabson if (graph_reaches(e->e_to, y, path)) { 1821dfdcada3SDoug Rabson if (path) 1822dfdcada3SDoug Rabson TAILQ_INSERT_HEAD(path, x, v_link); 1823dfdcada3SDoug Rabson return 1; 1824dfdcada3SDoug Rabson } 1825dfdcada3SDoug Rabson } 1826dfdcada3SDoug Rabson return 0; 182792dc7331SDavid Greenman } 182892dc7331SDavid Greenman 182992dc7331SDavid Greenman /* 1830dfdcada3SDoug Rabson * Perform consistency checks on the graph. Make sure the values of 1831dfdcada3SDoug Rabson * v_order are correct. If checkorder is non-zero, check no vertex can 1832dfdcada3SDoug Rabson * reach any other vertex with a smaller order. 183392dc7331SDavid Greenman */ 183487b6de2bSPoul-Henning Kamp static void 1835dfdcada3SDoug Rabson graph_check(struct owner_graph *g, int checkorder) 183692dc7331SDavid Greenman { 1837dfdcada3SDoug Rabson int i, j; 183892dc7331SDavid Greenman 1839dfdcada3SDoug Rabson for (i = 0; i < g->g_size; i++) { 1840dfdcada3SDoug Rabson if (!g->g_vertices[i]->v_owner) 1841dfdcada3SDoug Rabson continue; 1842dfdcada3SDoug Rabson KASSERT(g->g_vertices[i]->v_order == i, 1843dfdcada3SDoug Rabson ("lock graph vertices disordered")); 1844dfdcada3SDoug Rabson if (checkorder) { 1845dfdcada3SDoug Rabson for (j = 0; j < i; j++) { 1846dfdcada3SDoug Rabson if (!g->g_vertices[j]->v_owner) 1847dfdcada3SDoug Rabson continue; 1848dfdcada3SDoug Rabson KASSERT(!graph_reaches(g->g_vertices[i], 1849dfdcada3SDoug Rabson g->g_vertices[j], NULL), 1850dfdcada3SDoug Rabson ("lock graph vertices disordered")); 1851dfdcada3SDoug Rabson } 1852dfdcada3SDoug Rabson } 1853dfdcada3SDoug Rabson } 1854dfdcada3SDoug Rabson } 1855dfdcada3SDoug Rabson 1856dfdcada3SDoug Rabson static void 1857dfdcada3SDoug Rabson graph_print_vertices(struct owner_vertex_list *set) 1858dfdcada3SDoug Rabson { 1859dfdcada3SDoug Rabson struct owner_vertex *v; 1860dfdcada3SDoug Rabson 1861dfdcada3SDoug Rabson printf("{ "); 1862dfdcada3SDoug Rabson TAILQ_FOREACH(v, set, v_link) { 1863dfdcada3SDoug Rabson printf("%d:", v->v_order); 1864dfdcada3SDoug Rabson lf_print_owner(v->v_owner); 1865dfdcada3SDoug Rabson if (TAILQ_NEXT(v, v_link)) 1866dfdcada3SDoug Rabson printf(", "); 1867dfdcada3SDoug Rabson } 1868dfdcada3SDoug Rabson printf(" }\n"); 1869dfdcada3SDoug Rabson } 1870dfdcada3SDoug Rabson 1871dfdcada3SDoug Rabson #endif 1872dfdcada3SDoug Rabson 1873dfdcada3SDoug Rabson /* 1874dfdcada3SDoug Rabson * Calculate the sub-set of vertices v from the affected region [y..x] 1875dfdcada3SDoug Rabson * where v is reachable from y. Return -1 if a loop was detected 1876dfdcada3SDoug Rabson * (i.e. x is reachable from y, otherwise the number of vertices in 1877dfdcada3SDoug Rabson * this subset. 1878dfdcada3SDoug Rabson */ 1879dfdcada3SDoug Rabson static int 1880dfdcada3SDoug Rabson graph_delta_forward(struct owner_graph *g, struct owner_vertex *x, 1881dfdcada3SDoug Rabson struct owner_vertex *y, struct owner_vertex_list *delta) 1882dfdcada3SDoug Rabson { 1883dfdcada3SDoug Rabson uint32_t gen; 1884dfdcada3SDoug Rabson struct owner_vertex *v; 1885dfdcada3SDoug Rabson struct owner_edge *e; 1886dfdcada3SDoug Rabson int n; 1887dfdcada3SDoug Rabson 1888dfdcada3SDoug Rabson /* 1889dfdcada3SDoug Rabson * We start with a set containing just y. Then for each vertex 1890dfdcada3SDoug Rabson * v in the set so far unprocessed, we add each vertex that v 1891dfdcada3SDoug Rabson * has an out-edge to and that is within the affected region 1892dfdcada3SDoug Rabson * [y..x]. If we see the vertex x on our travels, stop 1893dfdcada3SDoug Rabson * immediately. 1894dfdcada3SDoug Rabson */ 1895dfdcada3SDoug Rabson TAILQ_INIT(delta); 1896dfdcada3SDoug Rabson TAILQ_INSERT_TAIL(delta, y, v_link); 1897dfdcada3SDoug Rabson v = y; 1898dfdcada3SDoug Rabson n = 1; 1899dfdcada3SDoug Rabson gen = g->g_gen; 1900dfdcada3SDoug Rabson while (v) { 1901dfdcada3SDoug Rabson LIST_FOREACH(e, &v->v_outedges, e_outlink) { 1902dfdcada3SDoug Rabson if (e->e_to == x) 1903dfdcada3SDoug Rabson return -1; 1904dfdcada3SDoug Rabson if (e->e_to->v_order < x->v_order 1905dfdcada3SDoug Rabson && e->e_to->v_gen != gen) { 1906dfdcada3SDoug Rabson e->e_to->v_gen = gen; 1907dfdcada3SDoug Rabson TAILQ_INSERT_TAIL(delta, e->e_to, v_link); 1908dfdcada3SDoug Rabson n++; 1909dfdcada3SDoug Rabson } 1910dfdcada3SDoug Rabson } 1911dfdcada3SDoug Rabson v = TAILQ_NEXT(v, v_link); 1912dfdcada3SDoug Rabson } 1913dfdcada3SDoug Rabson 1914dfdcada3SDoug Rabson return (n); 1915dfdcada3SDoug Rabson } 1916dfdcada3SDoug Rabson 1917dfdcada3SDoug Rabson /* 1918dfdcada3SDoug Rabson * Calculate the sub-set of vertices v from the affected region [y..x] 1919dfdcada3SDoug Rabson * where v reaches x. Return the number of vertices in this subset. 1920dfdcada3SDoug Rabson */ 1921dfdcada3SDoug Rabson static int 1922dfdcada3SDoug Rabson graph_delta_backward(struct owner_graph *g, struct owner_vertex *x, 1923dfdcada3SDoug Rabson struct owner_vertex *y, struct owner_vertex_list *delta) 1924dfdcada3SDoug Rabson { 1925dfdcada3SDoug Rabson uint32_t gen; 1926dfdcada3SDoug Rabson struct owner_vertex *v; 1927dfdcada3SDoug Rabson struct owner_edge *e; 1928dfdcada3SDoug Rabson int n; 1929dfdcada3SDoug Rabson 1930dfdcada3SDoug Rabson /* 1931dfdcada3SDoug Rabson * We start with a set containing just x. Then for each vertex 1932dfdcada3SDoug Rabson * v in the set so far unprocessed, we add each vertex that v 1933dfdcada3SDoug Rabson * has an in-edge from and that is within the affected region 1934dfdcada3SDoug Rabson * [y..x]. 1935dfdcada3SDoug Rabson */ 1936dfdcada3SDoug Rabson TAILQ_INIT(delta); 1937dfdcada3SDoug Rabson TAILQ_INSERT_TAIL(delta, x, v_link); 1938dfdcada3SDoug Rabson v = x; 1939dfdcada3SDoug Rabson n = 1; 1940dfdcada3SDoug Rabson gen = g->g_gen; 1941dfdcada3SDoug Rabson while (v) { 1942dfdcada3SDoug Rabson LIST_FOREACH(e, &v->v_inedges, e_inlink) { 1943dfdcada3SDoug Rabson if (e->e_from->v_order > y->v_order 1944dfdcada3SDoug Rabson && e->e_from->v_gen != gen) { 1945dfdcada3SDoug Rabson e->e_from->v_gen = gen; 1946dfdcada3SDoug Rabson TAILQ_INSERT_HEAD(delta, e->e_from, v_link); 1947dfdcada3SDoug Rabson n++; 1948dfdcada3SDoug Rabson } 1949dfdcada3SDoug Rabson } 1950dfdcada3SDoug Rabson v = TAILQ_PREV(v, owner_vertex_list, v_link); 1951dfdcada3SDoug Rabson } 1952dfdcada3SDoug Rabson 1953dfdcada3SDoug Rabson return (n); 1954dfdcada3SDoug Rabson } 1955dfdcada3SDoug Rabson 1956dfdcada3SDoug Rabson static int 1957dfdcada3SDoug Rabson graph_add_indices(int *indices, int n, struct owner_vertex_list *set) 1958dfdcada3SDoug Rabson { 1959dfdcada3SDoug Rabson struct owner_vertex *v; 1960dfdcada3SDoug Rabson int i, j; 1961dfdcada3SDoug Rabson 1962dfdcada3SDoug Rabson TAILQ_FOREACH(v, set, v_link) { 1963dfdcada3SDoug Rabson for (i = n; 1964dfdcada3SDoug Rabson i > 0 && indices[i - 1] > v->v_order; i--) 1965dfdcada3SDoug Rabson ; 1966dfdcada3SDoug Rabson for (j = n - 1; j >= i; j--) 1967dfdcada3SDoug Rabson indices[j + 1] = indices[j]; 1968dfdcada3SDoug Rabson indices[i] = v->v_order; 1969dfdcada3SDoug Rabson n++; 1970dfdcada3SDoug Rabson } 1971dfdcada3SDoug Rabson 1972dfdcada3SDoug Rabson return (n); 1973dfdcada3SDoug Rabson } 1974dfdcada3SDoug Rabson 1975dfdcada3SDoug Rabson static int 1976dfdcada3SDoug Rabson graph_assign_indices(struct owner_graph *g, int *indices, int nextunused, 1977dfdcada3SDoug Rabson struct owner_vertex_list *set) 1978dfdcada3SDoug Rabson { 1979dfdcada3SDoug Rabson struct owner_vertex *v, *vlowest; 1980dfdcada3SDoug Rabson 1981dfdcada3SDoug Rabson while (!TAILQ_EMPTY(set)) { 1982dfdcada3SDoug Rabson vlowest = NULL; 1983dfdcada3SDoug Rabson TAILQ_FOREACH(v, set, v_link) { 1984dfdcada3SDoug Rabson if (!vlowest || v->v_order < vlowest->v_order) 1985dfdcada3SDoug Rabson vlowest = v; 1986dfdcada3SDoug Rabson } 1987dfdcada3SDoug Rabson TAILQ_REMOVE(set, vlowest, v_link); 1988dfdcada3SDoug Rabson vlowest->v_order = indices[nextunused]; 1989dfdcada3SDoug Rabson g->g_vertices[vlowest->v_order] = vlowest; 1990dfdcada3SDoug Rabson nextunused++; 1991dfdcada3SDoug Rabson } 1992dfdcada3SDoug Rabson 1993dfdcada3SDoug Rabson return (nextunused); 1994dfdcada3SDoug Rabson } 1995dfdcada3SDoug Rabson 1996dfdcada3SDoug Rabson static int 1997dfdcada3SDoug Rabson graph_add_edge(struct owner_graph *g, struct owner_vertex *x, 1998dfdcada3SDoug Rabson struct owner_vertex *y) 1999dfdcada3SDoug Rabson { 2000dfdcada3SDoug Rabson struct owner_edge *e; 2001dfdcada3SDoug Rabson struct owner_vertex_list deltaF, deltaB; 2002dfdcada3SDoug Rabson int nF, nB, n, vi, i; 2003dfdcada3SDoug Rabson int *indices; 2004dfdcada3SDoug Rabson 2005dfdcada3SDoug Rabson sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2006dfdcada3SDoug Rabson 2007dfdcada3SDoug Rabson LIST_FOREACH(e, &x->v_outedges, e_outlink) { 2008dfdcada3SDoug Rabson if (e->e_to == y) { 2009dfdcada3SDoug Rabson e->e_refs++; 2010dfdcada3SDoug Rabson return (0); 201192dc7331SDavid Greenman } 201292dc7331SDavid Greenman } 201392dc7331SDavid Greenman 201492dc7331SDavid Greenman #ifdef LOCKF_DEBUG 2015dfdcada3SDoug Rabson if (lockf_debug & 8) { 2016dfdcada3SDoug Rabson printf("adding edge %d:", x->v_order); 2017dfdcada3SDoug Rabson lf_print_owner(x->v_owner); 2018dfdcada3SDoug Rabson printf(" -> %d:", y->v_order); 2019dfdcada3SDoug Rabson lf_print_owner(y->v_owner); 2020dfdcada3SDoug Rabson printf("\n"); 2021dfdcada3SDoug Rabson } 2022dfdcada3SDoug Rabson #endif 2023dfdcada3SDoug Rabson if (y->v_order < x->v_order) { 2024dfdcada3SDoug Rabson /* 2025dfdcada3SDoug Rabson * The new edge violates the order. First find the set 2026dfdcada3SDoug Rabson * of affected vertices reachable from y (deltaF) and 2027dfdcada3SDoug Rabson * the set of affect vertices affected that reach x 2028dfdcada3SDoug Rabson * (deltaB), using the graph generation number to 2029dfdcada3SDoug Rabson * detect whether we have visited a given vertex 2030dfdcada3SDoug Rabson * already. We re-order the graph so that each vertex 2031dfdcada3SDoug Rabson * in deltaB appears before each vertex in deltaF. 2032dfdcada3SDoug Rabson * 2033dfdcada3SDoug Rabson * If x is a member of deltaF, then the new edge would 2034dfdcada3SDoug Rabson * create a cycle. Otherwise, we may assume that 2035dfdcada3SDoug Rabson * deltaF and deltaB are disjoint. 2036dfdcada3SDoug Rabson */ 2037dfdcada3SDoug Rabson g->g_gen++; 2038dfdcada3SDoug Rabson if (g->g_gen == 0) { 2039dfdcada3SDoug Rabson /* 2040dfdcada3SDoug Rabson * Generation wrap. 2041dfdcada3SDoug Rabson */ 2042dfdcada3SDoug Rabson for (vi = 0; vi < g->g_size; vi++) { 2043dfdcada3SDoug Rabson g->g_vertices[vi]->v_gen = 0; 2044dfdcada3SDoug Rabson } 2045dfdcada3SDoug Rabson g->g_gen++; 2046dfdcada3SDoug Rabson } 2047dfdcada3SDoug Rabson nF = graph_delta_forward(g, x, y, &deltaF); 2048dfdcada3SDoug Rabson if (nF < 0) { 2049dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 2050dfdcada3SDoug Rabson if (lockf_debug & 8) { 2051dfdcada3SDoug Rabson struct owner_vertex_list path; 2052dfdcada3SDoug Rabson printf("deadlock: "); 2053dfdcada3SDoug Rabson TAILQ_INIT(&path); 2054dfdcada3SDoug Rabson graph_reaches(y, x, &path); 2055dfdcada3SDoug Rabson graph_print_vertices(&path); 2056dfdcada3SDoug Rabson } 2057dfdcada3SDoug Rabson #endif 2058dfdcada3SDoug Rabson return (EDEADLK); 2059dfdcada3SDoug Rabson } 2060dfdcada3SDoug Rabson 2061dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 2062dfdcada3SDoug Rabson if (lockf_debug & 8) { 2063dfdcada3SDoug Rabson printf("re-ordering graph vertices\n"); 2064dfdcada3SDoug Rabson printf("deltaF = "); 2065dfdcada3SDoug Rabson graph_print_vertices(&deltaF); 2066dfdcada3SDoug Rabson } 2067dfdcada3SDoug Rabson #endif 2068dfdcada3SDoug Rabson 2069dfdcada3SDoug Rabson nB = graph_delta_backward(g, x, y, &deltaB); 2070dfdcada3SDoug Rabson 2071dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 2072dfdcada3SDoug Rabson if (lockf_debug & 8) { 2073dfdcada3SDoug Rabson printf("deltaB = "); 2074dfdcada3SDoug Rabson graph_print_vertices(&deltaB); 2075dfdcada3SDoug Rabson } 2076dfdcada3SDoug Rabson #endif 2077dfdcada3SDoug Rabson 2078dfdcada3SDoug Rabson /* 2079dfdcada3SDoug Rabson * We first build a set of vertex indices (vertex 2080dfdcada3SDoug Rabson * order values) that we may use, then we re-assign 2081dfdcada3SDoug Rabson * orders first to those vertices in deltaB, then to 2082dfdcada3SDoug Rabson * deltaF. Note that the contents of deltaF and deltaB 2083dfdcada3SDoug Rabson * may be partially disordered - we perform an 2084dfdcada3SDoug Rabson * insertion sort while building our index set. 2085dfdcada3SDoug Rabson */ 2086dfdcada3SDoug Rabson indices = g->g_indexbuf; 2087dfdcada3SDoug Rabson n = graph_add_indices(indices, 0, &deltaF); 2088dfdcada3SDoug Rabson graph_add_indices(indices, n, &deltaB); 2089dfdcada3SDoug Rabson 2090dfdcada3SDoug Rabson /* 2091dfdcada3SDoug Rabson * We must also be sure to maintain the relative 2092dfdcada3SDoug Rabson * ordering of deltaF and deltaB when re-assigning 2093dfdcada3SDoug Rabson * vertices. We do this by iteratively removing the 2094dfdcada3SDoug Rabson * lowest ordered element from the set and assigning 2095dfdcada3SDoug Rabson * it the next value from our new ordering. 2096dfdcada3SDoug Rabson */ 2097dfdcada3SDoug Rabson i = graph_assign_indices(g, indices, 0, &deltaB); 2098dfdcada3SDoug Rabson graph_assign_indices(g, indices, i, &deltaF); 2099dfdcada3SDoug Rabson 2100dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 2101dfdcada3SDoug Rabson if (lockf_debug & 8) { 2102dfdcada3SDoug Rabson struct owner_vertex_list set; 2103dfdcada3SDoug Rabson TAILQ_INIT(&set); 2104dfdcada3SDoug Rabson for (i = 0; i < nB + nF; i++) 2105dfdcada3SDoug Rabson TAILQ_INSERT_TAIL(&set, 2106dfdcada3SDoug Rabson g->g_vertices[indices[i]], v_link); 2107dfdcada3SDoug Rabson printf("new ordering = "); 2108dfdcada3SDoug Rabson graph_print_vertices(&set); 2109dfdcada3SDoug Rabson } 2110dfdcada3SDoug Rabson #endif 2111dfdcada3SDoug Rabson } 2112dfdcada3SDoug Rabson 2113dfdcada3SDoug Rabson KASSERT(x->v_order < y->v_order, ("Failed to re-order graph")); 2114dfdcada3SDoug Rabson 2115dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 2116dfdcada3SDoug Rabson if (lockf_debug & 8) { 2117dfdcada3SDoug Rabson graph_check(g, TRUE); 2118dfdcada3SDoug Rabson } 2119dfdcada3SDoug Rabson #endif 2120dfdcada3SDoug Rabson 2121dfdcada3SDoug Rabson e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK); 2122dfdcada3SDoug Rabson 2123dfdcada3SDoug Rabson LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink); 2124dfdcada3SDoug Rabson LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink); 2125dfdcada3SDoug Rabson e->e_refs = 1; 2126dfdcada3SDoug Rabson e->e_from = x; 2127dfdcada3SDoug Rabson e->e_to = y; 2128dfdcada3SDoug Rabson 2129dfdcada3SDoug Rabson return (0); 2130dfdcada3SDoug Rabson } 2131dfdcada3SDoug Rabson 2132dfdcada3SDoug Rabson /* 2133dfdcada3SDoug Rabson * Remove an edge x->y from the graph. 2134dfdcada3SDoug Rabson */ 2135dfdcada3SDoug Rabson static void 2136dfdcada3SDoug Rabson graph_remove_edge(struct owner_graph *g, struct owner_vertex *x, 2137dfdcada3SDoug Rabson struct owner_vertex *y) 2138dfdcada3SDoug Rabson { 2139dfdcada3SDoug Rabson struct owner_edge *e; 2140dfdcada3SDoug Rabson 2141dfdcada3SDoug Rabson sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2142dfdcada3SDoug Rabson 2143dfdcada3SDoug Rabson LIST_FOREACH(e, &x->v_outedges, e_outlink) { 2144dfdcada3SDoug Rabson if (e->e_to == y) 2145dfdcada3SDoug Rabson break; 2146dfdcada3SDoug Rabson } 2147dfdcada3SDoug Rabson KASSERT(e, ("Removing non-existent edge from deadlock graph")); 2148dfdcada3SDoug Rabson 2149dfdcada3SDoug Rabson e->e_refs--; 2150dfdcada3SDoug Rabson if (e->e_refs == 0) { 2151dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 2152dfdcada3SDoug Rabson if (lockf_debug & 8) { 2153dfdcada3SDoug Rabson printf("removing edge %d:", x->v_order); 2154dfdcada3SDoug Rabson lf_print_owner(x->v_owner); 2155dfdcada3SDoug Rabson printf(" -> %d:", y->v_order); 2156dfdcada3SDoug Rabson lf_print_owner(y->v_owner); 2157dfdcada3SDoug Rabson printf("\n"); 2158dfdcada3SDoug Rabson } 2159dfdcada3SDoug Rabson #endif 2160dfdcada3SDoug Rabson LIST_REMOVE(e, e_outlink); 2161dfdcada3SDoug Rabson LIST_REMOVE(e, e_inlink); 2162dfdcada3SDoug Rabson free(e, M_LOCKF); 2163dfdcada3SDoug Rabson } 2164dfdcada3SDoug Rabson } 2165dfdcada3SDoug Rabson 2166dfdcada3SDoug Rabson /* 2167dfdcada3SDoug Rabson * Allocate a vertex from the free list. Return ENOMEM if there are 2168dfdcada3SDoug Rabson * none. 2169dfdcada3SDoug Rabson */ 2170dfdcada3SDoug Rabson static struct owner_vertex * 2171dfdcada3SDoug Rabson graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo) 2172dfdcada3SDoug Rabson { 2173dfdcada3SDoug Rabson struct owner_vertex *v; 2174dfdcada3SDoug Rabson 2175dfdcada3SDoug Rabson sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2176dfdcada3SDoug Rabson 2177dfdcada3SDoug Rabson v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK); 2178dfdcada3SDoug Rabson if (g->g_size == g->g_space) { 2179dfdcada3SDoug Rabson g->g_vertices = realloc(g->g_vertices, 2180dfdcada3SDoug Rabson 2 * g->g_space * sizeof(struct owner_vertex *), 2181dfdcada3SDoug Rabson M_LOCKF, M_WAITOK); 2182dfdcada3SDoug Rabson free(g->g_indexbuf, M_LOCKF); 2183dfdcada3SDoug Rabson g->g_indexbuf = malloc(2 * g->g_space * sizeof(int), 2184dfdcada3SDoug Rabson M_LOCKF, M_WAITOK); 2185dfdcada3SDoug Rabson g->g_space = 2 * g->g_space; 2186dfdcada3SDoug Rabson } 2187dfdcada3SDoug Rabson v->v_order = g->g_size; 2188dfdcada3SDoug Rabson v->v_gen = g->g_gen; 2189dfdcada3SDoug Rabson g->g_vertices[g->g_size] = v; 2190dfdcada3SDoug Rabson g->g_size++; 2191dfdcada3SDoug Rabson 2192dfdcada3SDoug Rabson LIST_INIT(&v->v_outedges); 2193dfdcada3SDoug Rabson LIST_INIT(&v->v_inedges); 2194dfdcada3SDoug Rabson v->v_owner = lo; 2195dfdcada3SDoug Rabson 2196dfdcada3SDoug Rabson return (v); 2197dfdcada3SDoug Rabson } 2198dfdcada3SDoug Rabson 2199dfdcada3SDoug Rabson static void 2200dfdcada3SDoug Rabson graph_free_vertex(struct owner_graph *g, struct owner_vertex *v) 2201dfdcada3SDoug Rabson { 2202dfdcada3SDoug Rabson struct owner_vertex *w; 2203dfdcada3SDoug Rabson int i; 2204dfdcada3SDoug Rabson 2205dfdcada3SDoug Rabson sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2206dfdcada3SDoug Rabson 2207dfdcada3SDoug Rabson KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges")); 2208dfdcada3SDoug Rabson KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges")); 2209dfdcada3SDoug Rabson 2210dfdcada3SDoug Rabson /* 2211dfdcada3SDoug Rabson * Remove from the graph's array and close up the gap, 2212dfdcada3SDoug Rabson * renumbering the other vertices. 2213dfdcada3SDoug Rabson */ 2214dfdcada3SDoug Rabson for (i = v->v_order + 1; i < g->g_size; i++) { 2215dfdcada3SDoug Rabson w = g->g_vertices[i]; 2216dfdcada3SDoug Rabson w->v_order--; 2217dfdcada3SDoug Rabson g->g_vertices[i - 1] = w; 2218dfdcada3SDoug Rabson } 2219dfdcada3SDoug Rabson g->g_size--; 2220dfdcada3SDoug Rabson 2221dfdcada3SDoug Rabson free(v, M_LOCKF); 2222dfdcada3SDoug Rabson } 2223dfdcada3SDoug Rabson 2224dfdcada3SDoug Rabson static struct owner_graph * 2225dfdcada3SDoug Rabson graph_init(struct owner_graph *g) 2226dfdcada3SDoug Rabson { 2227dfdcada3SDoug Rabson 2228dfdcada3SDoug Rabson g->g_vertices = malloc(10 * sizeof(struct owner_vertex *), 2229dfdcada3SDoug Rabson M_LOCKF, M_WAITOK); 2230dfdcada3SDoug Rabson g->g_size = 0; 2231dfdcada3SDoug Rabson g->g_space = 10; 2232dfdcada3SDoug Rabson g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK); 2233dfdcada3SDoug Rabson g->g_gen = 0; 2234dfdcada3SDoug Rabson 2235dfdcada3SDoug Rabson return (g); 2236dfdcada3SDoug Rabson } 2237dfdcada3SDoug Rabson 2238dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 2239dfdcada3SDoug Rabson /* 2240dfdcada3SDoug Rabson * Print description of a lock owner 2241dfdcada3SDoug Rabson */ 2242dfdcada3SDoug Rabson static void 2243dfdcada3SDoug Rabson lf_print_owner(struct lock_owner *lo) 2244dfdcada3SDoug Rabson { 2245dfdcada3SDoug Rabson 2246dfdcada3SDoug Rabson if (lo->lo_flags & F_REMOTE) { 2247dfdcada3SDoug Rabson printf("remote pid %d, system %d", 2248dfdcada3SDoug Rabson lo->lo_pid, lo->lo_sysid); 2249dfdcada3SDoug Rabson } else if (lo->lo_flags & F_FLOCK) { 2250dfdcada3SDoug Rabson printf("file %p", lo->lo_id); 2251dfdcada3SDoug Rabson } else { 2252dfdcada3SDoug Rabson printf("local pid %d", lo->lo_pid); 2253dfdcada3SDoug Rabson } 2254dfdcada3SDoug Rabson } 2255dfdcada3SDoug Rabson 225692dc7331SDavid Greenman /* 225792dc7331SDavid Greenman * Print out a lock. 225892dc7331SDavid Greenman */ 2259013e6650SJeff Roberson static void 2260dfdcada3SDoug Rabson lf_print(char *tag, struct lockf_entry *lock) 226192dc7331SDavid Greenman { 226292dc7331SDavid Greenman 2263d974cf4dSBruce Evans printf("%s: lock %p for ", tag, (void *)lock); 2264dfdcada3SDoug Rabson lf_print_owner(lock->lf_owner); 226559aff5fcSAlfred Perlstein if (lock->lf_inode != (struct inode *)0) 2266dfdcada3SDoug Rabson printf(" in ino %ju on dev <%s>,", 2267a7a00d05SMaxime Henrion (uintmax_t)lock->lf_inode->i_number, 2268dfdcada3SDoug Rabson devtoname(lock->lf_inode->i_dev)); 2269dfdcada3SDoug Rabson printf(" %s, start %jd, end ", 227092dc7331SDavid Greenman lock->lf_type == F_RDLCK ? "shared" : 227192dc7331SDavid Greenman lock->lf_type == F_WRLCK ? "exclusive" : 2272a7a00d05SMaxime Henrion lock->lf_type == F_UNLCK ? "unlock" : "unknown", 2273dfdcada3SDoug Rabson (intmax_t)lock->lf_start); 2274dfdcada3SDoug Rabson if (lock->lf_end == OFF_MAX) 2275dfdcada3SDoug Rabson printf("EOF"); 227659aff5fcSAlfred Perlstein else 2277dfdcada3SDoug Rabson printf("%jd", (intmax_t)lock->lf_end); 2278dfdcada3SDoug Rabson if (!LIST_EMPTY(&lock->lf_outedges)) 2279dfdcada3SDoug Rabson printf(" block %p\n", 2280dfdcada3SDoug Rabson (void *)LIST_FIRST(&lock->lf_outedges)->le_to); 228192dc7331SDavid Greenman else 228292dc7331SDavid Greenman printf("\n"); 228392dc7331SDavid Greenman } 228492dc7331SDavid Greenman 2285013e6650SJeff Roberson static void 2286dfdcada3SDoug Rabson lf_printlist(char *tag, struct lockf_entry *lock) 228792dc7331SDavid Greenman { 2288dfdcada3SDoug Rabson struct lockf_entry *lf, *blk; 2289dfdcada3SDoug Rabson struct lockf_edge *e; 229092dc7331SDavid Greenman 229159aff5fcSAlfred Perlstein if (lock->lf_inode == (struct inode *)0) 229259aff5fcSAlfred Perlstein return; 229359aff5fcSAlfred Perlstein 229497eb8cfaSPoul-Henning Kamp printf("%s: Lock list for ino %ju on dev <%s>:\n", 2295a7a00d05SMaxime Henrion tag, (uintmax_t)lock->lf_inode->i_number, 229697eb8cfaSPoul-Henning Kamp devtoname(lock->lf_inode->i_dev)); 2297dfdcada3SDoug Rabson LIST_FOREACH(lf, &lock->lf_inode->i_lockf->ls_active, lf_link) { 2298d974cf4dSBruce Evans printf("\tlock %p for ",(void *)lf); 2299dfdcada3SDoug Rabson lf_print_owner(lock->lf_owner); 2300a7a00d05SMaxime Henrion printf(", %s, start %jd, end %jd", 230192dc7331SDavid Greenman lf->lf_type == F_RDLCK ? "shared" : 230292dc7331SDavid Greenman lf->lf_type == F_WRLCK ? "exclusive" : 230392dc7331SDavid Greenman lf->lf_type == F_UNLCK ? "unlock" : 2304a7a00d05SMaxime Henrion "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end); 2305dfdcada3SDoug Rabson LIST_FOREACH(e, &lf->lf_outedges, le_outlink) { 2306dfdcada3SDoug Rabson blk = e->le_to; 2307d974cf4dSBruce Evans printf("\n\t\tlock request %p for ", (void *)blk); 2308dfdcada3SDoug Rabson lf_print_owner(blk->lf_owner); 2309a7a00d05SMaxime Henrion printf(", %s, start %jd, end %jd", 2310996c772fSJohn Dyson blk->lf_type == F_RDLCK ? "shared" : 2311996c772fSJohn Dyson blk->lf_type == F_WRLCK ? "exclusive" : 2312996c772fSJohn Dyson blk->lf_type == F_UNLCK ? "unlock" : 2313a7a00d05SMaxime Henrion "unknown", (intmax_t)blk->lf_start, 2314a7a00d05SMaxime Henrion (intmax_t)blk->lf_end); 2315dfdcada3SDoug Rabson if (!LIST_EMPTY(&blk->lf_inedges)) 2316996c772fSJohn Dyson panic("lf_printlist: bad list"); 2317996c772fSJohn Dyson } 231892dc7331SDavid Greenman printf("\n"); 231992dc7331SDavid Greenman } 232092dc7331SDavid Greenman } 232192dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 2322