/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
 * Authors: Doug Rabson <dfr@rabson.org>
 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/taskqueue.h>

#ifdef LOCKF_DEBUG
#include <sys/sysctl.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>

static int	lockf_debug = 0;	/* control debug output */
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

struct owner_edge;
struct owner_vertex;
struct owner_vertex_list;
struct owner_graph;

#define NOLOCKF (struct lockf_entry *)0
#define SELF	0x1
#define OTHERS	0x2
static void	 lf_init(void *);
static int	 lf_hash_owner(caddr_t, struct flock *, int);
static int	 lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
	    int);
static struct lockf_entry *
		 lf_alloc_lock(struct lock_owner *);
static int	 lf_free_lock(struct lockf_entry *);
static int	 lf_clearlock(struct lockf *, struct lockf_entry *);
static int	 lf_overlaps(struct lockf_entry *, struct lockf_entry *);
static int	 lf_blocks(struct lockf_entry *, struct lockf_entry *);
static void	 lf_free_edge(struct lockf_edge *);
static struct lockf_edge *
		 lf_alloc_edge(void);
static void	 lf_alloc_vertex(struct lockf_entry *);
static int	 lf_add_edge(struct lockf_entry *, struct lockf_entry *);
static void	 lf_remove_edge(struct lockf_edge *);
static void	 lf_remove_outgoing(struct lockf_entry *);
static void	 lf_remove_incoming(struct lockf_entry *);
static int	 lf_add_outgoing(struct lockf *, struct lockf_entry *);
static int	 lf_add_incoming(struct lockf *, struct lockf_entry *);
static int	 lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
	    int);
static struct lockf_entry *
		 lf_getblock(struct lockf *, struct lockf_entry *);
static int	 lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
static void	 lf_insert_lock(struct lockf *, struct lockf_entry *);
static void	 lf_wakeup_lock(struct lockf *, struct lockf_entry *);
static void	 lf_update_dependancies(struct lockf *, struct lockf_entry *,
	    int all, struct lockf_entry_list *);
static void	 lf_set_start(struct lockf *, struct lockf_entry *, off_t,
	    struct lockf_entry_list*);
static void	 lf_set_end(struct lockf *, struct lockf_entry *, off_t,
	    struct lockf_entry_list*);
static int	 lf_setlock(struct lockf *, struct lockf_entry *,
	    struct vnode *, void **cookiep);
static int	 lf_cancel(struct lockf *, struct lockf_entry *, void *);
static void	 lf_split(struct lockf *, struct lockf_entry *,
	    struct lockf_entry *, struct lockf_entry_list *);
#ifdef LOCKF_DEBUG
static int	 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
	    struct owner_vertex_list *path);
static void	 graph_check(struct owner_graph *g, int checkorder);
static void	 graph_print_vertices(struct owner_vertex_list *set);
#endif
static int	 graph_delta_forward(struct owner_graph *g,
	    struct owner_vertex *x, struct owner_vertex *y,
	    struct owner_vertex_list *delta);
static int	 graph_delta_backward(struct owner_graph *g,
	    struct owner_vertex *x, struct owner_vertex *y,
	    struct owner_vertex_list *delta);
static int	 graph_add_indices(int *indices, int n,
	    struct owner_vertex_list *set);
static int	 graph_assign_indices(struct owner_graph *g, int *indices,
	    int nextunused, struct owner_vertex_list *set);
static int	 graph_add_edge(struct owner_graph *g,
	    struct owner_vertex *x, struct owner_vertex *y);
static void	 graph_remove_edge(struct owner_graph *g,
	    struct owner_vertex *x, struct owner_vertex *y);
static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
	    struct lock_owner *lo);
static void	 graph_free_vertex(struct owner_graph *g,
	    struct owner_vertex *v);
static struct owner_graph * graph_init(struct owner_graph *g);
#ifdef LOCKF_DEBUG
static void	 lf_print(char *, struct lockf_entry *);
static void	 lf_printlist(char *, struct lockf_entry *);
static void	 lf_print_owner(struct lock_owner *);
#endif

/*
 * This structure is used to keep track of both local and remote lock
 * owners. The lf_owner field of the struct lockf_entry points back at
 * the lock owner structure. Each possible lock owner (local proc for
 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
 * pair for remote locks) is represented by a unique instance of
 * struct lock_owner.
 *
 * If a lock owner has a lock that blocks some other lock or a lock
 * that is waiting for some other lock, it also has a vertex in the
 * owner_graph below.
 *
 * Locks:
 * (s)		locked by state->ls_lock
 * (S)		locked by lf_lock_states_lock
 * (l)		locked by lf_lock_owners_lock
 * (g)		locked by lf_owner_graph_lock
 * (c)		const until freeing
 */
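
/*
 * For example (an illustrative note): all POSIX fcntl() locks taken
 * by one local process share the lock_owner whose lo_id is that
 * process's struct proc pointer; BSD flock() locks are owned by the
 * struct file they were taken through; and locks from a remote lock
 * manager share the lock_owner with the matching <pid, sysid> pair
 * carried in the flock structure.
 */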
#define	LOCK_OWNER_HASH_SIZE	256

struct lock_owner {
	LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
	int	lo_refs;	    /* (l) Number of locks referring to this */
	int	lo_flags;	    /* (c) Flags passed to lf_advlock */
	caddr_t	lo_id;		    /* (c) Id value passed to lf_advlock */
	pid_t	lo_pid;		    /* (c) Process Id of the lock owner */
	int	lo_sysid;	    /* (c) System Id of the lock owner */
	struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
};

LIST_HEAD(lock_owner_list, lock_owner);

static struct sx		lf_lock_states_lock;
static struct lockf_list	lf_lock_states; /* (S) */
static struct sx		lf_lock_owners_lock;
static struct lock_owner_list	lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */

/*
 * Structures for deadlock detection.
 *
 * We have two types of directed graph. The first is the set of locks,
 * both active and pending, on a vnode. Within this graph, active locks
 * are terminal nodes in the graph (i.e. have no out-going
 * edges). Pending locks have out-going edges to each blocking active
 * lock that prevents the lock from being granted and also to each
 * older pending lock that would block them if it was active. The
 * graph for each vnode is naturally acyclic; new edges are only ever
 * added to or from new nodes (either new pending locks which only add
 * out-going edges or new active locks which only add in-coming edges)
 * therefore they cannot create loops in the lock graph.
 *
 * The second graph is a global graph of lock owners. Each lock owner
 * is a vertex in that graph and an edge is added to the graph
 * whenever an edge is added to a vnode graph, with end points
 * corresponding to the owner of the new pending lock and the owner of
 * the lock upon which it waits. In order to prevent deadlock, we only
 * add an edge to this graph if the new edge would not create a cycle.
 *
 * The lock owner graph is topologically sorted, i.e. if a node has
 * any outgoing edges, then it has an order strictly less than any
 * node to which it has an outgoing edge. We preserve this ordering
 * (and detect cycles) on edge insertion using Algorithm PK from the
 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article
 * No. 1.7)
 */
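
/*
 * An example of the cycle check at work (illustrative): suppose owner
 * A holds a write lock on bytes [0, 9] and owner B holds a write lock
 * on bytes [10, 19]. If A requests [10, 19] it sleeps and the owner
 * graph gains the edge A -> B. If B then requests [0, 9], granting
 * the wait would add B -> A and close the cycle A -> B -> A, so the
 * request fails immediately with EDEADLK instead of leaving both
 * owners asleep forever.
 */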
struct owner_vertex;

struct owner_edge {
	LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
	LIST_ENTRY(owner_edge) e_inlink;  /* (g) link to's in-edge list */
	int		e_refs;		  /* (g) number of times added */
	struct owner_vertex *e_from;	  /* (c) out-going from here */
	struct owner_vertex *e_to;	  /* (c) in-coming to here */
};
LIST_HEAD(owner_edge_list, owner_edge);

struct owner_vertex {
	TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
	uint32_t	v_gen;		  /* (g) workspace for edge insertion */
	int		v_order;	  /* (g) order of vertex in graph */
	struct owner_edge_list v_outedges;/* (g) list of out-edges */
	struct owner_edge_list v_inedges; /* (g) list of in-edges */
	struct lock_owner *v_owner;	  /* (c) corresponding lock owner */
};
TAILQ_HEAD(owner_vertex_list, owner_vertex);

struct owner_graph {
	struct owner_vertex** g_vertices; /* (g) pointers to vertices */
	int		g_size;		  /* (g) number of vertices */
	int		g_space;	  /* (g) space allocated for vertices */
	int		*g_indexbuf;	  /* (g) workspace for loop detection */
	uint32_t	g_gen;		  /* (g) increment when re-ordering */
};

static struct sx		lf_owner_graph_lock;
static struct owner_graph	lf_owner_graph;

/*
 * Initialise various structures and locks.
 */
static void
lf_init(void *dummy)
{
	int i;

	sx_init(&lf_lock_states_lock, "lock states lock");
	LIST_INIT(&lf_lock_states);

	sx_init(&lf_lock_owners_lock, "lock owners lock");
	for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
		LIST_INIT(&lf_lock_owners[i]);

	sx_init(&lf_owner_graph_lock, "owner graph lock");
	graph_init(&lf_owner_graph);
}
SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);

/*
 * Generate a hash value for a lock owner.
 */
static int
lf_hash_owner(caddr_t id, struct flock *fl, int flags)
{
	uint32_t h;

	if (flags & F_REMOTE) {
		h = HASHSTEP(0, fl->l_pid);
		h = HASHSTEP(h, fl->l_sysid);
	} else if (flags & F_FLOCK) {
		h = ((uintptr_t) id) >> 7;
	} else {
		struct proc *p = (struct proc *) id;
		h = HASHSTEP(0, p->p_pid);
		h = HASHSTEP(h, 0);
	}

	return (h % LOCK_OWNER_HASH_SIZE);
}
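
/*
 * For a local POSIX lock owner, for example, this reduces to
 * HASHSTEP(HASHSTEP(0, p_pid), 0) % LOCK_OWNER_HASH_SIZE, so every
 * lock owned by one process lands on the same hash chain.
 */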

/*
 * Return true if a lock owner matches the details passed to
 * lf_advlock.
 */
static int
lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
    int flags)
{
	if (flags & F_REMOTE) {
		return lo->lo_pid == fl->l_pid
			&& lo->lo_sysid == fl->l_sysid;
	} else {
		return lo->lo_id == id;
	}
}

static struct lockf_entry *
lf_alloc_lock(struct lock_owner *lo)
{
	struct lockf_entry *lf;

	lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);

#ifdef LOCKF_DEBUG
	if (lockf_debug & 4)
		printf("Allocated lock %p\n", lf);
#endif
	if (lo) {
		sx_xlock(&lf_lock_owners_lock);
		lo->lo_refs++;
		sx_xunlock(&lf_lock_owners_lock);
		lf->lf_owner = lo;
	}

	return (lf);
}

static int
lf_free_lock(struct lockf_entry *lock)
{

	KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
	if (--lock->lf_refs > 0)
		return (0);
	/*
	 * Adjust the lock_owner reference count and
	 * reclaim the entry if this is the last lock
	 * for that owner.
	 */
	struct lock_owner *lo = lock->lf_owner;
	if (lo) {
		KASSERT(LIST_EMPTY(&lock->lf_outedges),
		    ("freeing lock with dependencies"));
		KASSERT(LIST_EMPTY(&lock->lf_inedges),
		    ("freeing lock with dependants"));
		sx_xlock(&lf_lock_owners_lock);
		KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
		lo->lo_refs--;
		if (lo->lo_refs == 0) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 1)
				printf("lf_free_lock: freeing lock owner %p\n",
				    lo);
#endif
			if (lo->lo_vertex) {
				sx_xlock(&lf_owner_graph_lock);
				graph_free_vertex(&lf_owner_graph,
				    lo->lo_vertex);
				sx_xunlock(&lf_owner_graph_lock);
			}
			LIST_REMOVE(lo, lo_link);
			free(lo, M_LOCKF);
#ifdef LOCKF_DEBUG
			if (lockf_debug & 4)
				printf("Freed lock owner %p\n", lo);
#endif
		}
		sx_unlock(&lf_lock_owners_lock);
	}
	if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
		vrele(lock->lf_vnode);
		lock->lf_vnode = NULL;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 4)
		printf("Freed lock %p\n", lock);
#endif
	free(lock, M_LOCKF);
	return (1);
}

/*
 * Advisory record locking support
 */
int
lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
    u_quad_t size)
{
	struct lockf *state;
	struct flock *fl = ap->a_fl;
	struct lockf_entry *lock;
	struct vnode *vp = ap->a_vp;
	caddr_t id = ap->a_id;
	int flags = ap->a_flags;
	int hash;
	struct lock_owner *lo;
	off_t start, end, oadd;
	int error;

	/*
	 * Handle the F_UNLCKSYS case first - no need to mess about
	 * creating a lock owner for this one.
	 */
	if (ap->a_op == F_UNLCKSYS) {
		lf_clearremotesys(fl->l_sysid);
		return (0);
	}

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		if (size > OFF_MAX ||
		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len < 0) {
		if (start == 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
		if (start < 0)
			return (EINVAL);
	} else if (fl->l_len == 0) {
		end = OFF_MAX;
	} else {
		oadd = fl->l_len - 1;
		if (oadd > OFF_MAX - start)
			return (EOVERFLOW);
		end = start + oadd;
	}
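
	/*
	 * For example: with l_whence = SEEK_SET, l_start = 100 and
	 * l_len = 10 the lock covers the closed range [100, 109];
	 * with l_len = 0 it covers [100, OFF_MAX], i.e. to the end of
	 * the file however far it grows; and with l_len = -10 it
	 * covers [90, 99], the ten bytes immediately before l_start.
	 */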

retry_setlock:

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (ap->a_op != F_SETLK && (*statep) == NULL) {
		VI_LOCK(vp);
		if ((*statep) == NULL) {
			fl->l_type = F_UNLCK;
			VI_UNLOCK(vp);
			return (0);
		}
		VI_UNLOCK(vp);
	}

	/*
	 * Map our arguments to an existing lock owner or create one
	 * if this is the first time we have seen this owner.
	 */
	hash = lf_hash_owner(id, fl, flags);
	sx_xlock(&lf_lock_owners_lock);
	LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link)
		if (lf_owner_matches(lo, id, fl, flags))
			break;
	if (!lo) {
		/*
		 * We initialise the lock with a reference
		 * count which matches the new lockf_entry
		 * structure created below.
		 */
		lo = malloc(sizeof(struct lock_owner), M_LOCKF,
		    M_WAITOK|M_ZERO);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 4)
			printf("Allocated lock owner %p\n", lo);
#endif

		lo->lo_refs = 1;
		lo->lo_flags = flags;
		lo->lo_id = id;
		if (flags & F_REMOTE) {
			lo->lo_pid = fl->l_pid;
			lo->lo_sysid = fl->l_sysid;
		} else if (flags & F_FLOCK) {
			lo->lo_pid = -1;
			lo->lo_sysid = 0;
		} else {
			struct proc *p = (struct proc *) id;
			lo->lo_pid = p->p_pid;
			lo->lo_sysid = 0;
		}
		lo->lo_vertex = NULL;

#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			printf("lf_advlockasync: new lock owner %p ", lo);
			lf_print_owner(lo);
			printf("\n");
		}
#endif

		LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link);
	} else {
		/*
		 * We have seen this lock owner before, increase its
		 * reference count to account for the new lockf_entry
		 * structure we create below.
		 */
		lo->lo_refs++;
	}
	sx_xunlock(&lf_lock_owners_lock);

	/*
	 * Create the lockf structure. We initialise the lf_owner
	 * field here instead of in lf_alloc_lock() to avoid paying
	 * the lf_lock_owners_lock tax twice.
	 */
	lock = lf_alloc_lock(NULL);
	lock->lf_refs = 1;
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_owner = lo;
	lock->lf_vnode = vp;
	if (flags & F_REMOTE) {
		/*
		 * For remote locks, the caller may release its ref to
		 * the vnode at any time - we have to ref it here to
		 * prevent it from being recycled unexpectedly.
		 */
		vref(vp);
	}

	/*
	 * XXX The problem is that VTOI is ufs specific, so it will
	 * break LOCKF_DEBUG for all FS's other than UFS because
	 * it casts the vnode->data ptr to struct inode *.
	 */
	/* lock->lf_inode = VTOI(ap->a_vp); */
	lock->lf_inode = (struct inode *)0;
	lock->lf_type = fl->l_type;
	LIST_INIT(&lock->lf_outedges);
	LIST_INIT(&lock->lf_inedges);
	lock->lf_async_task = ap->a_task;
	lock->lf_flags = ap->a_flags;

	/*
	 * Do the requested operation. First find our state structure
	 * and create a new one if necessary - the caller's *statep
	 * variable and the state's ls_threads count are protected by
	 * the vnode interlock.
	 */
	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED) {
		VI_UNLOCK(vp);
		lf_free_lock(lock);
		return (ENOENT);
	}

	/*
	 * Allocate a state structure if necessary.
	 */
	state = *statep;
	if (state == NULL) {
		struct lockf *ls;

		VI_UNLOCK(vp);

		ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
		sx_init(&ls->ls_lock, "ls_lock");
		LIST_INIT(&ls->ls_active);
		LIST_INIT(&ls->ls_pending);
		ls->ls_threads = 1;

		sx_xlock(&lf_lock_states_lock);
		LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
		sx_xunlock(&lf_lock_states_lock);

		/*
		 * Cope if we lost a race with some other thread while
		 * trying to allocate memory.
		 */
		VI_LOCK(vp);
		if (vp->v_iflag & VI_DOOMED) {
			VI_UNLOCK(vp);
			sx_xlock(&lf_lock_states_lock);
			LIST_REMOVE(ls, ls_link);
			sx_xunlock(&lf_lock_states_lock);
			sx_destroy(&ls->ls_lock);
			free(ls, M_LOCKF);
			lf_free_lock(lock);
			return (ENOENT);
		}
		if ((*statep) == NULL) {
			state = *statep = ls;
			VI_UNLOCK(vp);
		} else {
			state = *statep;
			state->ls_threads++;
			VI_UNLOCK(vp);

			sx_xlock(&lf_lock_states_lock);
			LIST_REMOVE(ls, ls_link);
			sx_xunlock(&lf_lock_states_lock);
			sx_destroy(&ls->ls_lock);
			free(ls, M_LOCKF);
		}
	} else {
		state->ls_threads++;
		VI_UNLOCK(vp);
	}

	sx_xlock(&state->ls_lock);
	/*
	 * Recheck the doomed vnode after state->ls_lock is
	 * locked. lf_purgelocks() requires that no new threads add
	 * pending locks when vnode is marked by VI_DOOMED flag.
	 */
	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED) {
		state->ls_threads--;
		wakeup(state);
		VI_UNLOCK(vp);
		sx_xunlock(&state->ls_lock);
		lf_free_lock(lock);
		return (ENOENT);
	}
	VI_UNLOCK(vp);

	switch (ap->a_op) {
	case F_SETLK:
		error = lf_setlock(state, lock, vp, ap->a_cookiep);
		break;

	case F_UNLCK:
		error = lf_clearlock(state, lock);
		lf_free_lock(lock);
		break;

	case F_GETLK:
		error = lf_getlock(state, lock, fl);
		lf_free_lock(lock);
		break;

	case F_CANCEL:
		if (ap->a_cookiep)
			error = lf_cancel(state, lock, *ap->a_cookiep);
		else
			error = EINVAL;
		lf_free_lock(lock);
		break;

	default:
		lf_free_lock(lock);
		error = EINVAL;
		break;
	}

#ifdef DIAGNOSTIC
	/*
	 * Check for some can't happen stuff. In this case, the active
	 * lock list becoming disordered or containing mutually
	 * blocking locks. We also check the pending list for locks
	 * which should be active (i.e. have no out-going edges).
	 */
	LIST_FOREACH(lock, &state->ls_active, lf_link) {
		struct lockf_entry *lf;
		if (LIST_NEXT(lock, lf_link))
			KASSERT((lock->lf_start
			    <= LIST_NEXT(lock, lf_link)->lf_start),
			    ("locks disordered"));
		LIST_FOREACH(lf, &state->ls_active, lf_link) {
			if (lock == lf)
				break;
			KASSERT(!lf_blocks(lock, lf),
			    ("two conflicting active locks"));
			if (lock->lf_owner == lf->lf_owner)
				KASSERT(!lf_overlaps(lock, lf),
				    ("two overlapping locks from same owner"));
		}
	}
	LIST_FOREACH(lock, &state->ls_pending, lf_link) {
		KASSERT(!LIST_EMPTY(&lock->lf_outedges),
		    ("pending lock which should be active"));
	}
#endif
	sx_xunlock(&state->ls_lock);

	VI_LOCK(vp);

	state->ls_threads--;
	wakeup(state);
	if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
		KASSERT(LIST_EMPTY(&state->ls_pending),
		    ("freeable state with pending locks"));
	}

	VI_UNLOCK(vp);

	if (error == EDOOFUS) {
		KASSERT(ap->a_op == F_SETLK, ("EDOOFUS"));
		goto retry_setlock;
	}
	return (error);
}

int
lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
{
	struct vop_advlockasync_args a;

	a.a_vp = ap->a_vp;
	a.a_id = ap->a_id;
	a.a_op = ap->a_op;
	a.a_fl = ap->a_fl;
	a.a_flags = ap->a_flags;
	a.a_task = NULL;
	a.a_cookiep = NULL;

	return (lf_advlockasync(&a, statep, size));
}
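
/*
 * A minimal userland sketch of the interface implemented here,
 * assuming 'fd' is an open file descriptor (illustrative only, not
 * part of the kernel):
 *
 *	struct flock fl;
 *
 *	memset(&fl, 0, sizeof(fl));
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 10;
 *	if (fcntl(fd, F_SETLKW, &fl) == -1 && errno == EDEADLK)
 *		errx(1, "granting this lock would deadlock");
 *
 * F_SETLKW reaches this layer as the F_SETLK operation with F_WAIT
 * set in the flags, and EDEADLK comes from the owner graph cycle
 * check below.
 */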

void
lf_purgelocks(struct vnode *vp, struct lockf **statep)
{
	struct lockf *state;
	struct lockf_entry *lock, *nlock;

	/*
	 * For this to work correctly, the caller must ensure that no
	 * other threads enter the locking system for this vnode,
	 * e.g. by checking VI_DOOMED. We wake up any threads that are
	 * sleeping waiting for locks on this vnode and then free all
	 * the remaining locks.
	 */
	VI_LOCK(vp);
	KASSERT(vp->v_iflag & VI_DOOMED,
	    ("lf_purgelocks: vp %p has not vgone yet", vp));
	state = *statep;
	if (state == NULL) {
		VI_UNLOCK(vp);
		return;
	}
	*statep = NULL;
	state->ls_threads++;
	VI_UNLOCK(vp);

	sx_xlock(&state->ls_lock);
	sx_xlock(&lf_owner_graph_lock);
	LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
		LIST_REMOVE(lock, lf_link);
		lf_remove_outgoing(lock);
		lf_remove_incoming(lock);

		/*
		 * If it's an async lock, we can just free it
		 * here, otherwise we let the sleeping thread
		 * free it.
		 */
		if (lock->lf_async_task) {
			lf_free_lock(lock);
		} else {
			lock->lf_flags |= F_INTR;
			wakeup(lock);
		}
	}
	sx_xunlock(&lf_owner_graph_lock);
	sx_xunlock(&state->ls_lock);

	/*
	 * Wait for all other threads, sleeping and otherwise,
	 * to leave.
	 */
	VI_LOCK(vp);
	while (state->ls_threads > 1)
		msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
	VI_UNLOCK(vp);

	/*
	 * We can just free all the active locks since they
	 * will have no dependencies (we removed them all
	 * above). We don't need to bother locking since we
	 * are the last thread using this state structure.
	 */
	KASSERT(LIST_EMPTY(&state->ls_pending),
	    ("lock pending for %p", state));
	LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
		LIST_REMOVE(lock, lf_link);
		lf_free_lock(lock);
	}
	sx_xlock(&lf_lock_states_lock);
	LIST_REMOVE(state, ls_link);
	sx_xunlock(&lf_lock_states_lock);
	sx_destroy(&state->ls_lock);
	free(state, M_LOCKF);
}

/*
 * Return non-zero if locks 'x' and 'y' overlap.
 */
static int
lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
{

	return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
}

/*
 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
 */
static int
lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
{

	return x->lf_owner != y->lf_owner
		&& (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
		&& lf_overlaps(x, y);
}
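
/*
 * Note that lock ranges are closed intervals: for example, [0, 4] and
 * [4, 8] overlap at byte 4, while [0, 4] and [5, 8] do not. Two locks
 * conflict only when they overlap, belong to different owners and at
 * least one of them is a write lock.
 */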

/*
 * Allocate a lock edge.
 */
static struct lockf_edge *
lf_alloc_edge(void)
{

	return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
}

/*
 * Free a lock edge.
 */
static void
lf_free_edge(struct lockf_edge *e)
{

	free(e, M_LOCKF);
}

/*
 * Ensure that the lock's owner has a corresponding vertex in the
 * owner graph.
 */
static void
lf_alloc_vertex(struct lockf_entry *lock)
{
	struct owner_graph *g = &lf_owner_graph;

	if (!lock->lf_owner->lo_vertex)
		lock->lf_owner->lo_vertex =
			graph_alloc_vertex(g, lock->lf_owner);
}

/*
 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
 * the new edge would cause a cycle in the owner graph.
 */
static int
lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
{
	struct owner_graph *g = &lf_owner_graph;
	struct lockf_edge *e;
	int error;

#ifdef DIAGNOSTIC
	LIST_FOREACH(e, &x->lf_outedges, le_outlink)
		KASSERT(e->le_to != y, ("adding lock edge twice"));
#endif

	/*
	 * Make sure the two owners have entries in the owner graph.
	 */
	lf_alloc_vertex(x);
	lf_alloc_vertex(y);

	error = graph_add_edge(g, x->lf_owner->lo_vertex,
	    y->lf_owner->lo_vertex);
	if (error)
		return (error);

	e = lf_alloc_edge();
	LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
	LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
	e->le_from = x;
	e->le_to = y;

	return (0);
}

/*
 * Remove an edge from the lock graph.
 */
static void
lf_remove_edge(struct lockf_edge *e)
{
	struct owner_graph *g = &lf_owner_graph;
	struct lockf_entry *x = e->le_from;
	struct lockf_entry *y = e->le_to;

	graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
	LIST_REMOVE(e, le_outlink);
	LIST_REMOVE(e, le_inlink);
	e->le_from = NULL;
	e->le_to = NULL;
	lf_free_edge(e);
}

/*
 * Remove all out-going edges from lock x.
 */
static void
lf_remove_outgoing(struct lockf_entry *x)
{
	struct lockf_edge *e;

	while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
		lf_remove_edge(e);
	}
}

/*
 * Remove all in-coming edges from lock x.
 */
static void
lf_remove_incoming(struct lockf_entry *x)
{
	struct lockf_edge *e;

	while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
		lf_remove_edge(e);
	}
}

/*
 * Walk the list of locks for the file and create an out-going edge
 * from lock to each blocking lock.
 */
static int
lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;
	int error;

	LIST_FOREACH(overlap, &state->ls_active, lf_link) {
		/*
		 * We may assume that the active list is sorted by
		 * lf_start.
		 */
		if (overlap->lf_start > lock->lf_end)
			break;
		if (!lf_blocks(lock, overlap))
			continue;

		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(lock, overlap);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_outgoing(lock);
			return (error);
		}
	}

	/*
	 * We also need to add edges to sleeping locks that block
	 * us. This ensures that lf_wakeup_lock cannot grant two
	 * mutually blocking locks simultaneously and also enforces a
	 * 'first come, first served' fairness model. Note that this
	 * only happens if we are blocked by at least one active lock
	 * due to the call to lf_getblock in lf_setlock below.
	 */
	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
		if (!lf_blocks(lock, overlap))
			continue;
		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(lock, overlap);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_outgoing(lock);
			return (error);
		}
	}

	return (0);
}
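
/*
 * For example (illustrative): if owner A holds an active write lock
 * on [0, 9], owner B already sleeps on a pending write lock for
 * [0, 9] and owner C now requests [0, 9], then C gets edges to both
 * A's active lock and B's older pending lock. When A unlocks, only B
 * loses its last out-going edge and wakes; C keeps its edge to B and
 * is granted the range only after B releases it in turn.
 */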

/*
 * Walk the list of pending locks for the file and create an in-coming
 * edge from lock to each blocking lock.
 */
static int
lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;
	int error;

	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
		if (!lf_blocks(lock, overlap))
			continue;

		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(overlap, lock);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_incoming(lock);
			return (error);
		}
	}
	return (0);
}

/*
 * Insert lock into the active list, keeping list entries ordered by
 * increasing values of lf_start.
 */
static void
lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *lf, *lfprev;

	if (LIST_EMPTY(&state->ls_active)) {
		LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
		return;
	}

	lfprev = NULL;
	LIST_FOREACH(lf, &state->ls_active, lf_link) {
		if (lf->lf_start > lock->lf_start) {
			LIST_INSERT_BEFORE(lf, lock, lf_link);
			return;
		}
		lfprev = lf;
	}
	LIST_INSERT_AFTER(lfprev, lock, lf_link);
}

/*
 * Wake up a sleeping lock and remove it from the pending list now
 * that all its dependencies have been resolved. The caller should
 * arrange for the lock to be added to the active list, adjusting any
 * existing locks for the same owner as needed.
 */
static void
lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
{

	/*
	 * Remove from ls_pending list and wake up the caller
	 * or start the async notification, as appropriate.
	 */
1109dfdcada3SDoug Rabson */
1110dfdcada3SDoug Rabson LIST_REMOVE(wakelock, lf_link);
1111dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG
1112dfdcada3SDoug Rabson if (lockf_debug & 1)
1113dfdcada3SDoug Rabson lf_print("lf_wakeup_lock: awakening", wakelock);
1114dfdcada3SDoug Rabson #endif /* LOCKF_DEBUG */
1115dfdcada3SDoug Rabson if (wakelock->lf_async_task) {
1116dfdcada3SDoug Rabson taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
1117dfdcada3SDoug Rabson } else {
1118dfdcada3SDoug Rabson wakeup(wakelock);
1119dfdcada3SDoug Rabson }
1120dfdcada3SDoug Rabson }
1121dfdcada3SDoug Rabson 
1122dfdcada3SDoug Rabson /*
1123e3043798SPedro F. Giffuni * Re-check all dependent locks and remove edges to locks that we no
1124dfdcada3SDoug Rabson * longer block. If 'all' is non-zero, the lock has been removed and
1125e3043798SPedro F. Giffuni * we must remove all the dependencies, otherwise it has simply been
1126dfdcada3SDoug Rabson * reduced but remains active. Any pending locks which have been
1127dfdcada3SDoug Rabson * unblocked are added to 'granted'.
1128dfdcada3SDoug Rabson */
1129dfdcada3SDoug Rabson static void
1130dfdcada3SDoug Rabson lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
1131dfdcada3SDoug Rabson struct lockf_entry_list *granted)
1132dfdcada3SDoug Rabson {
1133dfdcada3SDoug Rabson struct lockf_edge *e, *ne;
1134dfdcada3SDoug Rabson struct lockf_entry *deplock;
1135dfdcada3SDoug Rabson 
1136dfdcada3SDoug Rabson LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
1137dfdcada3SDoug Rabson deplock = e->le_from;
1138dfdcada3SDoug Rabson if (all || !lf_blocks(lock, deplock)) {
1139dfdcada3SDoug Rabson sx_xlock(&lf_owner_graph_lock);
1140dfdcada3SDoug Rabson lf_remove_edge(e);
1141dfdcada3SDoug Rabson sx_xunlock(&lf_owner_graph_lock);
1142dfdcada3SDoug Rabson if (LIST_EMPTY(&deplock->lf_outedges)) {
1143dfdcada3SDoug Rabson lf_wakeup_lock(state, deplock);
1144dfdcada3SDoug Rabson LIST_INSERT_HEAD(granted, deplock, lf_link);
1145dfdcada3SDoug Rabson }
1146dfdcada3SDoug Rabson }
1147dfdcada3SDoug Rabson }
1148dfdcada3SDoug Rabson }
1149dfdcada3SDoug Rabson 
1150dfdcada3SDoug Rabson /*
1151e3043798SPedro F. Giffuni * Set the start of an existing active lock, updating dependencies and
1152dfdcada3SDoug Rabson * adding any newly woken locks to 'granted'.
1153dfdcada3SDoug Rabson */
1154dfdcada3SDoug Rabson static void
1155dfdcada3SDoug Rabson lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
1156dfdcada3SDoug Rabson struct lockf_entry_list *granted)
1157dfdcada3SDoug Rabson {
1158dfdcada3SDoug Rabson 
1159dfdcada3SDoug Rabson KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
1160dfdcada3SDoug Rabson lock->lf_start = new_start;
1161dfdcada3SDoug Rabson LIST_REMOVE(lock, lf_link);
1162dfdcada3SDoug Rabson lf_insert_lock(state, lock);
1163dfdcada3SDoug Rabson lf_update_dependancies(state, lock, FALSE, granted);
1164dfdcada3SDoug Rabson }
1165dfdcada3SDoug Rabson 
1166dfdcada3SDoug Rabson /*
1167e3043798SPedro F. Giffuni * Set the end of an existing active lock, updating dependencies and
1168dfdcada3SDoug Rabson * adding any newly woken locks to 'granted'.
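 * For example, shrinking an exclusive lock from [0..9] to [0..4] may
 * leave a pending request for [5..9] with no remaining out-edges, in
 * which case it is woken and queued on 'granted'.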
1169dfdcada3SDoug Rabson */
1170dfdcada3SDoug Rabson static void
1171dfdcada3SDoug Rabson lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
1172dfdcada3SDoug Rabson struct lockf_entry_list *granted)
1173dfdcada3SDoug Rabson {
1174dfdcada3SDoug Rabson 
1175dfdcada3SDoug Rabson KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
1176dfdcada3SDoug Rabson lock->lf_end = new_end;
1177dfdcada3SDoug Rabson lf_update_dependancies(state, lock, FALSE, granted);
1178dfdcada3SDoug Rabson }
1179dfdcada3SDoug Rabson 
1180dfdcada3SDoug Rabson /*
1181dfdcada3SDoug Rabson * Add a lock to the active list, updating or removing any current
1182dfdcada3SDoug Rabson * locks owned by the same owner and processing any pending locks that
1183dfdcada3SDoug Rabson * become unblocked as a result. This code is also used for unlock
1184dfdcada3SDoug Rabson * since the logic for updating existing locks is identical.
1185dfdcada3SDoug Rabson *
1186dfdcada3SDoug Rabson * As a result of processing the new lock, we may unblock existing
1187dfdcada3SDoug Rabson * pending locks as a result of downgrading/unlocking. We simply
1188dfdcada3SDoug Rabson * activate the newly granted locks by looping.
1189dfdcada3SDoug Rabson *
1190e3043798SPedro F. Giffuni * Since the new lock already has its dependencies set up, we always
1191dfdcada3SDoug Rabson * add it to the list (unless it's an unlock request). This may
1192dfdcada3SDoug Rabson * fragment the lock list in some pathological cases but it's probably
1193dfdcada3SDoug Rabson * not a real problem.
1194dfdcada3SDoug Rabson */
1195dfdcada3SDoug Rabson static void
1196dfdcada3SDoug Rabson lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
1197dfdcada3SDoug Rabson {
1198dfdcada3SDoug Rabson struct lockf_entry *overlap, *lf;
1199dfdcada3SDoug Rabson struct lockf_entry_list granted;
1200dfdcada3SDoug Rabson int ovcase;
1201dfdcada3SDoug Rabson 
1202dfdcada3SDoug Rabson LIST_INIT(&granted);
1203dfdcada3SDoug Rabson LIST_INSERT_HEAD(&granted, lock, lf_link);
1204dfdcada3SDoug Rabson 
1205dfdcada3SDoug Rabson while (!LIST_EMPTY(&granted)) {
1206dfdcada3SDoug Rabson lock = LIST_FIRST(&granted);
1207dfdcada3SDoug Rabson LIST_REMOVE(lock, lf_link);
1208dfdcada3SDoug Rabson 
1209dfdcada3SDoug Rabson /*
1210dfdcada3SDoug Rabson * Skip over locks owned by other processes. Handle
1211dfdcada3SDoug Rabson * any locks that overlap and are owned by ourselves.
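 * lf_findoverlap() classifies each overlap into one of the six
 * cases below; cases 3 and 4 keep scanning from the next entry,
 * the other cases terminate the scan.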
1212dfdcada3SDoug Rabson */
1213dfdcada3SDoug Rabson overlap = LIST_FIRST(&state->ls_active);
1214dfdcada3SDoug Rabson for (;;) {
1215dfdcada3SDoug Rabson ovcase = lf_findoverlap(&overlap, lock, SELF);
1216dfdcada3SDoug Rabson 
1217dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG
1218dfdcada3SDoug Rabson if (ovcase && (lockf_debug & 2)) {
1219dfdcada3SDoug Rabson printf("lf_setlock: overlap %d", ovcase);
1220dfdcada3SDoug Rabson lf_print("", overlap);
1221dfdcada3SDoug Rabson }
1222dfdcada3SDoug Rabson #endif
1223dfdcada3SDoug Rabson /*
1224dfdcada3SDoug Rabson * Six cases:
1225dfdcada3SDoug Rabson *	0) no overlap
1226dfdcada3SDoug Rabson *	1) overlap == lock
1227dfdcada3SDoug Rabson *	2) overlap contains lock
1228dfdcada3SDoug Rabson *	3) lock contains overlap
1229dfdcada3SDoug Rabson *	4) overlap starts before lock
1230dfdcada3SDoug Rabson *	5) overlap ends after lock
1231dfdcada3SDoug Rabson */
1232dfdcada3SDoug Rabson switch (ovcase) {
1233dfdcada3SDoug Rabson case 0: /* no overlap */
1234dfdcada3SDoug Rabson break;
1235dfdcada3SDoug Rabson 
1236dfdcada3SDoug Rabson case 1: /* overlap == lock */
1237dfdcada3SDoug Rabson /*
1238dfdcada3SDoug Rabson * We have already set up the
1239dfdcada3SDoug Rabson * dependants for the new lock, taking
1240dfdcada3SDoug Rabson * into account a possible downgrade
1241dfdcada3SDoug Rabson * or unlock. Remove the old lock.
1242dfdcada3SDoug Rabson */
1243dfdcada3SDoug Rabson LIST_REMOVE(overlap, lf_link);
1244dfdcada3SDoug Rabson lf_update_dependancies(state, overlap, TRUE,
1245dfdcada3SDoug Rabson &granted);
1246dfdcada3SDoug Rabson lf_free_lock(overlap);
1247dfdcada3SDoug Rabson break;
1248dfdcada3SDoug Rabson 
1249dfdcada3SDoug Rabson case 2: /* overlap contains lock */
1250dfdcada3SDoug Rabson /*
1251dfdcada3SDoug Rabson * Just split the existing lock.
1252dfdcada3SDoug Rabson */
1253dfdcada3SDoug Rabson lf_split(state, overlap, lock, &granted);
1254dfdcada3SDoug Rabson break;
1255dfdcada3SDoug Rabson 
1256dfdcada3SDoug Rabson case 3: /* lock contains overlap */
1257dfdcada3SDoug Rabson /*
1258dfdcada3SDoug Rabson * Delete the overlap and advance to
1259dfdcada3SDoug Rabson * the next entry in the list.
1260dfdcada3SDoug Rabson */
1261dfdcada3SDoug Rabson lf = LIST_NEXT(overlap, lf_link);
1262dfdcada3SDoug Rabson LIST_REMOVE(overlap, lf_link);
1263dfdcada3SDoug Rabson lf_update_dependancies(state, overlap, TRUE,
1264dfdcada3SDoug Rabson &granted);
1265dfdcada3SDoug Rabson lf_free_lock(overlap);
1266dfdcada3SDoug Rabson overlap = lf;
1267dfdcada3SDoug Rabson continue;
1268dfdcada3SDoug Rabson 
1269dfdcada3SDoug Rabson case 4: /* overlap starts before lock */
1270dfdcada3SDoug Rabson /*
1271dfdcada3SDoug Rabson * Just update the overlap end and
1272dfdcada3SDoug Rabson * move on.
1273dfdcada3SDoug Rabson */
1274dfdcada3SDoug Rabson lf_set_end(state, overlap, lock->lf_start - 1,
1275dfdcada3SDoug Rabson &granted);
1276dfdcada3SDoug Rabson overlap = LIST_NEXT(overlap, lf_link);
1277dfdcada3SDoug Rabson continue;
1278dfdcada3SDoug Rabson 
1279dfdcada3SDoug Rabson case 5: /* overlap ends after lock */
1280dfdcada3SDoug Rabson /*
1281dfdcada3SDoug Rabson * Change the start of overlap and
1282dfdcada3SDoug Rabson * re-insert.
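 * The re-insert is required because the active list is
 * ordered by lf_start; lf_set_start() removes and re-adds
 * the entry via lf_insert_lock().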
1283dfdcada3SDoug Rabson */ 1284dfdcada3SDoug Rabson lf_set_start(state, overlap, lock->lf_end + 1, 1285dfdcada3SDoug Rabson &granted); 1286dfdcada3SDoug Rabson break; 1287dfdcada3SDoug Rabson } 1288dfdcada3SDoug Rabson break; 1289dfdcada3SDoug Rabson } 1290dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 1291dfdcada3SDoug Rabson if (lockf_debug & 1) { 1292dfdcada3SDoug Rabson if (lock->lf_type != F_UNLCK) 1293dfdcada3SDoug Rabson lf_print("lf_activate_lock: activated", lock); 1294dfdcada3SDoug Rabson else 1295dfdcada3SDoug Rabson lf_print("lf_activate_lock: unlocked", lock); 1296dfdcada3SDoug Rabson lf_printlist("lf_activate_lock", lock); 1297dfdcada3SDoug Rabson } 1298dfdcada3SDoug Rabson #endif /* LOCKF_DEBUG */ 1299dfdcada3SDoug Rabson if (lock->lf_type != F_UNLCK) 1300dfdcada3SDoug Rabson lf_insert_lock(state, lock); 1301dfdcada3SDoug Rabson } 1302dfdcada3SDoug Rabson } 1303dfdcada3SDoug Rabson 1304dfdcada3SDoug Rabson /* 1305dfdcada3SDoug Rabson * Cancel a pending lock request, either as a result of a signal or a 1306dfdcada3SDoug Rabson * cancel request for an async lock. 1307dfdcada3SDoug Rabson */ 1308dfdcada3SDoug Rabson static void 1309dfdcada3SDoug Rabson lf_cancel_lock(struct lockf *state, struct lockf_entry *lock) 1310dfdcada3SDoug Rabson { 1311dfdcada3SDoug Rabson struct lockf_entry_list granted; 1312dfdcada3SDoug Rabson 1313dfdcada3SDoug Rabson /* 1314dfdcada3SDoug Rabson * Note it is theoretically possible that cancelling this lock 1315dfdcada3SDoug Rabson * may allow some other pending lock to become 1316dfdcada3SDoug Rabson * active. Consider this case: 1317dfdcada3SDoug Rabson * 1318e3043798SPedro F. Giffuni * Owner Action Result Dependencies 1319dfdcada3SDoug Rabson * 1320dfdcada3SDoug Rabson * A: lock [0..0] succeeds 1321dfdcada3SDoug Rabson * B: lock [2..2] succeeds 1322dfdcada3SDoug Rabson * C: lock [1..2] blocked C->B 1323dfdcada3SDoug Rabson * D: lock [0..1] blocked C->B,D->A,D->C 1324dfdcada3SDoug Rabson * A: unlock [0..0] C->B,D->C 1325dfdcada3SDoug Rabson * C: cancel [1..2] 1326dfdcada3SDoug Rabson */ 1327dfdcada3SDoug Rabson 1328dfdcada3SDoug Rabson LIST_REMOVE(lock, lf_link); 1329dfdcada3SDoug Rabson 1330dfdcada3SDoug Rabson /* 1331dfdcada3SDoug Rabson * Removing out-going edges is simple. 1332dfdcada3SDoug Rabson */ 1333dfdcada3SDoug Rabson sx_xlock(&lf_owner_graph_lock); 1334dfdcada3SDoug Rabson lf_remove_outgoing(lock); 1335dfdcada3SDoug Rabson sx_xunlock(&lf_owner_graph_lock); 1336dfdcada3SDoug Rabson 1337dfdcada3SDoug Rabson /* 1338dfdcada3SDoug Rabson * Removing in-coming edges may allow some other lock to 1339dfdcada3SDoug Rabson * become active - we use lf_update_dependancies to figure 1340dfdcada3SDoug Rabson * this out. 1341dfdcada3SDoug Rabson */ 1342dfdcada3SDoug Rabson LIST_INIT(&granted); 1343dfdcada3SDoug Rabson lf_update_dependancies(state, lock, TRUE, &granted); 1344dfdcada3SDoug Rabson lf_free_lock(lock); 1345dfdcada3SDoug Rabson 1346dfdcada3SDoug Rabson /* 1347dfdcada3SDoug Rabson * Feed any newly active locks to lf_activate_lock. 1348dfdcada3SDoug Rabson */ 1349dfdcada3SDoug Rabson while (!LIST_EMPTY(&granted)) { 1350dfdcada3SDoug Rabson lock = LIST_FIRST(&granted); 1351dfdcada3SDoug Rabson LIST_REMOVE(lock, lf_link); 1352dfdcada3SDoug Rabson lf_activate_lock(state, lock); 1353dfdcada3SDoug Rabson } 1354dfdcada3SDoug Rabson } 1355dfdcada3SDoug Rabson 135692dc7331SDavid Greenman /* 135792dc7331SDavid Greenman * Set a byte-range lock. 
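 * Returns 0 on success or an errno: EAGAIN for a conflicting
 * non-blocking request, EDEADLK when waiting would deadlock,
 * EINPROGRESS for queued async requests, among others described in
 * the body below.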
135892dc7331SDavid Greenman */ 135987b6de2bSPoul-Henning Kamp static int 1360dfdcada3SDoug Rabson lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp, 1361dfdcada3SDoug Rabson void **cookiep) 136292dc7331SDavid Greenman { 136392dc7331SDavid Greenman static char lockstr[] = "lockf"; 1364883a5a4aSKonstantin Belousov int error, priority, stops_deferred; 136592dc7331SDavid Greenman 136692dc7331SDavid Greenman #ifdef LOCKF_DEBUG 136792dc7331SDavid Greenman if (lockf_debug & 1) 136892dc7331SDavid Greenman lf_print("lf_setlock", lock); 136992dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 137092dc7331SDavid Greenman 137192dc7331SDavid Greenman /* 137292dc7331SDavid Greenman * Set the priority 137392dc7331SDavid Greenman */ 137492dc7331SDavid Greenman priority = PLOCK; 137592dc7331SDavid Greenman if (lock->lf_type == F_WRLCK) 137692dc7331SDavid Greenman priority += 4; 1377c675522fSDoug Rabson if (!(lock->lf_flags & F_NOINTR)) 137892dc7331SDavid Greenman priority |= PCATCH; 137992dc7331SDavid Greenman /* 138092dc7331SDavid Greenman * Scan lock list for this file looking for locks that would block us. 138192dc7331SDavid Greenman */ 13828aec91b5SKonstantin Belousov if (lf_getblock(state, lock)) { 138392dc7331SDavid Greenman /* 138492dc7331SDavid Greenman * Free the structure and return if nonblocking. 138592dc7331SDavid Greenman */ 1386dfdcada3SDoug Rabson if ((lock->lf_flags & F_WAIT) == 0 1387dfdcada3SDoug Rabson && lock->lf_async_task == NULL) { 1388dfdcada3SDoug Rabson lf_free_lock(lock); 1389dfdcada3SDoug Rabson error = EAGAIN; 1390dfdcada3SDoug Rabson goto out; 139192dc7331SDavid Greenman } 139292dc7331SDavid Greenman 1393dfdcada3SDoug Rabson /* 139406c85cefSDoug Rabson * For flock type locks, we must first remove 139506c85cefSDoug Rabson * any shared locks that we hold before we sleep 139606c85cefSDoug Rabson * waiting for an exclusive lock. 139706c85cefSDoug Rabson */ 139806c85cefSDoug Rabson if ((lock->lf_flags & F_FLOCK) && 139906c85cefSDoug Rabson lock->lf_type == F_WRLCK) { 140006c85cefSDoug Rabson lock->lf_type = F_UNLCK; 140106c85cefSDoug Rabson lf_activate_lock(state, lock); 140206c85cefSDoug Rabson lock->lf_type = F_WRLCK; 140306c85cefSDoug Rabson } 140406c85cefSDoug Rabson 140506c85cefSDoug Rabson /* 1406dfdcada3SDoug Rabson * We are blocked. Create edges to each blocking lock, 1407dfdcada3SDoug Rabson * checking for deadlock using the owner graph. For 1408dfdcada3SDoug Rabson * simplicity, we run deadlock detection for all 1409dfdcada3SDoug Rabson * locks, posix and otherwise. 1410dfdcada3SDoug Rabson */ 1411dfdcada3SDoug Rabson sx_xlock(&lf_owner_graph_lock); 1412dfdcada3SDoug Rabson error = lf_add_outgoing(state, lock); 1413dfdcada3SDoug Rabson sx_xunlock(&lf_owner_graph_lock); 1414dfdcada3SDoug Rabson 1415dfdcada3SDoug Rabson if (error) { 1416dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 1417dfdcada3SDoug Rabson if (lockf_debug & 1) 1418dfdcada3SDoug Rabson lf_print("lf_setlock: deadlock", lock); 1419dfdcada3SDoug Rabson #endif 1420dfdcada3SDoug Rabson lf_free_lock(lock); 1421dfdcada3SDoug Rabson goto out; 142292dc7331SDavid Greenman } 1423dfdcada3SDoug Rabson 142492dc7331SDavid Greenman /* 1425dfdcada3SDoug Rabson * We have added edges to everything that blocks 1426dfdcada3SDoug Rabson * us. Sleep until they all go away. 
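 * The sleep priority includes PCATCH unless F_NOINTR was
 * given, and stop signals are deferred across the sleep
 * (see the sigdeferstop() call below).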
142792dc7331SDavid Greenman */ 1428dfdcada3SDoug Rabson LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link); 142992dc7331SDavid Greenman #ifdef LOCKF_DEBUG 143092dc7331SDavid Greenman if (lockf_debug & 1) { 1431dfdcada3SDoug Rabson struct lockf_edge *e; 1432dfdcada3SDoug Rabson LIST_FOREACH(e, &lock->lf_outedges, le_outlink) { 1433dfdcada3SDoug Rabson lf_print("lf_setlock: blocking on", e->le_to); 1434dfdcada3SDoug Rabson lf_printlist("lf_setlock", e->le_to); 1435dfdcada3SDoug Rabson } 143692dc7331SDavid Greenman } 143792dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 1438dfdcada3SDoug Rabson 1439dfdcada3SDoug Rabson if ((lock->lf_flags & F_WAIT) == 0) { 1440dfdcada3SDoug Rabson /* 1441dfdcada3SDoug Rabson * The caller requested async notification - 1442dfdcada3SDoug Rabson * this callback happens when the blocking 1443dfdcada3SDoug Rabson * lock is released, allowing the caller to 1444dfdcada3SDoug Rabson * make another attempt to take the lock. 1445dfdcada3SDoug Rabson */ 1446dfdcada3SDoug Rabson *cookiep = (void *) lock; 1447dfdcada3SDoug Rabson error = EINPROGRESS; 1448dfdcada3SDoug Rabson goto out; 1449dfdcada3SDoug Rabson } 1450dfdcada3SDoug Rabson 14518af54d4cSKonstantin Belousov lock->lf_refs++; 1452883a5a4aSKonstantin Belousov stops_deferred = sigdeferstop(SIGDEFERSTOP_ERESTART); 1453dfdcada3SDoug Rabson error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0); 1454883a5a4aSKonstantin Belousov sigallowstop(stops_deferred); 14558af54d4cSKonstantin Belousov if (lf_free_lock(lock)) { 14563bcc218fSKonstantin Belousov error = EDOOFUS; 14578af54d4cSKonstantin Belousov goto out; 14588af54d4cSKonstantin Belousov } 14598af54d4cSKonstantin Belousov 146092dc7331SDavid Greenman /* 14611168ab08SBruce Evans * We may have been awakened by a signal and/or by a 1462dfdcada3SDoug Rabson * debugger continuing us (in which cases we must 1463dfdcada3SDoug Rabson * remove our lock graph edges) and/or by another 1464dfdcada3SDoug Rabson * process releasing a lock (in which case our edges 1465dfdcada3SDoug Rabson * have already been removed and we have been moved to 1466eab626f1SKonstantin Belousov * the active list). We may also have been woken by 1467eab626f1SKonstantin Belousov * lf_purgelocks which we report to the caller as 1468eab626f1SKonstantin Belousov * EINTR. In that case, lf_purgelocks will have 1469eab626f1SKonstantin Belousov * removed our lock graph edges. 1470dfdcada3SDoug Rabson * 1471dfdcada3SDoug Rabson * Note that it is possible to receive a signal after 1472dfdcada3SDoug Rabson * we were successfully woken (and moved to the active 1473dfdcada3SDoug Rabson * list) but before we resumed execution. In this 1474dfdcada3SDoug Rabson * case, our lf_outedges list will be clear. We 1475dfdcada3SDoug Rabson * pretend there was no error. 1476dfdcada3SDoug Rabson * 1477dfdcada3SDoug Rabson * Note also, if we have been sleeping long enough, we 1478dfdcada3SDoug Rabson * may now have incoming edges from some newer lock 1479dfdcada3SDoug Rabson * which is waiting behind us in the queue. 
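 * The checks below distinguish the cases: F_INTR set
 * (purged, report EINTR), lf_outedges empty (granted), or
 * edges remaining (cancel and return the sleep error).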
148092dc7331SDavid Greenman */ 1481eab626f1SKonstantin Belousov if (lock->lf_flags & F_INTR) { 1482eab626f1SKonstantin Belousov error = EINTR; 1483eab626f1SKonstantin Belousov lf_free_lock(lock); 1484eab626f1SKonstantin Belousov goto out; 1485eab626f1SKonstantin Belousov } 1486dfdcada3SDoug Rabson if (LIST_EMPTY(&lock->lf_outedges)) { 1487dfdcada3SDoug Rabson error = 0; 1488dfdcada3SDoug Rabson } else { 1489dfdcada3SDoug Rabson lf_cancel_lock(state, lock); 1490dfdcada3SDoug Rabson goto out; 14911168ab08SBruce Evans } 1492dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 1493dfdcada3SDoug Rabson if (lockf_debug & 1) { 1494dfdcada3SDoug Rabson lf_print("lf_setlock: granted", lock); 1495dfdcada3SDoug Rabson } 1496dfdcada3SDoug Rabson #endif 1497dfdcada3SDoug Rabson goto out; 1498dfdcada3SDoug Rabson } 1499dfdcada3SDoug Rabson /* 1500dfdcada3SDoug Rabson * It looks like we are going to grant the lock. First add 1501dfdcada3SDoug Rabson * edges from any currently pending lock that the new lock 1502dfdcada3SDoug Rabson * would block. 1503dfdcada3SDoug Rabson */ 1504dfdcada3SDoug Rabson sx_xlock(&lf_owner_graph_lock); 1505dfdcada3SDoug Rabson error = lf_add_incoming(state, lock); 1506dfdcada3SDoug Rabson sx_xunlock(&lf_owner_graph_lock); 15071168ab08SBruce Evans if (error) { 1508dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 1509dfdcada3SDoug Rabson if (lockf_debug & 1) 1510dfdcada3SDoug Rabson lf_print("lf_setlock: deadlock", lock); 1511dfdcada3SDoug Rabson #endif 1512dfdcada3SDoug Rabson lf_free_lock(lock); 1513dfdcada3SDoug Rabson goto out; 151492dc7331SDavid Greenman } 1515dfdcada3SDoug Rabson 151692dc7331SDavid Greenman /* 151792dc7331SDavid Greenman * No blocks!! Add the lock. Note that we will 151892dc7331SDavid Greenman * downgrade or upgrade any overlapping locks this 151992dc7331SDavid Greenman * process already owns. 152092dc7331SDavid Greenman */ 1521dfdcada3SDoug Rabson lf_activate_lock(state, lock); 1522dfdcada3SDoug Rabson error = 0; 1523dfdcada3SDoug Rabson out: 1524dfdcada3SDoug Rabson return (error); 152592dc7331SDavid Greenman } 152692dc7331SDavid Greenman 152792dc7331SDavid Greenman /* 152892dc7331SDavid Greenman * Remove a byte-range lock on an inode. 152992dc7331SDavid Greenman * 153092dc7331SDavid Greenman * Generally, find the lock (or an overlap to that lock) 153192dc7331SDavid Greenman * and remove it (or shrink it), then wakeup anyone we can. 153292dc7331SDavid Greenman */ 153387b6de2bSPoul-Henning Kamp static int 1534dfdcada3SDoug Rabson lf_clearlock(struct lockf *state, struct lockf_entry *unlock) 153592dc7331SDavid Greenman { 1536dfdcada3SDoug Rabson struct lockf_entry *overlap; 153792dc7331SDavid Greenman 1538dfdcada3SDoug Rabson overlap = LIST_FIRST(&state->ls_active); 1539dfdcada3SDoug Rabson 1540dfdcada3SDoug Rabson if (overlap == NOLOCKF) 154192dc7331SDavid Greenman return (0); 154292dc7331SDavid Greenman #ifdef LOCKF_DEBUG 154392dc7331SDavid Greenman if (unlock->lf_type != F_UNLCK) 154492dc7331SDavid Greenman panic("lf_clearlock: bad type"); 154592dc7331SDavid Greenman if (lockf_debug & 1) 154692dc7331SDavid Greenman lf_print("lf_clearlock", unlock); 154792dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 154892dc7331SDavid Greenman 1549dfdcada3SDoug Rabson lf_activate_lock(state, unlock); 155092dc7331SDavid Greenman 155192dc7331SDavid Greenman return (0); 155292dc7331SDavid Greenman } 155392dc7331SDavid Greenman 155492dc7331SDavid Greenman /* 1555dfdcada3SDoug Rabson * Check whether there is a blocking lock, and if so return its 1556dfdcada3SDoug Rabson * details in '*fl'. 
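 * On return, fl->l_type is F_UNLCK when nothing blocks; otherwise the
 * blocking range is described, with l_len == 0 standing for 'to end
 * of file' when the lock extends to OFF_MAX.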
155792dc7331SDavid Greenman */ 155887b6de2bSPoul-Henning Kamp static int 1559dfdcada3SDoug Rabson lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl) 156092dc7331SDavid Greenman { 1561dfdcada3SDoug Rabson struct lockf_entry *block; 156292dc7331SDavid Greenman 156392dc7331SDavid Greenman #ifdef LOCKF_DEBUG 156492dc7331SDavid Greenman if (lockf_debug & 1) 156592dc7331SDavid Greenman lf_print("lf_getlock", lock); 156692dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 156792dc7331SDavid Greenman 1568dfdcada3SDoug Rabson if ((block = lf_getblock(state, lock))) { 156992dc7331SDavid Greenman fl->l_type = block->lf_type; 157092dc7331SDavid Greenman fl->l_whence = SEEK_SET; 157192dc7331SDavid Greenman fl->l_start = block->lf_start; 1572dfdcada3SDoug Rabson if (block->lf_end == OFF_MAX) 157392dc7331SDavid Greenman fl->l_len = 0; 157492dc7331SDavid Greenman else 157592dc7331SDavid Greenman fl->l_len = block->lf_end - block->lf_start + 1; 1576dfdcada3SDoug Rabson fl->l_pid = block->lf_owner->lo_pid; 1577dfdcada3SDoug Rabson fl->l_sysid = block->lf_owner->lo_sysid; 157892dc7331SDavid Greenman } else { 157992dc7331SDavid Greenman fl->l_type = F_UNLCK; 158092dc7331SDavid Greenman } 158192dc7331SDavid Greenman return (0); 158292dc7331SDavid Greenman } 158392dc7331SDavid Greenman 158492dc7331SDavid Greenman /* 1585dfdcada3SDoug Rabson * Cancel an async lock request. 1586dfdcada3SDoug Rabson */ 1587dfdcada3SDoug Rabson static int 1588dfdcada3SDoug Rabson lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie) 1589dfdcada3SDoug Rabson { 1590dfdcada3SDoug Rabson struct lockf_entry *reallock; 1591dfdcada3SDoug Rabson 1592dfdcada3SDoug Rabson /* 1593dfdcada3SDoug Rabson * We need to match this request with an existing lock 1594dfdcada3SDoug Rabson * request. 1595dfdcada3SDoug Rabson */ 1596dfdcada3SDoug Rabson LIST_FOREACH(reallock, &state->ls_pending, lf_link) { 1597dfdcada3SDoug Rabson if ((void *) reallock == cookie) { 1598dfdcada3SDoug Rabson /* 1599dfdcada3SDoug Rabson * Double-check that this lock looks right 1600dfdcada3SDoug Rabson * (maybe use a rolling ID for the cancel 1601dfdcada3SDoug Rabson * cookie instead?) 1602dfdcada3SDoug Rabson */ 1603dfdcada3SDoug Rabson if (!(reallock->lf_vnode == lock->lf_vnode 1604dfdcada3SDoug Rabson && reallock->lf_start == lock->lf_start 1605dfdcada3SDoug Rabson && reallock->lf_end == lock->lf_end)) { 1606dfdcada3SDoug Rabson return (ENOENT); 1607dfdcada3SDoug Rabson } 1608dfdcada3SDoug Rabson 1609dfdcada3SDoug Rabson /* 1610dfdcada3SDoug Rabson * Make sure this lock was async and then just 1611dfdcada3SDoug Rabson * remove it from its wait lists. 1612dfdcada3SDoug Rabson */ 1613dfdcada3SDoug Rabson if (!reallock->lf_async_task) { 1614dfdcada3SDoug Rabson return (ENOENT); 1615dfdcada3SDoug Rabson } 1616dfdcada3SDoug Rabson 1617dfdcada3SDoug Rabson /* 1618dfdcada3SDoug Rabson * Note that since any other thread must take 1619dfdcada3SDoug Rabson * state->ls_lock before it can possibly 1620dfdcada3SDoug Rabson * trigger the async callback, we are safe 1621dfdcada3SDoug Rabson * from a race with lf_wakeup_lock, i.e. we 1622dfdcada3SDoug Rabson * can free the lock (actually our caller does 1623dfdcada3SDoug Rabson * this). 1624dfdcada3SDoug Rabson */ 1625dfdcada3SDoug Rabson lf_cancel_lock(state, reallock); 1626dfdcada3SDoug Rabson return (0); 1627dfdcada3SDoug Rabson } 1628dfdcada3SDoug Rabson } 1629dfdcada3SDoug Rabson 1630dfdcada3SDoug Rabson /* 1631dfdcada3SDoug Rabson * We didn't find a matching lock - not much we can do here. 
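 * This can legitimately happen if the request was granted (and
 * moved to ls_active) before the cancel arrived, since only
 * ls_pending is searched above.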
1632dfdcada3SDoug Rabson */
1633dfdcada3SDoug Rabson return (ENOENT);
1634dfdcada3SDoug Rabson }
1635dfdcada3SDoug Rabson 
1636dfdcada3SDoug Rabson /*
163792dc7331SDavid Greenman * Walk the list of locks for an inode and
163892dc7331SDavid Greenman * return the first blocking lock.
163992dc7331SDavid Greenman */
1640dfdcada3SDoug Rabson static struct lockf_entry *
1641dfdcada3SDoug Rabson lf_getblock(struct lockf *state, struct lockf_entry *lock)
164292dc7331SDavid Greenman {
1643dfdcada3SDoug Rabson struct lockf_entry *overlap;
164492dc7331SDavid Greenman 
1645dfdcada3SDoug Rabson LIST_FOREACH(overlap, &state->ls_active, lf_link) {
164692dc7331SDavid Greenman /*
1647dfdcada3SDoug Rabson * We may assume that the active list is sorted by
1648dfdcada3SDoug Rabson * lf_start.
164992dc7331SDavid Greenman */
1650dfdcada3SDoug Rabson if (overlap->lf_start > lock->lf_end)
1651dfdcada3SDoug Rabson break;
1652dfdcada3SDoug Rabson if (!lf_blocks(lock, overlap))
1653dfdcada3SDoug Rabson continue;
165492dc7331SDavid Greenman return (overlap);
165592dc7331SDavid Greenman }
165692dc7331SDavid Greenman return (NOLOCKF);
165792dc7331SDavid Greenman }
165892dc7331SDavid Greenman 
165992dc7331SDavid Greenman /*
1660dfdcada3SDoug Rabson * Walk the list of locks for an inode to find an overlapping lock (if
1661dfdcada3SDoug Rabson * any) and return a classification of that overlap.
1662dfdcada3SDoug Rabson *
1663dfdcada3SDoug Rabson * Arguments:
1664dfdcada3SDoug Rabson *	*overlap	The place in the lock list to start looking
1665dfdcada3SDoug Rabson *	lock		The lock which is being tested
1666dfdcada3SDoug Rabson *	type		Pass 'SELF' to test only locks with the same
1667dfdcada3SDoug Rabson *			owner as lock, or 'OTHERS' to test only locks
1668dfdcada3SDoug Rabson *			with a different owner
1669dfdcada3SDoug Rabson *
1670dfdcada3SDoug Rabson * Returns one of six values:
1671dfdcada3SDoug Rabson *	0) no overlap
1672dfdcada3SDoug Rabson *	1) overlap == lock
1673dfdcada3SDoug Rabson *	2) overlap contains lock
1674dfdcada3SDoug Rabson *	3) lock contains overlap
1675dfdcada3SDoug Rabson *	4) overlap starts before lock
1676dfdcada3SDoug Rabson *	5) overlap ends after lock
1677dfdcada3SDoug Rabson *
1678dfdcada3SDoug Rabson * If there is an overlapping lock, '*overlap' is set to point at the
1679dfdcada3SDoug Rabson * overlapping lock.
168092dc7331SDavid Greenman *
168192dc7331SDavid Greenman * NOTE: this returns only the FIRST overlapping lock. There
168292dc7331SDavid Greenman * may be more than one.
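 *
 * A rough sketch of the six cases, with '=' marking 'lock' and '-'
 * marking the candidate overlap:
 *
 *	0:	=====   -----
 *	1:	=====
 *		-----
 *	2:	 ===
 *		-----
 *	3:	=====
 *		 ---
 *	4:	  ====
 *		----
 *	5:	====
 *		 -----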
168392dc7331SDavid Greenman */
168487b6de2bSPoul-Henning Kamp static int
1685dfdcada3SDoug Rabson lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
168692dc7331SDavid Greenman {
1687dfdcada3SDoug Rabson struct lockf_entry *lf;
168892dc7331SDavid Greenman off_t start, end;
1689dfdcada3SDoug Rabson int res;
169092dc7331SDavid Greenman 
1691dfdcada3SDoug Rabson if ((*overlap) == NOLOCKF) {
169292dc7331SDavid Greenman return (0);
1693dfdcada3SDoug Rabson }
169492dc7331SDavid Greenman #ifdef LOCKF_DEBUG
169592dc7331SDavid Greenman if (lockf_debug & 2)
169692dc7331SDavid Greenman lf_print("lf_findoverlap: looking for overlap in", lock);
169792dc7331SDavid Greenman #endif /* LOCKF_DEBUG */
169892dc7331SDavid Greenman start = lock->lf_start;
169992dc7331SDavid Greenman end = lock->lf_end;
1700dfdcada3SDoug Rabson res = 0;
1701dfdcada3SDoug Rabson while (*overlap) {
1702dfdcada3SDoug Rabson lf = *overlap;
1703dfdcada3SDoug Rabson if (lf->lf_start > end)
1704dfdcada3SDoug Rabson break;
1705dfdcada3SDoug Rabson if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
1706dfdcada3SDoug Rabson ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
1707dfdcada3SDoug Rabson *overlap = LIST_NEXT(lf, lf_link);
170892dc7331SDavid Greenman continue;
170992dc7331SDavid Greenman }
171092dc7331SDavid Greenman #ifdef LOCKF_DEBUG
171192dc7331SDavid Greenman if (lockf_debug & 2)
171292dc7331SDavid Greenman lf_print("\tchecking", lf);
171392dc7331SDavid Greenman #endif /* LOCKF_DEBUG */
171492dc7331SDavid Greenman /*
171592dc7331SDavid Greenman * OK, check for overlap
171692dc7331SDavid Greenman *
171792dc7331SDavid Greenman * Six cases:
171892dc7331SDavid Greenman *	0) no overlap
171992dc7331SDavid Greenman *	1) overlap == lock
172092dc7331SDavid Greenman *	2) overlap contains lock
172192dc7331SDavid Greenman *	3) lock contains overlap
172292dc7331SDavid Greenman *	4) overlap starts before lock
172392dc7331SDavid Greenman *	5) overlap ends after lock
172492dc7331SDavid Greenman */
1725dfdcada3SDoug Rabson if (start > lf->lf_end) {
172692dc7331SDavid Greenman /* Case 0 */
172792dc7331SDavid Greenman #ifdef LOCKF_DEBUG
172892dc7331SDavid Greenman if (lockf_debug & 2)
172992dc7331SDavid Greenman printf("no overlap\n");
173092dc7331SDavid Greenman #endif /* LOCKF_DEBUG */
1731dfdcada3SDoug Rabson *overlap = LIST_NEXT(lf, lf_link);
173292dc7331SDavid Greenman continue;
173392dc7331SDavid Greenman }
1734dfdcada3SDoug Rabson if (lf->lf_start == start && lf->lf_end == end) {
173592dc7331SDavid Greenman /* Case 1 */
173692dc7331SDavid Greenman #ifdef LOCKF_DEBUG
173792dc7331SDavid Greenman if (lockf_debug & 2)
173892dc7331SDavid Greenman printf("overlap == lock\n");
173992dc7331SDavid Greenman #endif /* LOCKF_DEBUG */
1740dfdcada3SDoug Rabson res = 1;
1741dfdcada3SDoug Rabson break;
174292dc7331SDavid Greenman }
1743dfdcada3SDoug Rabson if (lf->lf_start <= start && lf->lf_end >= end) {
174492dc7331SDavid Greenman /* Case 2 */
174592dc7331SDavid Greenman #ifdef LOCKF_DEBUG
174692dc7331SDavid Greenman if (lockf_debug & 2)
174792dc7331SDavid Greenman printf("overlap contains lock\n");
174892dc7331SDavid Greenman #endif /* LOCKF_DEBUG */
1749dfdcada3SDoug Rabson res = 2;
1750dfdcada3SDoug Rabson break;
175192dc7331SDavid Greenman }
1752dfdcada3SDoug Rabson if (start <= lf->lf_start && end >= lf->lf_end) {
175392dc7331SDavid Greenman /* Case 3 */
175492dc7331SDavid Greenman #ifdef LOCKF_DEBUG
175592dc7331SDavid Greenman if (lockf_debug & 2)
175692dc7331SDavid Greenman printf("lock contains overlap\n");
175792dc7331SDavid Greenman #endif /* LOCKF_DEBUG */
1758dfdcada3SDoug Rabson res = 3;
1759dfdcada3SDoug Rabson break;
176092dc7331SDavid Greenman }
1761dfdcada3SDoug Rabson if (lf->lf_start < start && lf->lf_end >= start) {
176292dc7331SDavid Greenman /* Case 4 */
176392dc7331SDavid Greenman #ifdef LOCKF_DEBUG
176492dc7331SDavid Greenman if (lockf_debug & 2)
176592dc7331SDavid Greenman printf("overlap starts before lock\n");
176692dc7331SDavid Greenman #endif /* LOCKF_DEBUG */
1767dfdcada3SDoug Rabson res = 4;
1768dfdcada3SDoug Rabson break;
176992dc7331SDavid Greenman }
1770dfdcada3SDoug Rabson if (lf->lf_start > start && lf->lf_end > end) {
177192dc7331SDavid Greenman /* Case 5 */
177292dc7331SDavid Greenman #ifdef LOCKF_DEBUG
177392dc7331SDavid Greenman if (lockf_debug & 2)
177492dc7331SDavid Greenman printf("overlap ends after lock\n");
177592dc7331SDavid Greenman #endif /* LOCKF_DEBUG */
1776dfdcada3SDoug Rabson res = 5;
1777dfdcada3SDoug Rabson break;
177892dc7331SDavid Greenman }
177992dc7331SDavid Greenman panic("lf_findoverlap: default");
178092dc7331SDavid Greenman }
1781dfdcada3SDoug Rabson return (res);
178292dc7331SDavid Greenman }
178392dc7331SDavid Greenman 
178492dc7331SDavid Greenman /*
1785dfdcada3SDoug Rabson * Split the existing 'lock1', based on the extent of the lock
1786dfdcada3SDoug Rabson * described by 'lock2'. The existing lock should cover 'lock2'
1787dfdcada3SDoug Rabson * entirely.
1788dfdcada3SDoug Rabson *
1789dfdcada3SDoug Rabson * Any pending locks which have been unblocked are added to
1790dfdcada3SDoug Rabson * 'granted'.
179192dc7331SDavid Greenman */
179287b6de2bSPoul-Henning Kamp static void
1793dfdcada3SDoug Rabson lf_split(struct lockf *state, struct lockf_entry *lock1,
1794dfdcada3SDoug Rabson struct lockf_entry *lock2, struct lockf_entry_list *granted)
179592dc7331SDavid Greenman {
1796dfdcada3SDoug Rabson struct lockf_entry *splitlock;
179792dc7331SDavid Greenman 
179892dc7331SDavid Greenman #ifdef LOCKF_DEBUG
179992dc7331SDavid Greenman if (lockf_debug & 2) {
180092dc7331SDavid Greenman lf_print("lf_split", lock1);
180192dc7331SDavid Greenman lf_print("splitting from", lock2);
180292dc7331SDavid Greenman }
180392dc7331SDavid Greenman #endif /* LOCKF_DEBUG */
180492dc7331SDavid Greenman /*
1805dfdcada3SDoug Rabson * Check to see whether we need to split at all.
180692dc7331SDavid Greenman */
180792dc7331SDavid Greenman if (lock1->lf_start == lock2->lf_start) {
1808dfdcada3SDoug Rabson lf_set_start(state, lock1, lock2->lf_end + 1, granted);
180992dc7331SDavid Greenman return;
181092dc7331SDavid Greenman }
181192dc7331SDavid Greenman if (lock1->lf_end == lock2->lf_end) {
1812dfdcada3SDoug Rabson lf_set_end(state, lock1, lock2->lf_start - 1, granted);
181392dc7331SDavid Greenman return;
181492dc7331SDavid Greenman }
181592dc7331SDavid Greenman /*
181692dc7331SDavid Greenman * Make a new lock consisting of the last part of
1817dfdcada3SDoug Rabson * the encompassing lock.
181892dc7331SDavid Greenman */
1819dfdcada3SDoug Rabson splitlock = lf_alloc_lock(lock1->lf_owner);
1820dfdcada3SDoug Rabson memcpy(splitlock, lock1, sizeof *splitlock);
18218af54d4cSKonstantin Belousov splitlock->lf_refs = 1;
1822dfdcada3SDoug Rabson if (splitlock->lf_flags & F_REMOTE)
1823dfdcada3SDoug Rabson vref(splitlock->lf_vnode);
1824dfdcada3SDoug Rabson 
1825dfdcada3SDoug Rabson /*
1826dfdcada3SDoug Rabson * This cannot cause a deadlock since any edges we would add
1827dfdcada3SDoug Rabson * to splitlock already exist in lock1. We must be sure to add
1828e3043798SPedro F. Giffuni * necessary dependencies to splitlock before we reduce lock1;
1829dfdcada3SDoug Rabson * otherwise we may accidentally grant a pending lock that
1830dfdcada3SDoug Rabson * was blocked by the tail end of lock1.
1831dfdcada3SDoug Rabson */
183292dc7331SDavid Greenman splitlock->lf_start = lock2->lf_end + 1;
1833dfdcada3SDoug Rabson LIST_INIT(&splitlock->lf_outedges);
1834dfdcada3SDoug Rabson LIST_INIT(&splitlock->lf_inedges);
1835dfdcada3SDoug Rabson sx_xlock(&lf_owner_graph_lock);
1836dfdcada3SDoug Rabson lf_add_incoming(state, splitlock);
1837dfdcada3SDoug Rabson sx_xunlock(&lf_owner_graph_lock);
1838dfdcada3SDoug Rabson 
1839dfdcada3SDoug Rabson lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1840dfdcada3SDoug Rabson 
184192dc7331SDavid Greenman /*
184292dc7331SDavid Greenman * OK, now link it in
184392dc7331SDavid Greenman */
1844dfdcada3SDoug Rabson lf_insert_lock(state, splitlock);
1845dfdcada3SDoug Rabson }
1846dfdcada3SDoug Rabson 
1847c675522fSDoug Rabson struct lockdesc {
1848c675522fSDoug Rabson STAILQ_ENTRY(lockdesc) link;
1849dfdcada3SDoug Rabson struct vnode *vp;
1850dfdcada3SDoug Rabson struct flock fl;
1851dfdcada3SDoug Rabson };
1852c675522fSDoug Rabson STAILQ_HEAD(lockdesclist, lockdesc);
1853dfdcada3SDoug Rabson 
1854c675522fSDoug Rabson int
1855c675522fSDoug Rabson lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
1856dfdcada3SDoug Rabson {
1857dfdcada3SDoug Rabson struct lockf *ls;
1858dfdcada3SDoug Rabson struct lockf_entry *lf;
1859c675522fSDoug Rabson struct lockdesc *ldesc;
1860c675522fSDoug Rabson struct lockdesclist locks;
1861c675522fSDoug Rabson int error;
1862dfdcada3SDoug Rabson 
1863dfdcada3SDoug Rabson /*
1864dfdcada3SDoug Rabson * In order to keep the locking simple, we iterate over the
1865dfdcada3SDoug Rabson * active lock lists to build a list of locks that need
1866c675522fSDoug Rabson * releasing. We then call the iterator for each one in turn.
1867dfdcada3SDoug Rabson *
1868dfdcada3SDoug Rabson * We take an extra reference to the vnode for the duration to
1869dfdcada3SDoug Rabson * make sure it doesn't go away before we are finished.
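 * Note that only active locks are visited; pending requests
 * belonging to the sysid are not reported by this interface.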
1870dfdcada3SDoug Rabson */ 1871dfdcada3SDoug Rabson STAILQ_INIT(&locks); 1872dfdcada3SDoug Rabson sx_xlock(&lf_lock_states_lock); 1873dfdcada3SDoug Rabson LIST_FOREACH(ls, &lf_lock_states, ls_link) { 1874dfdcada3SDoug Rabson sx_xlock(&ls->ls_lock); 1875dfdcada3SDoug Rabson LIST_FOREACH(lf, &ls->ls_active, lf_link) { 1876dfdcada3SDoug Rabson if (lf->lf_owner->lo_sysid != sysid) 1877dfdcada3SDoug Rabson continue; 1878dfdcada3SDoug Rabson 1879c675522fSDoug Rabson ldesc = malloc(sizeof(struct lockdesc), M_LOCKF, 1880dfdcada3SDoug Rabson M_WAITOK); 1881c675522fSDoug Rabson ldesc->vp = lf->lf_vnode; 1882c675522fSDoug Rabson vref(ldesc->vp); 1883c675522fSDoug Rabson ldesc->fl.l_start = lf->lf_start; 1884dfdcada3SDoug Rabson if (lf->lf_end == OFF_MAX) 1885c675522fSDoug Rabson ldesc->fl.l_len = 0; 1886dfdcada3SDoug Rabson else 1887c675522fSDoug Rabson ldesc->fl.l_len = 1888dfdcada3SDoug Rabson lf->lf_end - lf->lf_start + 1; 1889c675522fSDoug Rabson ldesc->fl.l_whence = SEEK_SET; 1890c675522fSDoug Rabson ldesc->fl.l_type = F_UNLCK; 1891c675522fSDoug Rabson ldesc->fl.l_pid = lf->lf_owner->lo_pid; 1892c675522fSDoug Rabson ldesc->fl.l_sysid = sysid; 1893c675522fSDoug Rabson STAILQ_INSERT_TAIL(&locks, ldesc, link); 1894dfdcada3SDoug Rabson } 1895dfdcada3SDoug Rabson sx_xunlock(&ls->ls_lock); 1896dfdcada3SDoug Rabson } 1897dfdcada3SDoug Rabson sx_xunlock(&lf_lock_states_lock); 1898dfdcada3SDoug Rabson 1899c675522fSDoug Rabson /* 1900c675522fSDoug Rabson * Call the iterator function for each lock in turn. If the 1901c675522fSDoug Rabson * iterator returns an error code, just free the rest of the 1902c675522fSDoug Rabson * lockdesc structures. 1903c675522fSDoug Rabson */ 1904c675522fSDoug Rabson error = 0; 1905c675522fSDoug Rabson while ((ldesc = STAILQ_FIRST(&locks)) != NULL) { 1906dfdcada3SDoug Rabson STAILQ_REMOVE_HEAD(&locks, link); 1907c675522fSDoug Rabson if (!error) 1908c675522fSDoug Rabson error = fn(ldesc->vp, &ldesc->fl, arg); 1909c675522fSDoug Rabson vrele(ldesc->vp); 1910c675522fSDoug Rabson free(ldesc, M_LOCKF); 1911dfdcada3SDoug Rabson } 1912c675522fSDoug Rabson 1913c675522fSDoug Rabson return (error); 1914c675522fSDoug Rabson } 1915c675522fSDoug Rabson 1916c675522fSDoug Rabson int 1917c675522fSDoug Rabson lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg) 1918c675522fSDoug Rabson { 1919c675522fSDoug Rabson struct lockf *ls; 1920c675522fSDoug Rabson struct lockf_entry *lf; 1921c675522fSDoug Rabson struct lockdesc *ldesc; 1922c675522fSDoug Rabson struct lockdesclist locks; 1923c675522fSDoug Rabson int error; 1924c675522fSDoug Rabson 1925c675522fSDoug Rabson /* 1926c675522fSDoug Rabson * In order to keep the locking simple, we iterate over the 1927c675522fSDoug Rabson * active lock lists to build a list of locks that need 1928c675522fSDoug Rabson * releasing. We then call the iterator for each one in turn. 1929c675522fSDoug Rabson * 1930c675522fSDoug Rabson * We take an extra reference to the vnode for the duration to 1931c675522fSDoug Rabson * make sure it doesn't go away before we are finished. 
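 * We also hold ls_threads elevated around the scan so that the
 * lock state cannot be torn down (see lf_purgelocks()) while we
 * are using it.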
1932c675522fSDoug Rabson */ 1933c675522fSDoug Rabson STAILQ_INIT(&locks); 193428fe6a3fSKonstantin Belousov VI_LOCK(vp); 1935c675522fSDoug Rabson ls = vp->v_lockf; 193628fe6a3fSKonstantin Belousov if (!ls) { 193728fe6a3fSKonstantin Belousov VI_UNLOCK(vp); 1938c675522fSDoug Rabson return (0); 193928fe6a3fSKonstantin Belousov } 194028fe6a3fSKonstantin Belousov ls->ls_threads++; 194128fe6a3fSKonstantin Belousov VI_UNLOCK(vp); 1942c675522fSDoug Rabson 1943c675522fSDoug Rabson sx_xlock(&ls->ls_lock); 1944c675522fSDoug Rabson LIST_FOREACH(lf, &ls->ls_active, lf_link) { 1945c675522fSDoug Rabson ldesc = malloc(sizeof(struct lockdesc), M_LOCKF, 1946c675522fSDoug Rabson M_WAITOK); 1947c675522fSDoug Rabson ldesc->vp = lf->lf_vnode; 1948c675522fSDoug Rabson vref(ldesc->vp); 1949c675522fSDoug Rabson ldesc->fl.l_start = lf->lf_start; 1950c675522fSDoug Rabson if (lf->lf_end == OFF_MAX) 1951c675522fSDoug Rabson ldesc->fl.l_len = 0; 1952c675522fSDoug Rabson else 1953c675522fSDoug Rabson ldesc->fl.l_len = 1954c675522fSDoug Rabson lf->lf_end - lf->lf_start + 1; 1955c675522fSDoug Rabson ldesc->fl.l_whence = SEEK_SET; 1956c675522fSDoug Rabson ldesc->fl.l_type = F_UNLCK; 1957c675522fSDoug Rabson ldesc->fl.l_pid = lf->lf_owner->lo_pid; 1958c675522fSDoug Rabson ldesc->fl.l_sysid = lf->lf_owner->lo_sysid; 1959c675522fSDoug Rabson STAILQ_INSERT_TAIL(&locks, ldesc, link); 1960c675522fSDoug Rabson } 1961c675522fSDoug Rabson sx_xunlock(&ls->ls_lock); 196228fe6a3fSKonstantin Belousov VI_LOCK(vp); 196328fe6a3fSKonstantin Belousov ls->ls_threads--; 196428fe6a3fSKonstantin Belousov wakeup(ls); 196528fe6a3fSKonstantin Belousov VI_UNLOCK(vp); 1966c675522fSDoug Rabson 1967c675522fSDoug Rabson /* 1968c675522fSDoug Rabson * Call the iterator function for each lock in turn. If the 1969c675522fSDoug Rabson * iterator returns an error code, just free the rest of the 1970c675522fSDoug Rabson * lockdesc structures. 
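 * The vnode references taken above are always released here,
 * even for entries processed after an error.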
1971c675522fSDoug Rabson */ 1972c675522fSDoug Rabson error = 0; 1973c675522fSDoug Rabson while ((ldesc = STAILQ_FIRST(&locks)) != NULL) { 1974c675522fSDoug Rabson STAILQ_REMOVE_HEAD(&locks, link); 1975c675522fSDoug Rabson if (!error) 1976c675522fSDoug Rabson error = fn(ldesc->vp, &ldesc->fl, arg); 1977c675522fSDoug Rabson vrele(ldesc->vp); 1978c675522fSDoug Rabson free(ldesc, M_LOCKF); 1979c675522fSDoug Rabson } 1980c675522fSDoug Rabson 1981c675522fSDoug Rabson return (error); 1982c675522fSDoug Rabson } 1983c675522fSDoug Rabson 1984c675522fSDoug Rabson static int 1985c675522fSDoug Rabson lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg) 1986c675522fSDoug Rabson { 1987c675522fSDoug Rabson 1988c675522fSDoug Rabson VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE); 1989c675522fSDoug Rabson return (0); 1990c675522fSDoug Rabson } 1991c675522fSDoug Rabson 1992c675522fSDoug Rabson void 1993c675522fSDoug Rabson lf_clearremotesys(int sysid) 1994c675522fSDoug Rabson { 1995c675522fSDoug Rabson 1996c675522fSDoug Rabson KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS")); 1997c675522fSDoug Rabson lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL); 1998dfdcada3SDoug Rabson } 1999dfdcada3SDoug Rabson 2000dfdcada3SDoug Rabson int 2001dfdcada3SDoug Rabson lf_countlocks(int sysid) 2002dfdcada3SDoug Rabson { 2003dfdcada3SDoug Rabson int i; 2004dfdcada3SDoug Rabson struct lock_owner *lo; 2005dfdcada3SDoug Rabson int count; 2006dfdcada3SDoug Rabson 2007dfdcada3SDoug Rabson count = 0; 2008dfdcada3SDoug Rabson sx_xlock(&lf_lock_owners_lock); 2009dfdcada3SDoug Rabson for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) 2010dfdcada3SDoug Rabson LIST_FOREACH(lo, &lf_lock_owners[i], lo_link) 2011dfdcada3SDoug Rabson if (lo->lo_sysid == sysid) 2012dfdcada3SDoug Rabson count += lo->lo_refs; 2013dfdcada3SDoug Rabson sx_xunlock(&lf_lock_owners_lock); 2014dfdcada3SDoug Rabson 2015dfdcada3SDoug Rabson return (count); 2016dfdcada3SDoug Rabson } 2017dfdcada3SDoug Rabson 2018dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG 2019dfdcada3SDoug Rabson 2020dfdcada3SDoug Rabson /* 2021dfdcada3SDoug Rabson * Return non-zero if y is reachable from x using a brute force 2022dfdcada3SDoug Rabson * search. If reachable and path is non-null, return the route taken 2023dfdcada3SDoug Rabson * in path. 2024dfdcada3SDoug Rabson */ 2025dfdcada3SDoug Rabson static int 2026dfdcada3SDoug Rabson graph_reaches(struct owner_vertex *x, struct owner_vertex *y, 2027dfdcada3SDoug Rabson struct owner_vertex_list *path) 2028dfdcada3SDoug Rabson { 2029dfdcada3SDoug Rabson struct owner_edge *e; 2030dfdcada3SDoug Rabson 2031dfdcada3SDoug Rabson if (x == y) { 2032dfdcada3SDoug Rabson if (path) 2033dfdcada3SDoug Rabson TAILQ_INSERT_HEAD(path, x, v_link); 2034dfdcada3SDoug Rabson return 1; 2035dfdcada3SDoug Rabson } 2036dfdcada3SDoug Rabson 2037dfdcada3SDoug Rabson LIST_FOREACH(e, &x->v_outedges, e_outlink) { 2038dfdcada3SDoug Rabson if (graph_reaches(e->e_to, y, path)) { 2039dfdcada3SDoug Rabson if (path) 2040dfdcada3SDoug Rabson TAILQ_INSERT_HEAD(path, x, v_link); 2041dfdcada3SDoug Rabson return 1; 2042dfdcada3SDoug Rabson } 2043dfdcada3SDoug Rabson } 2044dfdcada3SDoug Rabson return 0; 204592dc7331SDavid Greenman } 204692dc7331SDavid Greenman 204792dc7331SDavid Greenman /* 2048dfdcada3SDoug Rabson * Perform consistency checks on the graph. Make sure the values of 2049dfdcada3SDoug Rabson * v_order are correct. If checkorder is non-zero, check no vertex can 2050dfdcada3SDoug Rabson * reach any other vertex with a smaller order. 
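 * This is the topological-order invariant that graph_add_edge()
 * works to maintain when it re-orders vertices.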
205192dc7331SDavid Greenman */
205287b6de2bSPoul-Henning Kamp static void
2053dfdcada3SDoug Rabson graph_check(struct owner_graph *g, int checkorder)
205492dc7331SDavid Greenman {
2055dfdcada3SDoug Rabson int i, j;
205692dc7331SDavid Greenman 
2057dfdcada3SDoug Rabson for (i = 0; i < g->g_size; i++) {
2058dfdcada3SDoug Rabson if (!g->g_vertices[i]->v_owner)
2059dfdcada3SDoug Rabson continue;
2060dfdcada3SDoug Rabson KASSERT(g->g_vertices[i]->v_order == i,
2061dfdcada3SDoug Rabson ("lock graph vertices disordered"));
2062dfdcada3SDoug Rabson if (checkorder) {
2063dfdcada3SDoug Rabson for (j = 0; j < i; j++) {
2064dfdcada3SDoug Rabson if (!g->g_vertices[j]->v_owner)
2065dfdcada3SDoug Rabson continue;
2066dfdcada3SDoug Rabson KASSERT(!graph_reaches(g->g_vertices[i],
2067dfdcada3SDoug Rabson g->g_vertices[j], NULL),
2068dfdcada3SDoug Rabson ("lock graph vertices disordered"));
2069dfdcada3SDoug Rabson }
2070dfdcada3SDoug Rabson }
2071dfdcada3SDoug Rabson }
2072dfdcada3SDoug Rabson }
2073dfdcada3SDoug Rabson 
2074dfdcada3SDoug Rabson static void
2075dfdcada3SDoug Rabson graph_print_vertices(struct owner_vertex_list *set)
2076dfdcada3SDoug Rabson {
2077dfdcada3SDoug Rabson struct owner_vertex *v;
2078dfdcada3SDoug Rabson 
2079dfdcada3SDoug Rabson printf("{ ");
2080dfdcada3SDoug Rabson TAILQ_FOREACH(v, set, v_link) {
2081dfdcada3SDoug Rabson printf("%d:", v->v_order);
2082dfdcada3SDoug Rabson lf_print_owner(v->v_owner);
2083dfdcada3SDoug Rabson if (TAILQ_NEXT(v, v_link))
2084dfdcada3SDoug Rabson printf(", ");
2085dfdcada3SDoug Rabson }
2086dfdcada3SDoug Rabson printf(" }\n");
2087dfdcada3SDoug Rabson }
2088dfdcada3SDoug Rabson 
2089dfdcada3SDoug Rabson #endif
2090dfdcada3SDoug Rabson 
2091dfdcada3SDoug Rabson /*
2092dfdcada3SDoug Rabson * Calculate the sub-set of vertices v from the affected region [y..x]
2093dfdcada3SDoug Rabson * where v is reachable from y. Return -1 if a loop was detected
2094dfdcada3SDoug Rabson * (i.e. x is reachable from y), otherwise the number of vertices in
2095dfdcada3SDoug Rabson * this subset.
2096dfdcada3SDoug Rabson */
2097dfdcada3SDoug Rabson static int
2098dfdcada3SDoug Rabson graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
2099dfdcada3SDoug Rabson struct owner_vertex *y, struct owner_vertex_list *delta)
2100dfdcada3SDoug Rabson {
2101dfdcada3SDoug Rabson uint32_t gen;
2102dfdcada3SDoug Rabson struct owner_vertex *v;
2103dfdcada3SDoug Rabson struct owner_edge *e;
2104dfdcada3SDoug Rabson int n;
2105dfdcada3SDoug Rabson 
2106dfdcada3SDoug Rabson /*
2107dfdcada3SDoug Rabson * We start with a set containing just y. Then for each vertex
2108dfdcada3SDoug Rabson * v in the set so far unprocessed, we add each vertex that v
2109dfdcada3SDoug Rabson * has an out-edge to and that is within the affected region
2110dfdcada3SDoug Rabson * [y..x]. If we see the vertex x on our travels, stop
2111dfdcada3SDoug Rabson * immediately.
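 * Returning -1 in that case tells graph_add_edge() that the
 * proposed edge would close a cycle, i.e. a deadlock.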
2112dfdcada3SDoug Rabson */ 2113dfdcada3SDoug Rabson TAILQ_INIT(delta); 2114dfdcada3SDoug Rabson TAILQ_INSERT_TAIL(delta, y, v_link); 2115dfdcada3SDoug Rabson v = y; 2116dfdcada3SDoug Rabson n = 1; 2117dfdcada3SDoug Rabson gen = g->g_gen; 2118dfdcada3SDoug Rabson while (v) { 2119dfdcada3SDoug Rabson LIST_FOREACH(e, &v->v_outedges, e_outlink) { 2120dfdcada3SDoug Rabson if (e->e_to == x) 2121dfdcada3SDoug Rabson return -1; 2122dfdcada3SDoug Rabson if (e->e_to->v_order < x->v_order 2123dfdcada3SDoug Rabson && e->e_to->v_gen != gen) { 2124dfdcada3SDoug Rabson e->e_to->v_gen = gen; 2125dfdcada3SDoug Rabson TAILQ_INSERT_TAIL(delta, e->e_to, v_link); 2126dfdcada3SDoug Rabson n++; 2127dfdcada3SDoug Rabson } 2128dfdcada3SDoug Rabson } 2129dfdcada3SDoug Rabson v = TAILQ_NEXT(v, v_link); 2130dfdcada3SDoug Rabson } 2131dfdcada3SDoug Rabson 2132dfdcada3SDoug Rabson return (n); 2133dfdcada3SDoug Rabson } 2134dfdcada3SDoug Rabson 2135dfdcada3SDoug Rabson /* 2136dfdcada3SDoug Rabson * Calculate the sub-set of vertices v from the affected region [y..x] 2137dfdcada3SDoug Rabson * where v reaches x. Return the number of vertices in this subset. 2138dfdcada3SDoug Rabson */ 2139dfdcada3SDoug Rabson static int 2140dfdcada3SDoug Rabson graph_delta_backward(struct owner_graph *g, struct owner_vertex *x, 2141dfdcada3SDoug Rabson struct owner_vertex *y, struct owner_vertex_list *delta) 2142dfdcada3SDoug Rabson { 2143dfdcada3SDoug Rabson uint32_t gen; 2144dfdcada3SDoug Rabson struct owner_vertex *v; 2145dfdcada3SDoug Rabson struct owner_edge *e; 2146dfdcada3SDoug Rabson int n; 2147dfdcada3SDoug Rabson 2148dfdcada3SDoug Rabson /* 2149dfdcada3SDoug Rabson * We start with a set containing just x. Then for each vertex 2150dfdcada3SDoug Rabson * v in the set so far unprocessed, we add each vertex that v 2151dfdcada3SDoug Rabson * has an in-edge from and that is within the affected region 2152dfdcada3SDoug Rabson * [y..x]. 
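 * New members are prepended and the walk moves backwards with
 * TAILQ_PREV(), mirroring the forward pass above.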
2153dfdcada3SDoug Rabson */
2154dfdcada3SDoug Rabson TAILQ_INIT(delta);
2155dfdcada3SDoug Rabson TAILQ_INSERT_TAIL(delta, x, v_link);
2156dfdcada3SDoug Rabson v = x;
2157dfdcada3SDoug Rabson n = 1;
2158dfdcada3SDoug Rabson gen = g->g_gen;
2159dfdcada3SDoug Rabson while (v) {
2160dfdcada3SDoug Rabson LIST_FOREACH(e, &v->v_inedges, e_inlink) {
2161dfdcada3SDoug Rabson if (e->e_from->v_order > y->v_order
2162dfdcada3SDoug Rabson && e->e_from->v_gen != gen) {
2163dfdcada3SDoug Rabson e->e_from->v_gen = gen;
2164dfdcada3SDoug Rabson TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
2165dfdcada3SDoug Rabson n++;
2166dfdcada3SDoug Rabson }
2167dfdcada3SDoug Rabson }
2168dfdcada3SDoug Rabson v = TAILQ_PREV(v, owner_vertex_list, v_link);
2169dfdcada3SDoug Rabson }
2170dfdcada3SDoug Rabson 
2171dfdcada3SDoug Rabson return (n);
2172dfdcada3SDoug Rabson }
2173dfdcada3SDoug Rabson 
2174dfdcada3SDoug Rabson static int
2175dfdcada3SDoug Rabson graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
2176dfdcada3SDoug Rabson {
2177dfdcada3SDoug Rabson struct owner_vertex *v;
2178dfdcada3SDoug Rabson int i, j;
2179dfdcada3SDoug Rabson 
2180dfdcada3SDoug Rabson TAILQ_FOREACH(v, set, v_link) {
2181dfdcada3SDoug Rabson for (i = n;
2182dfdcada3SDoug Rabson i > 0 && indices[i - 1] > v->v_order; i--)
2183dfdcada3SDoug Rabson ;
2184dfdcada3SDoug Rabson for (j = n - 1; j >= i; j--)
2185dfdcada3SDoug Rabson indices[j + 1] = indices[j];
2186dfdcada3SDoug Rabson indices[i] = v->v_order;
2187dfdcada3SDoug Rabson n++;
2188dfdcada3SDoug Rabson }
2189dfdcada3SDoug Rabson 
2190dfdcada3SDoug Rabson return (n);
2191dfdcada3SDoug Rabson }
2192dfdcada3SDoug Rabson 
2193dfdcada3SDoug Rabson static int
2194dfdcada3SDoug Rabson graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
2195dfdcada3SDoug Rabson struct owner_vertex_list *set)
2196dfdcada3SDoug Rabson {
2197dfdcada3SDoug Rabson struct owner_vertex *v, *vlowest;
2198dfdcada3SDoug Rabson 
2199dfdcada3SDoug Rabson while (!TAILQ_EMPTY(set)) {
2200dfdcada3SDoug Rabson vlowest = NULL;
2201dfdcada3SDoug Rabson TAILQ_FOREACH(v, set, v_link) {
2202dfdcada3SDoug Rabson if (!vlowest || v->v_order < vlowest->v_order)
2203dfdcada3SDoug Rabson vlowest = v;
2204dfdcada3SDoug Rabson }
2205dfdcada3SDoug Rabson TAILQ_REMOVE(set, vlowest, v_link);
2206dfdcada3SDoug Rabson vlowest->v_order = indices[nextunused];
2207dfdcada3SDoug Rabson g->g_vertices[vlowest->v_order] = vlowest;
2208dfdcada3SDoug Rabson nextunused++;
2209dfdcada3SDoug Rabson }
2210dfdcada3SDoug Rabson 
2211dfdcada3SDoug Rabson return (nextunused);
2212dfdcada3SDoug Rabson }
2213dfdcada3SDoug Rabson 
2214dfdcada3SDoug Rabson static int
2215dfdcada3SDoug Rabson graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
2216dfdcada3SDoug Rabson struct owner_vertex *y)
2217dfdcada3SDoug Rabson {
2218dfdcada3SDoug Rabson struct owner_edge *e;
2219dfdcada3SDoug Rabson struct owner_vertex_list deltaF, deltaB;
2220dfdcada3SDoug Rabson int nF, nB, n, vi, i;
2221dfdcada3SDoug Rabson int *indices;
2222dfdcada3SDoug Rabson 
2223dfdcada3SDoug Rabson sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2224dfdcada3SDoug Rabson 
2225dfdcada3SDoug Rabson LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2226dfdcada3SDoug Rabson if (e->e_to == y) {
2227dfdcada3SDoug Rabson e->e_refs++;
2228dfdcada3SDoug Rabson return (0);
222992dc7331SDavid Greenman }
223092dc7331SDavid Greenman }
223192dc7331SDavid Greenman 
223292dc7331SDavid Greenman #ifdef LOCKF_DEBUG
2233dfdcada3SDoug Rabson if (lockf_debug & 8) {
2234dfdcada3SDoug Rabson printf("adding edge %d:", x->v_order);
2235dfdcada3SDoug Rabson lf_print_owner(x->v_owner);
2236dfdcada3SDoug Rabson printf(" -> %d:", y->v_order);
2237dfdcada3SDoug Rabson lf_print_owner(y->v_owner);
2238dfdcada3SDoug Rabson printf("\n");
2239dfdcada3SDoug Rabson }
2240dfdcada3SDoug Rabson #endif
2241dfdcada3SDoug Rabson if (y->v_order < x->v_order) {
2242dfdcada3SDoug Rabson /*
2243dfdcada3SDoug Rabson * The new edge violates the order. First find the set
2244dfdcada3SDoug Rabson * of affected vertices reachable from y (deltaF) and
2245dfdcada3SDoug Rabson * the set of affected vertices that reach x
2246dfdcada3SDoug Rabson * (deltaB), using the graph generation number to
2247dfdcada3SDoug Rabson * detect whether we have visited a given vertex
2248dfdcada3SDoug Rabson * already. We re-order the graph so that each vertex
2249dfdcada3SDoug Rabson * in deltaB appears before each vertex in deltaF.
2250dfdcada3SDoug Rabson *
2251dfdcada3SDoug Rabson * If x is a member of deltaF, then the new edge would
2252dfdcada3SDoug Rabson * create a cycle. Otherwise, we may assume that
2253dfdcada3SDoug Rabson * deltaF and deltaB are disjoint.
2254dfdcada3SDoug Rabson */
2255dfdcada3SDoug Rabson g->g_gen++;
2256dfdcada3SDoug Rabson if (g->g_gen == 0) {
2257dfdcada3SDoug Rabson /*
2258dfdcada3SDoug Rabson * Generation wrap.
2259dfdcada3SDoug Rabson */
2260dfdcada3SDoug Rabson for (vi = 0; vi < g->g_size; vi++) {
2261dfdcada3SDoug Rabson g->g_vertices[vi]->v_gen = 0;
2262dfdcada3SDoug Rabson }
2263dfdcada3SDoug Rabson g->g_gen++;
2264dfdcada3SDoug Rabson }
2265dfdcada3SDoug Rabson nF = graph_delta_forward(g, x, y, &deltaF);
2266dfdcada3SDoug Rabson if (nF < 0) {
2267dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG
2268dfdcada3SDoug Rabson if (lockf_debug & 8) {
2269dfdcada3SDoug Rabson struct owner_vertex_list path;
2270dfdcada3SDoug Rabson printf("deadlock: ");
2271dfdcada3SDoug Rabson TAILQ_INIT(&path);
2272dfdcada3SDoug Rabson graph_reaches(y, x, &path);
2273dfdcada3SDoug Rabson graph_print_vertices(&path);
2274dfdcada3SDoug Rabson }
2275dfdcada3SDoug Rabson #endif
2276dfdcada3SDoug Rabson return (EDEADLK);
2277dfdcada3SDoug Rabson }
2278dfdcada3SDoug Rabson 
2279dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG
2280dfdcada3SDoug Rabson if (lockf_debug & 8) {
2281dfdcada3SDoug Rabson printf("re-ordering graph vertices\n");
2282dfdcada3SDoug Rabson printf("deltaF = ");
2283dfdcada3SDoug Rabson graph_print_vertices(&deltaF);
2284dfdcada3SDoug Rabson }
2285dfdcada3SDoug Rabson #endif
2286dfdcada3SDoug Rabson 
2287dfdcada3SDoug Rabson nB = graph_delta_backward(g, x, y, &deltaB);
2288dfdcada3SDoug Rabson 
2289dfdcada3SDoug Rabson #ifdef LOCKF_DEBUG
2290dfdcada3SDoug Rabson if (lockf_debug & 8) {
2291dfdcada3SDoug Rabson printf("deltaB = ");
2292dfdcada3SDoug Rabson graph_print_vertices(&deltaB);
2293dfdcada3SDoug Rabson }
2294dfdcada3SDoug Rabson #endif
2295dfdcada3SDoug Rabson 
2296dfdcada3SDoug Rabson /*
2297dfdcada3SDoug Rabson * We first build a set of vertex indices (vertex
2298dfdcada3SDoug Rabson * order values) that we may use, then we re-assign
2299dfdcada3SDoug Rabson * orders first to those vertices in deltaB, then to
2300dfdcada3SDoug Rabson * deltaF. Note that the contents of deltaF and deltaB
2301dfdcada3SDoug Rabson * may be partially disordered - we perform an
2302dfdcada3SDoug Rabson * insertion sort while building our index set.
		nF = graph_delta_forward(g, x, y, &deltaF);
		if (nF < 0) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 8) {
				struct owner_vertex_list path;
				printf("deadlock: ");
				TAILQ_INIT(&path);
				graph_reaches(y, x, &path);
				graph_print_vertices(&path);
			}
#endif
			return (EDEADLK);
		}

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("re-ordering graph vertices\n");
			printf("deltaF = ");
			graph_print_vertices(&deltaF);
		}
#endif

		nB = graph_delta_backward(g, x, y, &deltaB);

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("deltaB = ");
			graph_print_vertices(&deltaB);
		}
#endif

		/*
		 * We first build a set of vertex indices (vertex
		 * order values) that we may use, then we re-assign
		 * orders first to those vertices in deltaB, then to
		 * deltaF.  Note that the contents of deltaF and deltaB
		 * may be partially disordered - we perform an
		 * insertion sort while building our index set.
		 */
		indices = g->g_indexbuf;
		n = graph_add_indices(indices, 0, &deltaF);
		graph_add_indices(indices, n, &deltaB);

		/*
		 * We must also be sure to maintain the relative
		 * ordering of deltaF and deltaB when re-assigning
		 * vertices.  We do this by iteratively removing the
		 * lowest ordered element from the set and assigning
		 * it the next value from our new ordering.
		 */
		i = graph_assign_indices(g, indices, 0, &deltaB);
		graph_assign_indices(g, indices, i, &deltaF);

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			struct owner_vertex_list set;
			TAILQ_INIT(&set);
			for (i = 0; i < nB + nF; i++)
				TAILQ_INSERT_TAIL(&set,
				    g->g_vertices[indices[i]], v_link);
			printf("new ordering = ");
			graph_print_vertices(&set);
		}
#endif
	}

	KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));

#ifdef LOCKF_DEBUG
	if (lockf_debug & 8) {
		graph_check(g, TRUE);
	}
#endif

	e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);

	LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
	LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
	e->e_refs = 1;
	e->e_from = x;
	e->e_to = y;

	return (0);
}
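/*
 * A minimal usage sketch for the edge primitives above and below.
 * The graph and vertex names here are placeholders for illustration;
 * the lock is the one named in the assertions.  A blocked lock
 * request records its dependency on the blocking owner with:
 *
 *	sx_xlock(&lf_owner_graph_lock);
 *	error = graph_add_edge(graph, blocked_vertex, blocking_vertex);
 *	sx_xunlock(&lf_owner_graph_lock);
 *
 * where a non-zero (EDEADLK) result tells the caller to fail the
 * request instead of sleeping, and graph_remove_edge() is called
 * under the same lock once the request is granted or cancelled.
 */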
/*
 * Remove an edge x->y from the graph.
 */
static void
graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y)
{
	struct owner_edge *e;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
		if (e->e_to == y)
			break;
	}
	KASSERT(e, ("Removing non-existent edge from deadlock graph"));

	e->e_refs--;
	if (e->e_refs == 0) {
#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("removing edge %d:", x->v_order);
			lf_print_owner(x->v_owner);
			printf(" -> %d:", y->v_order);
			lf_print_owner(y->v_owner);
			printf("\n");
		}
#endif
		LIST_REMOVE(e, e_outlink);
		LIST_REMOVE(e, e_inlink);
		free(e, M_LOCKF);
	}
}

/*
 * Allocate a vertex for a lock owner.  The M_WAITOK allocations
 * sleep rather than fail, so this always returns a vertex.
 */
static struct owner_vertex *
graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
{
	struct owner_vertex *v;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
	if (g->g_size == g->g_space) {
		/*
		 * Double the vertex array.  The index buffer is pure
		 * scratch space for graph_add_edge(), so it is freed
		 * and reallocated at the new size instead of being
		 * realloc'ed.
		 */
		g->g_vertices = realloc(g->g_vertices,
		    2 * g->g_space * sizeof(struct owner_vertex *),
		    M_LOCKF, M_WAITOK);
		free(g->g_indexbuf, M_LOCKF);
		g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
		    M_LOCKF, M_WAITOK);
		g->g_space = 2 * g->g_space;
	}
	v->v_order = g->g_size;
	v->v_gen = g->g_gen;
	g->g_vertices[g->g_size] = v;
	g->g_size++;

	LIST_INIT(&v->v_outedges);
	LIST_INIT(&v->v_inedges);
	v->v_owner = lo;

	return (v);
}
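/*
 * Free a vertex, closing up the gap its order value leaves in the
 * graph's array.  The caller must already have removed every edge
 * incident on the vertex; the assertions below enforce this.
 */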
static void
graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
{
	struct owner_vertex *w;
	int i;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
	KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));

	/*
	 * Remove from the graph's array and close up the gap,
	 * renumbering the other vertices.
	 */
	for (i = v->v_order + 1; i < g->g_size; i++) {
		w = g->g_vertices[i];
		w->v_order--;
		g->g_vertices[i - 1] = w;
	}
	g->g_size--;

	free(v, M_LOCKF);
}

/*
 * Initialize an owner graph with an initial capacity of ten vertices.
 */
static struct owner_graph *
graph_init(struct owner_graph *g)
{

	g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
	    M_LOCKF, M_WAITOK);
	g->g_size = 0;
	g->g_space = 10;
	g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
	g->g_gen = 0;

	return (g);
}

#ifdef LOCKF_DEBUG
/*
 * Print a description of a lock owner.
 */
static void
lf_print_owner(struct lock_owner *lo)
{

	if (lo->lo_flags & F_REMOTE) {
		printf("remote pid %d, system %d",
		    lo->lo_pid, lo->lo_sysid);
	} else if (lo->lo_flags & F_FLOCK) {
		printf("file %p", lo->lo_id);
	} else {
		printf("local pid %d", lo->lo_pid);
	}
}
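/*
 * For reference, a line produced by lf_print() below looks roughly
 * like this (the tag, pointer and device values are illustrative):
 *
 *	tag: lock 0xfffff800deadbee0 for local pid 1234 in ino 42
 *	    on dev <ada0p1>, exclusive, start 0, end EOF
 */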
"unlock" : "unknown", 2491dfdcada3SDoug Rabson (intmax_t)lock->lf_start); 2492dfdcada3SDoug Rabson if (lock->lf_end == OFF_MAX) 2493dfdcada3SDoug Rabson printf("EOF"); 249459aff5fcSAlfred Perlstein else 2495dfdcada3SDoug Rabson printf("%jd", (intmax_t)lock->lf_end); 2496dfdcada3SDoug Rabson if (!LIST_EMPTY(&lock->lf_outedges)) 2497dfdcada3SDoug Rabson printf(" block %p\n", 2498dfdcada3SDoug Rabson (void *)LIST_FIRST(&lock->lf_outedges)->le_to); 249992dc7331SDavid Greenman else 250092dc7331SDavid Greenman printf("\n"); 250192dc7331SDavid Greenman } 250292dc7331SDavid Greenman 2503013e6650SJeff Roberson static void 2504dfdcada3SDoug Rabson lf_printlist(char *tag, struct lockf_entry *lock) 250592dc7331SDavid Greenman { 2506dfdcada3SDoug Rabson struct lockf_entry *lf, *blk; 2507dfdcada3SDoug Rabson struct lockf_edge *e; 250892dc7331SDavid Greenman 250959aff5fcSAlfred Perlstein if (lock->lf_inode == (struct inode *)0) 251059aff5fcSAlfred Perlstein return; 251159aff5fcSAlfred Perlstein 251297eb8cfaSPoul-Henning Kamp printf("%s: Lock list for ino %ju on dev <%s>:\n", 2513a7a00d05SMaxime Henrion tag, (uintmax_t)lock->lf_inode->i_number, 2514a5ec35dfSSepherosa Ziehau devtoname(ITODEV(lock->lf_inode))); 2515a365ea5fSDoug Rabson LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) { 2516d974cf4dSBruce Evans printf("\tlock %p for ",(void *)lf); 2517dfdcada3SDoug Rabson lf_print_owner(lock->lf_owner); 2518a7a00d05SMaxime Henrion printf(", %s, start %jd, end %jd", 251992dc7331SDavid Greenman lf->lf_type == F_RDLCK ? "shared" : 252092dc7331SDavid Greenman lf->lf_type == F_WRLCK ? "exclusive" : 252192dc7331SDavid Greenman lf->lf_type == F_UNLCK ? "unlock" : 2522a7a00d05SMaxime Henrion "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end); 2523dfdcada3SDoug Rabson LIST_FOREACH(e, &lf->lf_outedges, le_outlink) { 2524dfdcada3SDoug Rabson blk = e->le_to; 2525d974cf4dSBruce Evans printf("\n\t\tlock request %p for ", (void *)blk); 2526dfdcada3SDoug Rabson lf_print_owner(blk->lf_owner); 2527a7a00d05SMaxime Henrion printf(", %s, start %jd, end %jd", 2528996c772fSJohn Dyson blk->lf_type == F_RDLCK ? "shared" : 2529996c772fSJohn Dyson blk->lf_type == F_WRLCK ? "exclusive" : 2530996c772fSJohn Dyson blk->lf_type == F_UNLCK ? "unlock" : 2531a7a00d05SMaxime Henrion "unknown", (intmax_t)blk->lf_start, 2532a7a00d05SMaxime Henrion (intmax_t)blk->lf_end); 2533dfdcada3SDoug Rabson if (!LIST_EMPTY(&blk->lf_inedges)) 2534996c772fSJohn Dyson panic("lf_printlist: bad list"); 2535996c772fSJohn Dyson } 253692dc7331SDavid Greenman printf("\n"); 253792dc7331SDavid Greenman } 253892dc7331SDavid Greenman } 253992dc7331SDavid Greenman #endif /* LOCKF_DEBUG */ 2540