1 /*- 2 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ 3 * Authors: Doug Rabson <dfr@rabson.org> 4 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 /*- 28 * Copyright (c) 1982, 1986, 1989, 1993 29 * The Regents of the University of California. All rights reserved. 30 * 31 * This code is derived from software contributed to Berkeley by 32 * Scooter Morris at Genentech Inc. 33 * 34 * Redistribution and use in source and binary forms, with or without 35 * modification, are permitted provided that the following conditions 36 * are met: 37 * 1. Redistributions of source code must retain the above copyright 38 * notice, this list of conditions and the following disclaimer. 39 * 2. Redistributions in binary form must reproduce the above copyright 40 * notice, this list of conditions and the following disclaimer in the 41 * documentation and/or other materials provided with the distribution. 42 * 4. Neither the name of the University nor the names of its contributors 43 * may be used to endorse or promote products derived from this software 44 * without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 56 * SUCH DAMAGE. 57 * 58 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94 59 */ 60 61 #include <sys/cdefs.h> 62 __FBSDID("$FreeBSD$"); 63 64 #include "opt_debug_lockf.h" 65 66 #include <sys/param.h> 67 #include <sys/systm.h> 68 #include <sys/hash.h> 69 #include <sys/kernel.h> 70 #include <sys/limits.h> 71 #include <sys/lock.h> 72 #include <sys/mount.h> 73 #include <sys/mutex.h> 74 #include <sys/proc.h> 75 #include <sys/sx.h> 76 #include <sys/unistd.h> 77 #include <sys/vnode.h> 78 #include <sys/malloc.h> 79 #include <sys/fcntl.h> 80 #include <sys/lockf.h> 81 #include <sys/taskqueue.h> 82 83 #ifdef LOCKF_DEBUG 84 #include <sys/sysctl.h> 85 86 #include <ufs/ufs/quota.h> 87 #include <ufs/ufs/inode.h> 88 89 static int lockf_debug = 0; /* control debug output */ 90 SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, ""); 91 #endif 92 93 MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures"); 94 95 struct owner_edge; 96 struct owner_vertex; 97 struct owner_vertex_list; 98 struct owner_graph; 99 100 #define NOLOCKF (struct lockf_entry *)0 101 #define SELF 0x1 102 #define OTHERS 0x2 103 static void lf_init(void *); 104 static int lf_hash_owner(caddr_t, struct flock *, int); 105 static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *, 106 int); 107 static struct lockf_entry * 108 lf_alloc_lock(struct lock_owner *); 109 static int lf_free_lock(struct lockf_entry *); 110 static int lf_clearlock(struct lockf *, struct lockf_entry *); 111 static int lf_overlaps(struct lockf_entry *, struct lockf_entry *); 112 static int lf_blocks(struct lockf_entry *, struct lockf_entry *); 113 static void lf_free_edge(struct lockf_edge *); 114 static struct lockf_edge * 115 lf_alloc_edge(void); 116 static void lf_alloc_vertex(struct lockf_entry *); 117 static int lf_add_edge(struct lockf_entry *, struct lockf_entry *); 118 static void lf_remove_edge(struct lockf_edge *); 119 static void lf_remove_outgoing(struct lockf_entry *); 120 static void lf_remove_incoming(struct lockf_entry *); 121 static int lf_add_outgoing(struct lockf *, struct lockf_entry *); 122 static int lf_add_incoming(struct lockf *, struct lockf_entry *); 123 static int lf_findoverlap(struct lockf_entry **, struct lockf_entry *, 124 int); 125 static struct lockf_entry * 126 lf_getblock(struct lockf *, struct lockf_entry *); 127 static int lf_getlock(struct lockf *, struct lockf_entry *, struct flock *); 128 static void lf_insert_lock(struct lockf *, struct lockf_entry *); 129 static void lf_wakeup_lock(struct lockf *, struct lockf_entry *); 130 static void lf_update_dependancies(struct lockf *, struct lockf_entry *, 131 int all, struct lockf_entry_list *); 132 static void lf_set_start(struct lockf *, struct lockf_entry *, off_t, 133 struct lockf_entry_list*); 134 static void lf_set_end(struct lockf *, struct lockf_entry *, off_t, 135 struct lockf_entry_list*); 136 static int lf_setlock(struct lockf *, struct lockf_entry *, 137 struct vnode *, void **cookiep); 138 static int lf_cancel(struct lockf *, struct lockf_entry *, void *); 139 static void lf_split(struct lockf *, struct lockf_entry *, 140 struct lockf_entry *, struct lockf_entry_list *); 141 #ifdef LOCKF_DEBUG 142 static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y, 143 struct owner_vertex_list *path); 144 static void graph_check(struct owner_graph *g, int checkorder); 145 static void graph_print_vertices(struct owner_vertex_list *set); 146 #endif 147 static int graph_delta_forward(struct owner_graph *g, 148 struct owner_vertex *x, struct owner_vertex *y, 149 struct owner_vertex_list *delta); 150 static int graph_delta_backward(struct owner_graph *g, 151 struct owner_vertex *x, struct owner_vertex *y, 152 struct owner_vertex_list *delta); 153 static int graph_add_indices(int *indices, int n, 154 struct owner_vertex_list *set); 155 static int graph_assign_indices(struct owner_graph *g, int *indices, 156 int nextunused, struct owner_vertex_list *set); 157 static int graph_add_edge(struct owner_graph *g, 158 struct owner_vertex *x, struct owner_vertex *y); 159 static void graph_remove_edge(struct owner_graph *g, 160 struct owner_vertex *x, struct owner_vertex *y); 161 static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g, 162 struct lock_owner *lo); 163 static void graph_free_vertex(struct owner_graph *g, 164 struct owner_vertex *v); 165 static struct owner_graph * graph_init(struct owner_graph *g); 166 #ifdef LOCKF_DEBUG 167 static void lf_print(char *, struct lockf_entry *); 168 static void lf_printlist(char *, struct lockf_entry *); 169 static void lf_print_owner(struct lock_owner *); 170 #endif 171 172 /* 173 * This structure is used to keep track of both local and remote lock 174 * owners. The lf_owner field of the struct lockf_entry points back at 175 * the lock owner structure. Each possible lock owner (local proc for 176 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid> 177 * pair for remote locks) is represented by a unique instance of 178 * struct lock_owner. 179 * 180 * If a lock owner has a lock that blocks some other lock or a lock 181 * that is waiting for some other lock, it also has a vertex in the 182 * owner_graph below. 183 * 184 * Locks: 185 * (s) locked by state->ls_lock 186 * (S) locked by lf_lock_states_lock 187 * (l) locked by lf_lock_owners_lock 188 * (g) locked by lf_owner_graph_lock 189 * (c) const until freeing 190 */ 191 #define LOCK_OWNER_HASH_SIZE 256 192 193 struct lock_owner { 194 LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */ 195 int lo_refs; /* (l) Number of locks referring to this */ 196 int lo_flags; /* (c) Flags passwd to lf_advlock */ 197 caddr_t lo_id; /* (c) Id value passed to lf_advlock */ 198 pid_t lo_pid; /* (c) Process Id of the lock owner */ 199 int lo_sysid; /* (c) System Id of the lock owner */ 200 struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */ 201 }; 202 203 LIST_HEAD(lock_owner_list, lock_owner); 204 205 static struct sx lf_lock_states_lock; 206 static struct lockf_list lf_lock_states; /* (S) */ 207 static struct sx lf_lock_owners_lock; 208 static struct lock_owner_list lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */ 209 210 /* 211 * Structures for deadlock detection. 212 * 213 * We have two types of directed graph, the first is the set of locks, 214 * both active and pending on a vnode. Within this graph, active locks 215 * are terminal nodes in the graph (i.e. have no out-going 216 * edges). Pending locks have out-going edges to each blocking active 217 * lock that prevents the lock from being granted and also to each 218 * older pending lock that would block them if it was active. The 219 * graph for each vnode is naturally acyclic; new edges are only ever 220 * added to or from new nodes (either new pending locks which only add 221 * out-going edges or new active locks which only add in-coming edges) 222 * therefore they cannot create loops in the lock graph. 223 * 224 * The second graph is a global graph of lock owners. Each lock owner 225 * is a vertex in that graph and an edge is added to the graph 226 * whenever an edge is added to a vnode graph, with end points 227 * corresponding to owner of the new pending lock and the owner of the 228 * lock upon which it waits. In order to prevent deadlock, we only add 229 * an edge to this graph if the new edge would not create a cycle. 230 * 231 * The lock owner graph is topologically sorted, i.e. if a node has 232 * any outgoing edges, then it has an order strictly less than any 233 * node to which it has an outgoing edge. We preserve this ordering 234 * (and detect cycles) on edge insertion using Algorithm PK from the 235 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic 236 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article 237 * No. 1.7) 238 */ 239 struct owner_vertex; 240 241 struct owner_edge { 242 LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */ 243 LIST_ENTRY(owner_edge) e_inlink; /* (g) link to's in-edge list */ 244 int e_refs; /* (g) number of times added */ 245 struct owner_vertex *e_from; /* (c) out-going from here */ 246 struct owner_vertex *e_to; /* (c) in-coming to here */ 247 }; 248 LIST_HEAD(owner_edge_list, owner_edge); 249 250 struct owner_vertex { 251 TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */ 252 uint32_t v_gen; /* (g) workspace for edge insertion */ 253 int v_order; /* (g) order of vertex in graph */ 254 struct owner_edge_list v_outedges;/* (g) list of out-edges */ 255 struct owner_edge_list v_inedges; /* (g) list of in-edges */ 256 struct lock_owner *v_owner; /* (c) corresponding lock owner */ 257 }; 258 TAILQ_HEAD(owner_vertex_list, owner_vertex); 259 260 struct owner_graph { 261 struct owner_vertex** g_vertices; /* (g) pointers to vertices */ 262 int g_size; /* (g) number of vertices */ 263 int g_space; /* (g) space allocated for vertices */ 264 int *g_indexbuf; /* (g) workspace for loop detection */ 265 uint32_t g_gen; /* (g) increment when re-ordering */ 266 }; 267 268 static struct sx lf_owner_graph_lock; 269 static struct owner_graph lf_owner_graph; 270 271 /* 272 * Initialise various structures and locks. 273 */ 274 static void 275 lf_init(void *dummy) 276 { 277 int i; 278 279 sx_init(&lf_lock_states_lock, "lock states lock"); 280 LIST_INIT(&lf_lock_states); 281 282 sx_init(&lf_lock_owners_lock, "lock owners lock"); 283 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) 284 LIST_INIT(&lf_lock_owners[i]); 285 286 sx_init(&lf_owner_graph_lock, "owner graph lock"); 287 graph_init(&lf_owner_graph); 288 } 289 SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL); 290 291 /* 292 * Generate a hash value for a lock owner. 293 */ 294 static int 295 lf_hash_owner(caddr_t id, struct flock *fl, int flags) 296 { 297 uint32_t h; 298 299 if (flags & F_REMOTE) { 300 h = HASHSTEP(0, fl->l_pid); 301 h = HASHSTEP(h, fl->l_sysid); 302 } else if (flags & F_FLOCK) { 303 h = ((uintptr_t) id) >> 7; 304 } else { 305 struct proc *p = (struct proc *) id; 306 h = HASHSTEP(0, p->p_pid); 307 h = HASHSTEP(h, 0); 308 } 309 310 return (h % LOCK_OWNER_HASH_SIZE); 311 } 312 313 /* 314 * Return true if a lock owner matches the details passed to 315 * lf_advlock. 316 */ 317 static int 318 lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl, 319 int flags) 320 { 321 if (flags & F_REMOTE) { 322 return lo->lo_pid == fl->l_pid 323 && lo->lo_sysid == fl->l_sysid; 324 } else { 325 return lo->lo_id == id; 326 } 327 } 328 329 static struct lockf_entry * 330 lf_alloc_lock(struct lock_owner *lo) 331 { 332 struct lockf_entry *lf; 333 334 lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO); 335 336 #ifdef LOCKF_DEBUG 337 if (lockf_debug & 4) 338 printf("Allocated lock %p\n", lf); 339 #endif 340 if (lo) { 341 sx_xlock(&lf_lock_owners_lock); 342 lo->lo_refs++; 343 sx_xunlock(&lf_lock_owners_lock); 344 lf->lf_owner = lo; 345 } 346 347 return (lf); 348 } 349 350 static int 351 lf_free_lock(struct lockf_entry *lock) 352 { 353 354 KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock)); 355 if (--lock->lf_refs > 0) 356 return (0); 357 /* 358 * Adjust the lock_owner reference count and 359 * reclaim the entry if this is the last lock 360 * for that owner. 361 */ 362 struct lock_owner *lo = lock->lf_owner; 363 if (lo) { 364 KASSERT(LIST_EMPTY(&lock->lf_outedges), 365 ("freeing lock with dependancies")); 366 KASSERT(LIST_EMPTY(&lock->lf_inedges), 367 ("freeing lock with dependants")); 368 sx_xlock(&lf_lock_owners_lock); 369 KASSERT(lo->lo_refs > 0, ("lock owner refcount")); 370 lo->lo_refs--; 371 if (lo->lo_refs == 0) { 372 #ifdef LOCKF_DEBUG 373 if (lockf_debug & 1) 374 printf("lf_free_lock: freeing lock owner %p\n", 375 lo); 376 #endif 377 if (lo->lo_vertex) { 378 sx_xlock(&lf_owner_graph_lock); 379 graph_free_vertex(&lf_owner_graph, 380 lo->lo_vertex); 381 sx_xunlock(&lf_owner_graph_lock); 382 } 383 LIST_REMOVE(lo, lo_link); 384 free(lo, M_LOCKF); 385 #ifdef LOCKF_DEBUG 386 if (lockf_debug & 4) 387 printf("Freed lock owner %p\n", lo); 388 #endif 389 } 390 sx_unlock(&lf_lock_owners_lock); 391 } 392 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) { 393 vrele(lock->lf_vnode); 394 lock->lf_vnode = NULL; 395 } 396 #ifdef LOCKF_DEBUG 397 if (lockf_debug & 4) 398 printf("Freed lock %p\n", lock); 399 #endif 400 free(lock, M_LOCKF); 401 return (1); 402 } 403 404 /* 405 * Advisory record locking support 406 */ 407 int 408 lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep, 409 u_quad_t size) 410 { 411 struct lockf *state, *freestate = NULL; 412 struct flock *fl = ap->a_fl; 413 struct lockf_entry *lock; 414 struct vnode *vp = ap->a_vp; 415 caddr_t id = ap->a_id; 416 int flags = ap->a_flags; 417 int hash; 418 struct lock_owner *lo; 419 off_t start, end, oadd; 420 int error; 421 422 /* 423 * Handle the F_UNLKSYS case first - no need to mess about 424 * creating a lock owner for this one. 425 */ 426 if (ap->a_op == F_UNLCKSYS) { 427 lf_clearremotesys(fl->l_sysid); 428 return (0); 429 } 430 431 /* 432 * Convert the flock structure into a start and end. 433 */ 434 switch (fl->l_whence) { 435 436 case SEEK_SET: 437 case SEEK_CUR: 438 /* 439 * Caller is responsible for adding any necessary offset 440 * when SEEK_CUR is used. 441 */ 442 start = fl->l_start; 443 break; 444 445 case SEEK_END: 446 if (size > OFF_MAX || 447 (fl->l_start > 0 && size > OFF_MAX - fl->l_start)) 448 return (EOVERFLOW); 449 start = size + fl->l_start; 450 break; 451 452 default: 453 return (EINVAL); 454 } 455 if (start < 0) 456 return (EINVAL); 457 if (fl->l_len < 0) { 458 if (start == 0) 459 return (EINVAL); 460 end = start - 1; 461 start += fl->l_len; 462 if (start < 0) 463 return (EINVAL); 464 } else if (fl->l_len == 0) { 465 end = OFF_MAX; 466 } else { 467 oadd = fl->l_len - 1; 468 if (oadd > OFF_MAX - start) 469 return (EOVERFLOW); 470 end = start + oadd; 471 } 472 /* 473 * Avoid the common case of unlocking when inode has no locks. 474 */ 475 VI_LOCK(vp); 476 if ((*statep) == NULL) { 477 if (ap->a_op != F_SETLK) { 478 fl->l_type = F_UNLCK; 479 VI_UNLOCK(vp); 480 return (0); 481 } 482 } 483 VI_UNLOCK(vp); 484 485 /* 486 * Map our arguments to an existing lock owner or create one 487 * if this is the first time we have seen this owner. 488 */ 489 hash = lf_hash_owner(id, fl, flags); 490 sx_xlock(&lf_lock_owners_lock); 491 LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link) 492 if (lf_owner_matches(lo, id, fl, flags)) 493 break; 494 if (!lo) { 495 /* 496 * We initialise the lock with a reference 497 * count which matches the new lockf_entry 498 * structure created below. 499 */ 500 lo = malloc(sizeof(struct lock_owner), M_LOCKF, 501 M_WAITOK|M_ZERO); 502 #ifdef LOCKF_DEBUG 503 if (lockf_debug & 4) 504 printf("Allocated lock owner %p\n", lo); 505 #endif 506 507 lo->lo_refs = 1; 508 lo->lo_flags = flags; 509 lo->lo_id = id; 510 if (flags & F_REMOTE) { 511 lo->lo_pid = fl->l_pid; 512 lo->lo_sysid = fl->l_sysid; 513 } else if (flags & F_FLOCK) { 514 lo->lo_pid = -1; 515 lo->lo_sysid = 0; 516 } else { 517 struct proc *p = (struct proc *) id; 518 lo->lo_pid = p->p_pid; 519 lo->lo_sysid = 0; 520 } 521 lo->lo_vertex = NULL; 522 523 #ifdef LOCKF_DEBUG 524 if (lockf_debug & 1) { 525 printf("lf_advlockasync: new lock owner %p ", lo); 526 lf_print_owner(lo); 527 printf("\n"); 528 } 529 #endif 530 531 LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link); 532 } else { 533 /* 534 * We have seen this lock owner before, increase its 535 * reference count to account for the new lockf_entry 536 * structure we create below. 537 */ 538 lo->lo_refs++; 539 } 540 sx_xunlock(&lf_lock_owners_lock); 541 542 /* 543 * Create the lockf structure. We initialise the lf_owner 544 * field here instead of in lf_alloc_lock() to avoid paying 545 * the lf_lock_owners_lock tax twice. 546 */ 547 lock = lf_alloc_lock(NULL); 548 lock->lf_refs = 1; 549 lock->lf_start = start; 550 lock->lf_end = end; 551 lock->lf_owner = lo; 552 lock->lf_vnode = vp; 553 if (flags & F_REMOTE) { 554 /* 555 * For remote locks, the caller may release its ref to 556 * the vnode at any time - we have to ref it here to 557 * prevent it from being recycled unexpectedly. 558 */ 559 vref(vp); 560 } 561 562 /* 563 * XXX The problem is that VTOI is ufs specific, so it will 564 * break LOCKF_DEBUG for all other FS's other than UFS because 565 * it casts the vnode->data ptr to struct inode *. 566 */ 567 /* lock->lf_inode = VTOI(ap->a_vp); */ 568 lock->lf_inode = (struct inode *)0; 569 lock->lf_type = fl->l_type; 570 LIST_INIT(&lock->lf_outedges); 571 LIST_INIT(&lock->lf_inedges); 572 lock->lf_async_task = ap->a_task; 573 lock->lf_flags = ap->a_flags; 574 575 /* 576 * Do the requested operation. First find our state structure 577 * and create a new one if necessary - the caller's *statep 578 * variable and the state's ls_threads count is protected by 579 * the vnode interlock. 580 */ 581 VI_LOCK(vp); 582 if (vp->v_iflag & VI_DOOMED) { 583 VI_UNLOCK(vp); 584 lf_free_lock(lock); 585 return (ENOENT); 586 } 587 588 /* 589 * Allocate a state structure if necessary. 590 */ 591 state = *statep; 592 if (state == NULL) { 593 struct lockf *ls; 594 595 VI_UNLOCK(vp); 596 597 ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO); 598 sx_init(&ls->ls_lock, "ls_lock"); 599 LIST_INIT(&ls->ls_active); 600 LIST_INIT(&ls->ls_pending); 601 ls->ls_threads = 1; 602 603 sx_xlock(&lf_lock_states_lock); 604 LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link); 605 sx_xunlock(&lf_lock_states_lock); 606 607 /* 608 * Cope if we lost a race with some other thread while 609 * trying to allocate memory. 610 */ 611 VI_LOCK(vp); 612 if (vp->v_iflag & VI_DOOMED) { 613 VI_UNLOCK(vp); 614 sx_xlock(&lf_lock_states_lock); 615 LIST_REMOVE(ls, ls_link); 616 sx_xunlock(&lf_lock_states_lock); 617 sx_destroy(&ls->ls_lock); 618 free(ls, M_LOCKF); 619 lf_free_lock(lock); 620 return (ENOENT); 621 } 622 if ((*statep) == NULL) { 623 state = *statep = ls; 624 VI_UNLOCK(vp); 625 } else { 626 state = *statep; 627 state->ls_threads++; 628 VI_UNLOCK(vp); 629 630 sx_xlock(&lf_lock_states_lock); 631 LIST_REMOVE(ls, ls_link); 632 sx_xunlock(&lf_lock_states_lock); 633 sx_destroy(&ls->ls_lock); 634 free(ls, M_LOCKF); 635 } 636 } else { 637 state->ls_threads++; 638 VI_UNLOCK(vp); 639 } 640 641 sx_xlock(&state->ls_lock); 642 /* 643 * Recheck the doomed vnode after state->ls_lock is 644 * locked. lf_purgelocks() requires that no new threads add 645 * pending locks when vnode is marked by VI_DOOMED flag. 646 */ 647 VI_LOCK(vp); 648 if (vp->v_iflag & VI_DOOMED) { 649 VI_UNLOCK(vp); 650 lf_free_lock(lock); 651 return (ENOENT); 652 } 653 VI_UNLOCK(vp); 654 655 switch (ap->a_op) { 656 case F_SETLK: 657 error = lf_setlock(state, lock, vp, ap->a_cookiep); 658 break; 659 660 case F_UNLCK: 661 error = lf_clearlock(state, lock); 662 lf_free_lock(lock); 663 break; 664 665 case F_GETLK: 666 error = lf_getlock(state, lock, fl); 667 lf_free_lock(lock); 668 break; 669 670 case F_CANCEL: 671 if (ap->a_cookiep) 672 error = lf_cancel(state, lock, *ap->a_cookiep); 673 else 674 error = EINVAL; 675 lf_free_lock(lock); 676 break; 677 678 default: 679 lf_free_lock(lock); 680 error = EINVAL; 681 break; 682 } 683 684 #ifdef INVARIANTS 685 /* 686 * Check for some can't happen stuff. In this case, the active 687 * lock list becoming disordered or containing mutually 688 * blocking locks. We also check the pending list for locks 689 * which should be active (i.e. have no out-going edges). 690 */ 691 LIST_FOREACH(lock, &state->ls_active, lf_link) { 692 struct lockf_entry *lf; 693 if (LIST_NEXT(lock, lf_link)) 694 KASSERT((lock->lf_start 695 <= LIST_NEXT(lock, lf_link)->lf_start), 696 ("locks disordered")); 697 LIST_FOREACH(lf, &state->ls_active, lf_link) { 698 if (lock == lf) 699 break; 700 KASSERT(!lf_blocks(lock, lf), 701 ("two conflicting active locks")); 702 if (lock->lf_owner == lf->lf_owner) 703 KASSERT(!lf_overlaps(lock, lf), 704 ("two overlapping locks from same owner")); 705 } 706 } 707 LIST_FOREACH(lock, &state->ls_pending, lf_link) { 708 KASSERT(!LIST_EMPTY(&lock->lf_outedges), 709 ("pending lock which should be active")); 710 } 711 #endif 712 sx_xunlock(&state->ls_lock); 713 714 /* 715 * If we have removed the last active lock on the vnode and 716 * this is the last thread that was in-progress, we can free 717 * the state structure. We update the caller's pointer inside 718 * the vnode interlock but call free outside. 719 * 720 * XXX alternatively, keep the state structure around until 721 * the filesystem recycles - requires a callback from the 722 * filesystem. 723 */ 724 VI_LOCK(vp); 725 726 state->ls_threads--; 727 wakeup(state); 728 if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) { 729 KASSERT(LIST_EMPTY(&state->ls_pending), 730 ("freeing state with pending locks")); 731 freestate = state; 732 *statep = NULL; 733 } 734 735 VI_UNLOCK(vp); 736 737 if (freestate) { 738 sx_xlock(&lf_lock_states_lock); 739 LIST_REMOVE(freestate, ls_link); 740 sx_xunlock(&lf_lock_states_lock); 741 sx_destroy(&freestate->ls_lock); 742 free(freestate, M_LOCKF); 743 } 744 return (error); 745 } 746 747 int 748 lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size) 749 { 750 struct vop_advlockasync_args a; 751 752 a.a_vp = ap->a_vp; 753 a.a_id = ap->a_id; 754 a.a_op = ap->a_op; 755 a.a_fl = ap->a_fl; 756 a.a_flags = ap->a_flags; 757 a.a_task = NULL; 758 a.a_cookiep = NULL; 759 760 return (lf_advlockasync(&a, statep, size)); 761 } 762 763 void 764 lf_purgelocks(struct vnode *vp, struct lockf **statep) 765 { 766 struct lockf *state; 767 struct lockf_entry *lock, *nlock; 768 769 /* 770 * For this to work correctly, the caller must ensure that no 771 * other threads enter the locking system for this vnode, 772 * e.g. by checking VI_DOOMED. We wake up any threads that are 773 * sleeping waiting for locks on this vnode and then free all 774 * the remaining locks. 775 */ 776 VI_LOCK(vp); 777 KASSERT(vp->v_iflag & VI_DOOMED, 778 ("lf_purgelocks: vp %p has not vgone yet", vp)); 779 state = *statep; 780 if (state) { 781 *statep = NULL; 782 state->ls_threads++; 783 VI_UNLOCK(vp); 784 785 sx_xlock(&state->ls_lock); 786 sx_xlock(&lf_owner_graph_lock); 787 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) { 788 LIST_REMOVE(lock, lf_link); 789 lf_remove_outgoing(lock); 790 lf_remove_incoming(lock); 791 792 /* 793 * If its an async lock, we can just free it 794 * here, otherwise we let the sleeping thread 795 * free it. 796 */ 797 if (lock->lf_async_task) { 798 lf_free_lock(lock); 799 } else { 800 lock->lf_flags |= F_INTR; 801 wakeup(lock); 802 } 803 } 804 sx_xunlock(&lf_owner_graph_lock); 805 sx_xunlock(&state->ls_lock); 806 807 /* 808 * Wait for all other threads, sleeping and otherwise 809 * to leave. 810 */ 811 VI_LOCK(vp); 812 while (state->ls_threads > 1) 813 msleep(state, VI_MTX(vp), 0, "purgelocks", 0); 814 VI_UNLOCK(vp); 815 816 /* 817 * We can just free all the active locks since they 818 * will have no dependancies (we removed them all 819 * above). We don't need to bother locking since we 820 * are the last thread using this state structure. 821 */ 822 KASSERT(LIST_EMPTY(&state->ls_pending), 823 ("lock pending for %p", state)); 824 LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) { 825 LIST_REMOVE(lock, lf_link); 826 lf_free_lock(lock); 827 } 828 sx_xlock(&lf_lock_states_lock); 829 LIST_REMOVE(state, ls_link); 830 sx_xunlock(&lf_lock_states_lock); 831 sx_destroy(&state->ls_lock); 832 free(state, M_LOCKF); 833 } else { 834 VI_UNLOCK(vp); 835 } 836 } 837 838 /* 839 * Return non-zero if locks 'x' and 'y' overlap. 840 */ 841 static int 842 lf_overlaps(struct lockf_entry *x, struct lockf_entry *y) 843 { 844 845 return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start); 846 } 847 848 /* 849 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa). 850 */ 851 static int 852 lf_blocks(struct lockf_entry *x, struct lockf_entry *y) 853 { 854 855 return x->lf_owner != y->lf_owner 856 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK) 857 && lf_overlaps(x, y); 858 } 859 860 /* 861 * Allocate a lock edge from the free list 862 */ 863 static struct lockf_edge * 864 lf_alloc_edge(void) 865 { 866 867 return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO)); 868 } 869 870 /* 871 * Free a lock edge. 872 */ 873 static void 874 lf_free_edge(struct lockf_edge *e) 875 { 876 877 free(e, M_LOCKF); 878 } 879 880 881 /* 882 * Ensure that the lock's owner has a corresponding vertex in the 883 * owner graph. 884 */ 885 static void 886 lf_alloc_vertex(struct lockf_entry *lock) 887 { 888 struct owner_graph *g = &lf_owner_graph; 889 890 if (!lock->lf_owner->lo_vertex) 891 lock->lf_owner->lo_vertex = 892 graph_alloc_vertex(g, lock->lf_owner); 893 } 894 895 /* 896 * Attempt to record an edge from lock x to lock y. Return EDEADLK if 897 * the new edge would cause a cycle in the owner graph. 898 */ 899 static int 900 lf_add_edge(struct lockf_entry *x, struct lockf_entry *y) 901 { 902 struct owner_graph *g = &lf_owner_graph; 903 struct lockf_edge *e; 904 int error; 905 906 #ifdef INVARIANTS 907 LIST_FOREACH(e, &x->lf_outedges, le_outlink) 908 KASSERT(e->le_to != y, ("adding lock edge twice")); 909 #endif 910 911 /* 912 * Make sure the two owners have entries in the owner graph. 913 */ 914 lf_alloc_vertex(x); 915 lf_alloc_vertex(y); 916 917 error = graph_add_edge(g, x->lf_owner->lo_vertex, 918 y->lf_owner->lo_vertex); 919 if (error) 920 return (error); 921 922 e = lf_alloc_edge(); 923 LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink); 924 LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink); 925 e->le_from = x; 926 e->le_to = y; 927 928 return (0); 929 } 930 931 /* 932 * Remove an edge from the lock graph. 933 */ 934 static void 935 lf_remove_edge(struct lockf_edge *e) 936 { 937 struct owner_graph *g = &lf_owner_graph; 938 struct lockf_entry *x = e->le_from; 939 struct lockf_entry *y = e->le_to; 940 941 graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex); 942 LIST_REMOVE(e, le_outlink); 943 LIST_REMOVE(e, le_inlink); 944 e->le_from = NULL; 945 e->le_to = NULL; 946 lf_free_edge(e); 947 } 948 949 /* 950 * Remove all out-going edges from lock x. 951 */ 952 static void 953 lf_remove_outgoing(struct lockf_entry *x) 954 { 955 struct lockf_edge *e; 956 957 while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) { 958 lf_remove_edge(e); 959 } 960 } 961 962 /* 963 * Remove all in-coming edges from lock x. 964 */ 965 static void 966 lf_remove_incoming(struct lockf_entry *x) 967 { 968 struct lockf_edge *e; 969 970 while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) { 971 lf_remove_edge(e); 972 } 973 } 974 975 /* 976 * Walk the list of locks for the file and create an out-going edge 977 * from lock to each blocking lock. 978 */ 979 static int 980 lf_add_outgoing(struct lockf *state, struct lockf_entry *lock) 981 { 982 struct lockf_entry *overlap; 983 int error; 984 985 LIST_FOREACH(overlap, &state->ls_active, lf_link) { 986 /* 987 * We may assume that the active list is sorted by 988 * lf_start. 989 */ 990 if (overlap->lf_start > lock->lf_end) 991 break; 992 if (!lf_blocks(lock, overlap)) 993 continue; 994 995 /* 996 * We've found a blocking lock. Add the corresponding 997 * edge to the graphs and see if it would cause a 998 * deadlock. 999 */ 1000 error = lf_add_edge(lock, overlap); 1001 1002 /* 1003 * The only error that lf_add_edge returns is EDEADLK. 1004 * Remove any edges we added and return the error. 1005 */ 1006 if (error) { 1007 lf_remove_outgoing(lock); 1008 return (error); 1009 } 1010 } 1011 1012 /* 1013 * We also need to add edges to sleeping locks that block 1014 * us. This ensures that lf_wakeup_lock cannot grant two 1015 * mutually blocking locks simultaneously and also enforces a 1016 * 'first come, first served' fairness model. Note that this 1017 * only happens if we are blocked by at least one active lock 1018 * due to the call to lf_getblock in lf_setlock below. 1019 */ 1020 LIST_FOREACH(overlap, &state->ls_pending, lf_link) { 1021 if (!lf_blocks(lock, overlap)) 1022 continue; 1023 /* 1024 * We've found a blocking lock. Add the corresponding 1025 * edge to the graphs and see if it would cause a 1026 * deadlock. 1027 */ 1028 error = lf_add_edge(lock, overlap); 1029 1030 /* 1031 * The only error that lf_add_edge returns is EDEADLK. 1032 * Remove any edges we added and return the error. 1033 */ 1034 if (error) { 1035 lf_remove_outgoing(lock); 1036 return (error); 1037 } 1038 } 1039 1040 return (0); 1041 } 1042 1043 /* 1044 * Walk the list of pending locks for the file and create an in-coming 1045 * edge from lock to each blocking lock. 1046 */ 1047 static int 1048 lf_add_incoming(struct lockf *state, struct lockf_entry *lock) 1049 { 1050 struct lockf_entry *overlap; 1051 int error; 1052 1053 LIST_FOREACH(overlap, &state->ls_pending, lf_link) { 1054 if (!lf_blocks(lock, overlap)) 1055 continue; 1056 1057 /* 1058 * We've found a blocking lock. Add the corresponding 1059 * edge to the graphs and see if it would cause a 1060 * deadlock. 1061 */ 1062 error = lf_add_edge(overlap, lock); 1063 1064 /* 1065 * The only error that lf_add_edge returns is EDEADLK. 1066 * Remove any edges we added and return the error. 1067 */ 1068 if (error) { 1069 lf_remove_incoming(lock); 1070 return (error); 1071 } 1072 } 1073 return (0); 1074 } 1075 1076 /* 1077 * Insert lock into the active list, keeping list entries ordered by 1078 * increasing values of lf_start. 1079 */ 1080 static void 1081 lf_insert_lock(struct lockf *state, struct lockf_entry *lock) 1082 { 1083 struct lockf_entry *lf, *lfprev; 1084 1085 if (LIST_EMPTY(&state->ls_active)) { 1086 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link); 1087 return; 1088 } 1089 1090 lfprev = NULL; 1091 LIST_FOREACH(lf, &state->ls_active, lf_link) { 1092 if (lf->lf_start > lock->lf_start) { 1093 LIST_INSERT_BEFORE(lf, lock, lf_link); 1094 return; 1095 } 1096 lfprev = lf; 1097 } 1098 LIST_INSERT_AFTER(lfprev, lock, lf_link); 1099 } 1100 1101 /* 1102 * Wake up a sleeping lock and remove it from the pending list now 1103 * that all its dependancies have been resolved. The caller should 1104 * arrange for the lock to be added to the active list, adjusting any 1105 * existing locks for the same owner as needed. 1106 */ 1107 static void 1108 lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock) 1109 { 1110 1111 /* 1112 * Remove from ls_pending list and wake up the caller 1113 * or start the async notification, as appropriate. 1114 */ 1115 LIST_REMOVE(wakelock, lf_link); 1116 #ifdef LOCKF_DEBUG 1117 if (lockf_debug & 1) 1118 lf_print("lf_wakeup_lock: awakening", wakelock); 1119 #endif /* LOCKF_DEBUG */ 1120 if (wakelock->lf_async_task) { 1121 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task); 1122 } else { 1123 wakeup(wakelock); 1124 } 1125 } 1126 1127 /* 1128 * Re-check all dependant locks and remove edges to locks that we no 1129 * longer block. If 'all' is non-zero, the lock has been removed and 1130 * we must remove all the dependancies, otherwise it has simply been 1131 * reduced but remains active. Any pending locks which have been been 1132 * unblocked are added to 'granted' 1133 */ 1134 static void 1135 lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all, 1136 struct lockf_entry_list *granted) 1137 { 1138 struct lockf_edge *e, *ne; 1139 struct lockf_entry *deplock; 1140 1141 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) { 1142 deplock = e->le_from; 1143 if (all || !lf_blocks(lock, deplock)) { 1144 sx_xlock(&lf_owner_graph_lock); 1145 lf_remove_edge(e); 1146 sx_xunlock(&lf_owner_graph_lock); 1147 if (LIST_EMPTY(&deplock->lf_outedges)) { 1148 lf_wakeup_lock(state, deplock); 1149 LIST_INSERT_HEAD(granted, deplock, lf_link); 1150 } 1151 } 1152 } 1153 } 1154 1155 /* 1156 * Set the start of an existing active lock, updating dependancies and 1157 * adding any newly woken locks to 'granted'. 1158 */ 1159 static void 1160 lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start, 1161 struct lockf_entry_list *granted) 1162 { 1163 1164 KASSERT(new_start >= lock->lf_start, ("can't increase lock")); 1165 lock->lf_start = new_start; 1166 LIST_REMOVE(lock, lf_link); 1167 lf_insert_lock(state, lock); 1168 lf_update_dependancies(state, lock, FALSE, granted); 1169 } 1170 1171 /* 1172 * Set the end of an existing active lock, updating dependancies and 1173 * adding any newly woken locks to 'granted'. 1174 */ 1175 static void 1176 lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end, 1177 struct lockf_entry_list *granted) 1178 { 1179 1180 KASSERT(new_end <= lock->lf_end, ("can't increase lock")); 1181 lock->lf_end = new_end; 1182 lf_update_dependancies(state, lock, FALSE, granted); 1183 } 1184 1185 /* 1186 * Add a lock to the active list, updating or removing any current 1187 * locks owned by the same owner and processing any pending locks that 1188 * become unblocked as a result. This code is also used for unlock 1189 * since the logic for updating existing locks is identical. 1190 * 1191 * As a result of processing the new lock, we may unblock existing 1192 * pending locks as a result of downgrading/unlocking. We simply 1193 * activate the newly granted locks by looping. 1194 * 1195 * Since the new lock already has its dependancies set up, we always 1196 * add it to the list (unless its an unlock request). This may 1197 * fragment the lock list in some pathological cases but its probably 1198 * not a real problem. 1199 */ 1200 static void 1201 lf_activate_lock(struct lockf *state, struct lockf_entry *lock) 1202 { 1203 struct lockf_entry *overlap, *lf; 1204 struct lockf_entry_list granted; 1205 int ovcase; 1206 1207 LIST_INIT(&granted); 1208 LIST_INSERT_HEAD(&granted, lock, lf_link); 1209 1210 while (!LIST_EMPTY(&granted)) { 1211 lock = LIST_FIRST(&granted); 1212 LIST_REMOVE(lock, lf_link); 1213 1214 /* 1215 * Skip over locks owned by other processes. Handle 1216 * any locks that overlap and are owned by ourselves. 1217 */ 1218 overlap = LIST_FIRST(&state->ls_active); 1219 for (;;) { 1220 ovcase = lf_findoverlap(&overlap, lock, SELF); 1221 1222 #ifdef LOCKF_DEBUG 1223 if (ovcase && (lockf_debug & 2)) { 1224 printf("lf_setlock: overlap %d", ovcase); 1225 lf_print("", overlap); 1226 } 1227 #endif 1228 /* 1229 * Six cases: 1230 * 0) no overlap 1231 * 1) overlap == lock 1232 * 2) overlap contains lock 1233 * 3) lock contains overlap 1234 * 4) overlap starts before lock 1235 * 5) overlap ends after lock 1236 */ 1237 switch (ovcase) { 1238 case 0: /* no overlap */ 1239 break; 1240 1241 case 1: /* overlap == lock */ 1242 /* 1243 * We have already setup the 1244 * dependants for the new lock, taking 1245 * into account a possible downgrade 1246 * or unlock. Remove the old lock. 1247 */ 1248 LIST_REMOVE(overlap, lf_link); 1249 lf_update_dependancies(state, overlap, TRUE, 1250 &granted); 1251 lf_free_lock(overlap); 1252 break; 1253 1254 case 2: /* overlap contains lock */ 1255 /* 1256 * Just split the existing lock. 1257 */ 1258 lf_split(state, overlap, lock, &granted); 1259 break; 1260 1261 case 3: /* lock contains overlap */ 1262 /* 1263 * Delete the overlap and advance to 1264 * the next entry in the list. 1265 */ 1266 lf = LIST_NEXT(overlap, lf_link); 1267 LIST_REMOVE(overlap, lf_link); 1268 lf_update_dependancies(state, overlap, TRUE, 1269 &granted); 1270 lf_free_lock(overlap); 1271 overlap = lf; 1272 continue; 1273 1274 case 4: /* overlap starts before lock */ 1275 /* 1276 * Just update the overlap end and 1277 * move on. 1278 */ 1279 lf_set_end(state, overlap, lock->lf_start - 1, 1280 &granted); 1281 overlap = LIST_NEXT(overlap, lf_link); 1282 continue; 1283 1284 case 5: /* overlap ends after lock */ 1285 /* 1286 * Change the start of overlap and 1287 * re-insert. 1288 */ 1289 lf_set_start(state, overlap, lock->lf_end + 1, 1290 &granted); 1291 break; 1292 } 1293 break; 1294 } 1295 #ifdef LOCKF_DEBUG 1296 if (lockf_debug & 1) { 1297 if (lock->lf_type != F_UNLCK) 1298 lf_print("lf_activate_lock: activated", lock); 1299 else 1300 lf_print("lf_activate_lock: unlocked", lock); 1301 lf_printlist("lf_activate_lock", lock); 1302 } 1303 #endif /* LOCKF_DEBUG */ 1304 if (lock->lf_type != F_UNLCK) 1305 lf_insert_lock(state, lock); 1306 } 1307 } 1308 1309 /* 1310 * Cancel a pending lock request, either as a result of a signal or a 1311 * cancel request for an async lock. 1312 */ 1313 static void 1314 lf_cancel_lock(struct lockf *state, struct lockf_entry *lock) 1315 { 1316 struct lockf_entry_list granted; 1317 1318 /* 1319 * Note it is theoretically possible that cancelling this lock 1320 * may allow some other pending lock to become 1321 * active. Consider this case: 1322 * 1323 * Owner Action Result Dependancies 1324 * 1325 * A: lock [0..0] succeeds 1326 * B: lock [2..2] succeeds 1327 * C: lock [1..2] blocked C->B 1328 * D: lock [0..1] blocked C->B,D->A,D->C 1329 * A: unlock [0..0] C->B,D->C 1330 * C: cancel [1..2] 1331 */ 1332 1333 LIST_REMOVE(lock, lf_link); 1334 1335 /* 1336 * Removing out-going edges is simple. 1337 */ 1338 sx_xlock(&lf_owner_graph_lock); 1339 lf_remove_outgoing(lock); 1340 sx_xunlock(&lf_owner_graph_lock); 1341 1342 /* 1343 * Removing in-coming edges may allow some other lock to 1344 * become active - we use lf_update_dependancies to figure 1345 * this out. 1346 */ 1347 LIST_INIT(&granted); 1348 lf_update_dependancies(state, lock, TRUE, &granted); 1349 lf_free_lock(lock); 1350 1351 /* 1352 * Feed any newly active locks to lf_activate_lock. 1353 */ 1354 while (!LIST_EMPTY(&granted)) { 1355 lock = LIST_FIRST(&granted); 1356 LIST_REMOVE(lock, lf_link); 1357 lf_activate_lock(state, lock); 1358 } 1359 } 1360 1361 /* 1362 * Set a byte-range lock. 1363 */ 1364 static int 1365 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp, 1366 void **cookiep) 1367 { 1368 static char lockstr[] = "lockf"; 1369 int priority, error; 1370 1371 #ifdef LOCKF_DEBUG 1372 if (lockf_debug & 1) 1373 lf_print("lf_setlock", lock); 1374 #endif /* LOCKF_DEBUG */ 1375 1376 /* 1377 * Set the priority 1378 */ 1379 priority = PLOCK; 1380 if (lock->lf_type == F_WRLCK) 1381 priority += 4; 1382 if (!(lock->lf_flags & F_NOINTR)) 1383 priority |= PCATCH; 1384 /* 1385 * Scan lock list for this file looking for locks that would block us. 1386 */ 1387 if (lf_getblock(state, lock)) { 1388 /* 1389 * Free the structure and return if nonblocking. 1390 */ 1391 if ((lock->lf_flags & F_WAIT) == 0 1392 && lock->lf_async_task == NULL) { 1393 lf_free_lock(lock); 1394 error = EAGAIN; 1395 goto out; 1396 } 1397 1398 /* 1399 * For flock type locks, we must first remove 1400 * any shared locks that we hold before we sleep 1401 * waiting for an exclusive lock. 1402 */ 1403 if ((lock->lf_flags & F_FLOCK) && 1404 lock->lf_type == F_WRLCK) { 1405 lock->lf_type = F_UNLCK; 1406 lf_activate_lock(state, lock); 1407 lock->lf_type = F_WRLCK; 1408 } 1409 1410 /* 1411 * We are blocked. Create edges to each blocking lock, 1412 * checking for deadlock using the owner graph. For 1413 * simplicity, we run deadlock detection for all 1414 * locks, posix and otherwise. 1415 */ 1416 sx_xlock(&lf_owner_graph_lock); 1417 error = lf_add_outgoing(state, lock); 1418 sx_xunlock(&lf_owner_graph_lock); 1419 1420 if (error) { 1421 #ifdef LOCKF_DEBUG 1422 if (lockf_debug & 1) 1423 lf_print("lf_setlock: deadlock", lock); 1424 #endif 1425 lf_free_lock(lock); 1426 goto out; 1427 } 1428 1429 /* 1430 * We have added edges to everything that blocks 1431 * us. Sleep until they all go away. 1432 */ 1433 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link); 1434 #ifdef LOCKF_DEBUG 1435 if (lockf_debug & 1) { 1436 struct lockf_edge *e; 1437 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) { 1438 lf_print("lf_setlock: blocking on", e->le_to); 1439 lf_printlist("lf_setlock", e->le_to); 1440 } 1441 } 1442 #endif /* LOCKF_DEBUG */ 1443 1444 if ((lock->lf_flags & F_WAIT) == 0) { 1445 /* 1446 * The caller requested async notification - 1447 * this callback happens when the blocking 1448 * lock is released, allowing the caller to 1449 * make another attempt to take the lock. 1450 */ 1451 *cookiep = (void *) lock; 1452 error = EINPROGRESS; 1453 goto out; 1454 } 1455 1456 lock->lf_refs++; 1457 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0); 1458 if (lf_free_lock(lock)) { 1459 error = EINTR; 1460 goto out; 1461 } 1462 1463 /* 1464 * We may have been awakened by a signal and/or by a 1465 * debugger continuing us (in which cases we must 1466 * remove our lock graph edges) and/or by another 1467 * process releasing a lock (in which case our edges 1468 * have already been removed and we have been moved to 1469 * the active list). We may also have been woken by 1470 * lf_purgelocks which we report to the caller as 1471 * EINTR. In that case, lf_purgelocks will have 1472 * removed our lock graph edges. 1473 * 1474 * Note that it is possible to receive a signal after 1475 * we were successfully woken (and moved to the active 1476 * list) but before we resumed execution. In this 1477 * case, our lf_outedges list will be clear. We 1478 * pretend there was no error. 1479 * 1480 * Note also, if we have been sleeping long enough, we 1481 * may now have incoming edges from some newer lock 1482 * which is waiting behind us in the queue. 1483 */ 1484 if (lock->lf_flags & F_INTR) { 1485 error = EINTR; 1486 lf_free_lock(lock); 1487 goto out; 1488 } 1489 if (LIST_EMPTY(&lock->lf_outedges)) { 1490 error = 0; 1491 } else { 1492 lf_cancel_lock(state, lock); 1493 goto out; 1494 } 1495 #ifdef LOCKF_DEBUG 1496 if (lockf_debug & 1) { 1497 lf_print("lf_setlock: granted", lock); 1498 } 1499 #endif 1500 goto out; 1501 } 1502 /* 1503 * It looks like we are going to grant the lock. First add 1504 * edges from any currently pending lock that the new lock 1505 * would block. 1506 */ 1507 sx_xlock(&lf_owner_graph_lock); 1508 error = lf_add_incoming(state, lock); 1509 sx_xunlock(&lf_owner_graph_lock); 1510 if (error) { 1511 #ifdef LOCKF_DEBUG 1512 if (lockf_debug & 1) 1513 lf_print("lf_setlock: deadlock", lock); 1514 #endif 1515 lf_free_lock(lock); 1516 goto out; 1517 } 1518 1519 /* 1520 * No blocks!! Add the lock. Note that we will 1521 * downgrade or upgrade any overlapping locks this 1522 * process already owns. 1523 */ 1524 lf_activate_lock(state, lock); 1525 error = 0; 1526 out: 1527 return (error); 1528 } 1529 1530 /* 1531 * Remove a byte-range lock on an inode. 1532 * 1533 * Generally, find the lock (or an overlap to that lock) 1534 * and remove it (or shrink it), then wakeup anyone we can. 1535 */ 1536 static int 1537 lf_clearlock(struct lockf *state, struct lockf_entry *unlock) 1538 { 1539 struct lockf_entry *overlap; 1540 1541 overlap = LIST_FIRST(&state->ls_active); 1542 1543 if (overlap == NOLOCKF) 1544 return (0); 1545 #ifdef LOCKF_DEBUG 1546 if (unlock->lf_type != F_UNLCK) 1547 panic("lf_clearlock: bad type"); 1548 if (lockf_debug & 1) 1549 lf_print("lf_clearlock", unlock); 1550 #endif /* LOCKF_DEBUG */ 1551 1552 lf_activate_lock(state, unlock); 1553 1554 return (0); 1555 } 1556 1557 /* 1558 * Check whether there is a blocking lock, and if so return its 1559 * details in '*fl'. 1560 */ 1561 static int 1562 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl) 1563 { 1564 struct lockf_entry *block; 1565 1566 #ifdef LOCKF_DEBUG 1567 if (lockf_debug & 1) 1568 lf_print("lf_getlock", lock); 1569 #endif /* LOCKF_DEBUG */ 1570 1571 if ((block = lf_getblock(state, lock))) { 1572 fl->l_type = block->lf_type; 1573 fl->l_whence = SEEK_SET; 1574 fl->l_start = block->lf_start; 1575 if (block->lf_end == OFF_MAX) 1576 fl->l_len = 0; 1577 else 1578 fl->l_len = block->lf_end - block->lf_start + 1; 1579 fl->l_pid = block->lf_owner->lo_pid; 1580 fl->l_sysid = block->lf_owner->lo_sysid; 1581 } else { 1582 fl->l_type = F_UNLCK; 1583 } 1584 return (0); 1585 } 1586 1587 /* 1588 * Cancel an async lock request. 1589 */ 1590 static int 1591 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie) 1592 { 1593 struct lockf_entry *reallock; 1594 1595 /* 1596 * We need to match this request with an existing lock 1597 * request. 1598 */ 1599 LIST_FOREACH(reallock, &state->ls_pending, lf_link) { 1600 if ((void *) reallock == cookie) { 1601 /* 1602 * Double-check that this lock looks right 1603 * (maybe use a rolling ID for the cancel 1604 * cookie instead?) 1605 */ 1606 if (!(reallock->lf_vnode == lock->lf_vnode 1607 && reallock->lf_start == lock->lf_start 1608 && reallock->lf_end == lock->lf_end)) { 1609 return (ENOENT); 1610 } 1611 1612 /* 1613 * Make sure this lock was async and then just 1614 * remove it from its wait lists. 1615 */ 1616 if (!reallock->lf_async_task) { 1617 return (ENOENT); 1618 } 1619 1620 /* 1621 * Note that since any other thread must take 1622 * state->ls_lock before it can possibly 1623 * trigger the async callback, we are safe 1624 * from a race with lf_wakeup_lock, i.e. we 1625 * can free the lock (actually our caller does 1626 * this). 1627 */ 1628 lf_cancel_lock(state, reallock); 1629 return (0); 1630 } 1631 } 1632 1633 /* 1634 * We didn't find a matching lock - not much we can do here. 1635 */ 1636 return (ENOENT); 1637 } 1638 1639 /* 1640 * Walk the list of locks for an inode and 1641 * return the first blocking lock. 1642 */ 1643 static struct lockf_entry * 1644 lf_getblock(struct lockf *state, struct lockf_entry *lock) 1645 { 1646 struct lockf_entry *overlap; 1647 1648 LIST_FOREACH(overlap, &state->ls_active, lf_link) { 1649 /* 1650 * We may assume that the active list is sorted by 1651 * lf_start. 1652 */ 1653 if (overlap->lf_start > lock->lf_end) 1654 break; 1655 if (!lf_blocks(lock, overlap)) 1656 continue; 1657 return (overlap); 1658 } 1659 return (NOLOCKF); 1660 } 1661 1662 /* 1663 * Walk the list of locks for an inode to find an overlapping lock (if 1664 * any) and return a classification of that overlap. 1665 * 1666 * Arguments: 1667 * *overlap The place in the lock list to start looking 1668 * lock The lock which is being tested 1669 * type Pass 'SELF' to test only locks with the same 1670 * owner as lock, or 'OTHER' to test only locks 1671 * with a different owner 1672 * 1673 * Returns one of six values: 1674 * 0) no overlap 1675 * 1) overlap == lock 1676 * 2) overlap contains lock 1677 * 3) lock contains overlap 1678 * 4) overlap starts before lock 1679 * 5) overlap ends after lock 1680 * 1681 * If there is an overlapping lock, '*overlap' is set to point at the 1682 * overlapping lock. 1683 * 1684 * NOTE: this returns only the FIRST overlapping lock. There 1685 * may be more than one. 1686 */ 1687 static int 1688 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type) 1689 { 1690 struct lockf_entry *lf; 1691 off_t start, end; 1692 int res; 1693 1694 if ((*overlap) == NOLOCKF) { 1695 return (0); 1696 } 1697 #ifdef LOCKF_DEBUG 1698 if (lockf_debug & 2) 1699 lf_print("lf_findoverlap: looking for overlap in", lock); 1700 #endif /* LOCKF_DEBUG */ 1701 start = lock->lf_start; 1702 end = lock->lf_end; 1703 res = 0; 1704 while (*overlap) { 1705 lf = *overlap; 1706 if (lf->lf_start > end) 1707 break; 1708 if (((type & SELF) && lf->lf_owner != lock->lf_owner) || 1709 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) { 1710 *overlap = LIST_NEXT(lf, lf_link); 1711 continue; 1712 } 1713 #ifdef LOCKF_DEBUG 1714 if (lockf_debug & 2) 1715 lf_print("\tchecking", lf); 1716 #endif /* LOCKF_DEBUG */ 1717 /* 1718 * OK, check for overlap 1719 * 1720 * Six cases: 1721 * 0) no overlap 1722 * 1) overlap == lock 1723 * 2) overlap contains lock 1724 * 3) lock contains overlap 1725 * 4) overlap starts before lock 1726 * 5) overlap ends after lock 1727 */ 1728 if (start > lf->lf_end) { 1729 /* Case 0 */ 1730 #ifdef LOCKF_DEBUG 1731 if (lockf_debug & 2) 1732 printf("no overlap\n"); 1733 #endif /* LOCKF_DEBUG */ 1734 *overlap = LIST_NEXT(lf, lf_link); 1735 continue; 1736 } 1737 if (lf->lf_start == start && lf->lf_end == end) { 1738 /* Case 1 */ 1739 #ifdef LOCKF_DEBUG 1740 if (lockf_debug & 2) 1741 printf("overlap == lock\n"); 1742 #endif /* LOCKF_DEBUG */ 1743 res = 1; 1744 break; 1745 } 1746 if (lf->lf_start <= start && lf->lf_end >= end) { 1747 /* Case 2 */ 1748 #ifdef LOCKF_DEBUG 1749 if (lockf_debug & 2) 1750 printf("overlap contains lock\n"); 1751 #endif /* LOCKF_DEBUG */ 1752 res = 2; 1753 break; 1754 } 1755 if (start <= lf->lf_start && end >= lf->lf_end) { 1756 /* Case 3 */ 1757 #ifdef LOCKF_DEBUG 1758 if (lockf_debug & 2) 1759 printf("lock contains overlap\n"); 1760 #endif /* LOCKF_DEBUG */ 1761 res = 3; 1762 break; 1763 } 1764 if (lf->lf_start < start && lf->lf_end >= start) { 1765 /* Case 4 */ 1766 #ifdef LOCKF_DEBUG 1767 if (lockf_debug & 2) 1768 printf("overlap starts before lock\n"); 1769 #endif /* LOCKF_DEBUG */ 1770 res = 4; 1771 break; 1772 } 1773 if (lf->lf_start > start && lf->lf_end > end) { 1774 /* Case 5 */ 1775 #ifdef LOCKF_DEBUG 1776 if (lockf_debug & 2) 1777 printf("overlap ends after lock\n"); 1778 #endif /* LOCKF_DEBUG */ 1779 res = 5; 1780 break; 1781 } 1782 panic("lf_findoverlap: default"); 1783 } 1784 return (res); 1785 } 1786 1787 /* 1788 * Split an the existing 'lock1', based on the extent of the lock 1789 * described by 'lock2'. The existing lock should cover 'lock2' 1790 * entirely. 1791 * 1792 * Any pending locks which have been been unblocked are added to 1793 * 'granted' 1794 */ 1795 static void 1796 lf_split(struct lockf *state, struct lockf_entry *lock1, 1797 struct lockf_entry *lock2, struct lockf_entry_list *granted) 1798 { 1799 struct lockf_entry *splitlock; 1800 1801 #ifdef LOCKF_DEBUG 1802 if (lockf_debug & 2) { 1803 lf_print("lf_split", lock1); 1804 lf_print("splitting from", lock2); 1805 } 1806 #endif /* LOCKF_DEBUG */ 1807 /* 1808 * Check to see if we don't need to split at all. 1809 */ 1810 if (lock1->lf_start == lock2->lf_start) { 1811 lf_set_start(state, lock1, lock2->lf_end + 1, granted); 1812 return; 1813 } 1814 if (lock1->lf_end == lock2->lf_end) { 1815 lf_set_end(state, lock1, lock2->lf_start - 1, granted); 1816 return; 1817 } 1818 /* 1819 * Make a new lock consisting of the last part of 1820 * the encompassing lock. 1821 */ 1822 splitlock = lf_alloc_lock(lock1->lf_owner); 1823 memcpy(splitlock, lock1, sizeof *splitlock); 1824 splitlock->lf_refs = 1; 1825 if (splitlock->lf_flags & F_REMOTE) 1826 vref(splitlock->lf_vnode); 1827 1828 /* 1829 * This cannot cause a deadlock since any edges we would add 1830 * to splitlock already exist in lock1. We must be sure to add 1831 * necessary dependancies to splitlock before we reduce lock1 1832 * otherwise we may accidentally grant a pending lock that 1833 * was blocked by the tail end of lock1. 1834 */ 1835 splitlock->lf_start = lock2->lf_end + 1; 1836 LIST_INIT(&splitlock->lf_outedges); 1837 LIST_INIT(&splitlock->lf_inedges); 1838 sx_xlock(&lf_owner_graph_lock); 1839 lf_add_incoming(state, splitlock); 1840 sx_xunlock(&lf_owner_graph_lock); 1841 1842 lf_set_end(state, lock1, lock2->lf_start - 1, granted); 1843 1844 /* 1845 * OK, now link it in 1846 */ 1847 lf_insert_lock(state, splitlock); 1848 } 1849 1850 struct lockdesc { 1851 STAILQ_ENTRY(lockdesc) link; 1852 struct vnode *vp; 1853 struct flock fl; 1854 }; 1855 STAILQ_HEAD(lockdesclist, lockdesc); 1856 1857 int 1858 lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg) 1859 { 1860 struct lockf *ls; 1861 struct lockf_entry *lf; 1862 struct lockdesc *ldesc; 1863 struct lockdesclist locks; 1864 int error; 1865 1866 /* 1867 * In order to keep the locking simple, we iterate over the 1868 * active lock lists to build a list of locks that need 1869 * releasing. We then call the iterator for each one in turn. 1870 * 1871 * We take an extra reference to the vnode for the duration to 1872 * make sure it doesn't go away before we are finished. 1873 */ 1874 STAILQ_INIT(&locks); 1875 sx_xlock(&lf_lock_states_lock); 1876 LIST_FOREACH(ls, &lf_lock_states, ls_link) { 1877 sx_xlock(&ls->ls_lock); 1878 LIST_FOREACH(lf, &ls->ls_active, lf_link) { 1879 if (lf->lf_owner->lo_sysid != sysid) 1880 continue; 1881 1882 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF, 1883 M_WAITOK); 1884 ldesc->vp = lf->lf_vnode; 1885 vref(ldesc->vp); 1886 ldesc->fl.l_start = lf->lf_start; 1887 if (lf->lf_end == OFF_MAX) 1888 ldesc->fl.l_len = 0; 1889 else 1890 ldesc->fl.l_len = 1891 lf->lf_end - lf->lf_start + 1; 1892 ldesc->fl.l_whence = SEEK_SET; 1893 ldesc->fl.l_type = F_UNLCK; 1894 ldesc->fl.l_pid = lf->lf_owner->lo_pid; 1895 ldesc->fl.l_sysid = sysid; 1896 STAILQ_INSERT_TAIL(&locks, ldesc, link); 1897 } 1898 sx_xunlock(&ls->ls_lock); 1899 } 1900 sx_xunlock(&lf_lock_states_lock); 1901 1902 /* 1903 * Call the iterator function for each lock in turn. If the 1904 * iterator returns an error code, just free the rest of the 1905 * lockdesc structures. 1906 */ 1907 error = 0; 1908 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) { 1909 STAILQ_REMOVE_HEAD(&locks, link); 1910 if (!error) 1911 error = fn(ldesc->vp, &ldesc->fl, arg); 1912 vrele(ldesc->vp); 1913 free(ldesc, M_LOCKF); 1914 } 1915 1916 return (error); 1917 } 1918 1919 int 1920 lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg) 1921 { 1922 struct lockf *ls; 1923 struct lockf_entry *lf; 1924 struct lockdesc *ldesc; 1925 struct lockdesclist locks; 1926 int error; 1927 1928 /* 1929 * In order to keep the locking simple, we iterate over the 1930 * active lock lists to build a list of locks that need 1931 * releasing. We then call the iterator for each one in turn. 1932 * 1933 * We take an extra reference to the vnode for the duration to 1934 * make sure it doesn't go away before we are finished. 1935 */ 1936 STAILQ_INIT(&locks); 1937 ls = vp->v_lockf; 1938 if (!ls) 1939 return (0); 1940 1941 sx_xlock(&ls->ls_lock); 1942 LIST_FOREACH(lf, &ls->ls_active, lf_link) { 1943 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF, 1944 M_WAITOK); 1945 ldesc->vp = lf->lf_vnode; 1946 vref(ldesc->vp); 1947 ldesc->fl.l_start = lf->lf_start; 1948 if (lf->lf_end == OFF_MAX) 1949 ldesc->fl.l_len = 0; 1950 else 1951 ldesc->fl.l_len = 1952 lf->lf_end - lf->lf_start + 1; 1953 ldesc->fl.l_whence = SEEK_SET; 1954 ldesc->fl.l_type = F_UNLCK; 1955 ldesc->fl.l_pid = lf->lf_owner->lo_pid; 1956 ldesc->fl.l_sysid = lf->lf_owner->lo_sysid; 1957 STAILQ_INSERT_TAIL(&locks, ldesc, link); 1958 } 1959 sx_xunlock(&ls->ls_lock); 1960 1961 /* 1962 * Call the iterator function for each lock in turn. If the 1963 * iterator returns an error code, just free the rest of the 1964 * lockdesc structures. 1965 */ 1966 error = 0; 1967 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) { 1968 STAILQ_REMOVE_HEAD(&locks, link); 1969 if (!error) 1970 error = fn(ldesc->vp, &ldesc->fl, arg); 1971 vrele(ldesc->vp); 1972 free(ldesc, M_LOCKF); 1973 } 1974 1975 return (error); 1976 } 1977 1978 static int 1979 lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg) 1980 { 1981 1982 VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE); 1983 return (0); 1984 } 1985 1986 void 1987 lf_clearremotesys(int sysid) 1988 { 1989 1990 KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS")); 1991 lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL); 1992 } 1993 1994 int 1995 lf_countlocks(int sysid) 1996 { 1997 int i; 1998 struct lock_owner *lo; 1999 int count; 2000 2001 count = 0; 2002 sx_xlock(&lf_lock_owners_lock); 2003 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) 2004 LIST_FOREACH(lo, &lf_lock_owners[i], lo_link) 2005 if (lo->lo_sysid == sysid) 2006 count += lo->lo_refs; 2007 sx_xunlock(&lf_lock_owners_lock); 2008 2009 return (count); 2010 } 2011 2012 #ifdef LOCKF_DEBUG 2013 2014 /* 2015 * Return non-zero if y is reachable from x using a brute force 2016 * search. If reachable and path is non-null, return the route taken 2017 * in path. 2018 */ 2019 static int 2020 graph_reaches(struct owner_vertex *x, struct owner_vertex *y, 2021 struct owner_vertex_list *path) 2022 { 2023 struct owner_edge *e; 2024 2025 if (x == y) { 2026 if (path) 2027 TAILQ_INSERT_HEAD(path, x, v_link); 2028 return 1; 2029 } 2030 2031 LIST_FOREACH(e, &x->v_outedges, e_outlink) { 2032 if (graph_reaches(e->e_to, y, path)) { 2033 if (path) 2034 TAILQ_INSERT_HEAD(path, x, v_link); 2035 return 1; 2036 } 2037 } 2038 return 0; 2039 } 2040 2041 /* 2042 * Perform consistency checks on the graph. Make sure the values of 2043 * v_order are correct. If checkorder is non-zero, check no vertex can 2044 * reach any other vertex with a smaller order. 2045 */ 2046 static void 2047 graph_check(struct owner_graph *g, int checkorder) 2048 { 2049 int i, j; 2050 2051 for (i = 0; i < g->g_size; i++) { 2052 if (!g->g_vertices[i]->v_owner) 2053 continue; 2054 KASSERT(g->g_vertices[i]->v_order == i, 2055 ("lock graph vertices disordered")); 2056 if (checkorder) { 2057 for (j = 0; j < i; j++) { 2058 if (!g->g_vertices[j]->v_owner) 2059 continue; 2060 KASSERT(!graph_reaches(g->g_vertices[i], 2061 g->g_vertices[j], NULL), 2062 ("lock graph vertices disordered")); 2063 } 2064 } 2065 } 2066 } 2067 2068 static void 2069 graph_print_vertices(struct owner_vertex_list *set) 2070 { 2071 struct owner_vertex *v; 2072 2073 printf("{ "); 2074 TAILQ_FOREACH(v, set, v_link) { 2075 printf("%d:", v->v_order); 2076 lf_print_owner(v->v_owner); 2077 if (TAILQ_NEXT(v, v_link)) 2078 printf(", "); 2079 } 2080 printf(" }\n"); 2081 } 2082 2083 #endif 2084 2085 /* 2086 * Calculate the sub-set of vertices v from the affected region [y..x] 2087 * where v is reachable from y. Return -1 if a loop was detected 2088 * (i.e. x is reachable from y, otherwise the number of vertices in 2089 * this subset. 2090 */ 2091 static int 2092 graph_delta_forward(struct owner_graph *g, struct owner_vertex *x, 2093 struct owner_vertex *y, struct owner_vertex_list *delta) 2094 { 2095 uint32_t gen; 2096 struct owner_vertex *v; 2097 struct owner_edge *e; 2098 int n; 2099 2100 /* 2101 * We start with a set containing just y. Then for each vertex 2102 * v in the set so far unprocessed, we add each vertex that v 2103 * has an out-edge to and that is within the affected region 2104 * [y..x]. If we see the vertex x on our travels, stop 2105 * immediately. 2106 */ 2107 TAILQ_INIT(delta); 2108 TAILQ_INSERT_TAIL(delta, y, v_link); 2109 v = y; 2110 n = 1; 2111 gen = g->g_gen; 2112 while (v) { 2113 LIST_FOREACH(e, &v->v_outedges, e_outlink) { 2114 if (e->e_to == x) 2115 return -1; 2116 if (e->e_to->v_order < x->v_order 2117 && e->e_to->v_gen != gen) { 2118 e->e_to->v_gen = gen; 2119 TAILQ_INSERT_TAIL(delta, e->e_to, v_link); 2120 n++; 2121 } 2122 } 2123 v = TAILQ_NEXT(v, v_link); 2124 } 2125 2126 return (n); 2127 } 2128 2129 /* 2130 * Calculate the sub-set of vertices v from the affected region [y..x] 2131 * where v reaches x. Return the number of vertices in this subset. 2132 */ 2133 static int 2134 graph_delta_backward(struct owner_graph *g, struct owner_vertex *x, 2135 struct owner_vertex *y, struct owner_vertex_list *delta) 2136 { 2137 uint32_t gen; 2138 struct owner_vertex *v; 2139 struct owner_edge *e; 2140 int n; 2141 2142 /* 2143 * We start with a set containing just x. Then for each vertex 2144 * v in the set so far unprocessed, we add each vertex that v 2145 * has an in-edge from and that is within the affected region 2146 * [y..x]. 2147 */ 2148 TAILQ_INIT(delta); 2149 TAILQ_INSERT_TAIL(delta, x, v_link); 2150 v = x; 2151 n = 1; 2152 gen = g->g_gen; 2153 while (v) { 2154 LIST_FOREACH(e, &v->v_inedges, e_inlink) { 2155 if (e->e_from->v_order > y->v_order 2156 && e->e_from->v_gen != gen) { 2157 e->e_from->v_gen = gen; 2158 TAILQ_INSERT_HEAD(delta, e->e_from, v_link); 2159 n++; 2160 } 2161 } 2162 v = TAILQ_PREV(v, owner_vertex_list, v_link); 2163 } 2164 2165 return (n); 2166 } 2167 2168 static int 2169 graph_add_indices(int *indices, int n, struct owner_vertex_list *set) 2170 { 2171 struct owner_vertex *v; 2172 int i, j; 2173 2174 TAILQ_FOREACH(v, set, v_link) { 2175 for (i = n; 2176 i > 0 && indices[i - 1] > v->v_order; i--) 2177 ; 2178 for (j = n - 1; j >= i; j--) 2179 indices[j + 1] = indices[j]; 2180 indices[i] = v->v_order; 2181 n++; 2182 } 2183 2184 return (n); 2185 } 2186 2187 static int 2188 graph_assign_indices(struct owner_graph *g, int *indices, int nextunused, 2189 struct owner_vertex_list *set) 2190 { 2191 struct owner_vertex *v, *vlowest; 2192 2193 while (!TAILQ_EMPTY(set)) { 2194 vlowest = NULL; 2195 TAILQ_FOREACH(v, set, v_link) { 2196 if (!vlowest || v->v_order < vlowest->v_order) 2197 vlowest = v; 2198 } 2199 TAILQ_REMOVE(set, vlowest, v_link); 2200 vlowest->v_order = indices[nextunused]; 2201 g->g_vertices[vlowest->v_order] = vlowest; 2202 nextunused++; 2203 } 2204 2205 return (nextunused); 2206 } 2207 2208 static int 2209 graph_add_edge(struct owner_graph *g, struct owner_vertex *x, 2210 struct owner_vertex *y) 2211 { 2212 struct owner_edge *e; 2213 struct owner_vertex_list deltaF, deltaB; 2214 int nF, nB, n, vi, i; 2215 int *indices; 2216 2217 sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2218 2219 LIST_FOREACH(e, &x->v_outedges, e_outlink) { 2220 if (e->e_to == y) { 2221 e->e_refs++; 2222 return (0); 2223 } 2224 } 2225 2226 #ifdef LOCKF_DEBUG 2227 if (lockf_debug & 8) { 2228 printf("adding edge %d:", x->v_order); 2229 lf_print_owner(x->v_owner); 2230 printf(" -> %d:", y->v_order); 2231 lf_print_owner(y->v_owner); 2232 printf("\n"); 2233 } 2234 #endif 2235 if (y->v_order < x->v_order) { 2236 /* 2237 * The new edge violates the order. First find the set 2238 * of affected vertices reachable from y (deltaF) and 2239 * the set of affect vertices affected that reach x 2240 * (deltaB), using the graph generation number to 2241 * detect whether we have visited a given vertex 2242 * already. We re-order the graph so that each vertex 2243 * in deltaB appears before each vertex in deltaF. 2244 * 2245 * If x is a member of deltaF, then the new edge would 2246 * create a cycle. Otherwise, we may assume that 2247 * deltaF and deltaB are disjoint. 2248 */ 2249 g->g_gen++; 2250 if (g->g_gen == 0) { 2251 /* 2252 * Generation wrap. 2253 */ 2254 for (vi = 0; vi < g->g_size; vi++) { 2255 g->g_vertices[vi]->v_gen = 0; 2256 } 2257 g->g_gen++; 2258 } 2259 nF = graph_delta_forward(g, x, y, &deltaF); 2260 if (nF < 0) { 2261 #ifdef LOCKF_DEBUG 2262 if (lockf_debug & 8) { 2263 struct owner_vertex_list path; 2264 printf("deadlock: "); 2265 TAILQ_INIT(&path); 2266 graph_reaches(y, x, &path); 2267 graph_print_vertices(&path); 2268 } 2269 #endif 2270 return (EDEADLK); 2271 } 2272 2273 #ifdef LOCKF_DEBUG 2274 if (lockf_debug & 8) { 2275 printf("re-ordering graph vertices\n"); 2276 printf("deltaF = "); 2277 graph_print_vertices(&deltaF); 2278 } 2279 #endif 2280 2281 nB = graph_delta_backward(g, x, y, &deltaB); 2282 2283 #ifdef LOCKF_DEBUG 2284 if (lockf_debug & 8) { 2285 printf("deltaB = "); 2286 graph_print_vertices(&deltaB); 2287 } 2288 #endif 2289 2290 /* 2291 * We first build a set of vertex indices (vertex 2292 * order values) that we may use, then we re-assign 2293 * orders first to those vertices in deltaB, then to 2294 * deltaF. Note that the contents of deltaF and deltaB 2295 * may be partially disordered - we perform an 2296 * insertion sort while building our index set. 2297 */ 2298 indices = g->g_indexbuf; 2299 n = graph_add_indices(indices, 0, &deltaF); 2300 graph_add_indices(indices, n, &deltaB); 2301 2302 /* 2303 * We must also be sure to maintain the relative 2304 * ordering of deltaF and deltaB when re-assigning 2305 * vertices. We do this by iteratively removing the 2306 * lowest ordered element from the set and assigning 2307 * it the next value from our new ordering. 2308 */ 2309 i = graph_assign_indices(g, indices, 0, &deltaB); 2310 graph_assign_indices(g, indices, i, &deltaF); 2311 2312 #ifdef LOCKF_DEBUG 2313 if (lockf_debug & 8) { 2314 struct owner_vertex_list set; 2315 TAILQ_INIT(&set); 2316 for (i = 0; i < nB + nF; i++) 2317 TAILQ_INSERT_TAIL(&set, 2318 g->g_vertices[indices[i]], v_link); 2319 printf("new ordering = "); 2320 graph_print_vertices(&set); 2321 } 2322 #endif 2323 } 2324 2325 KASSERT(x->v_order < y->v_order, ("Failed to re-order graph")); 2326 2327 #ifdef LOCKF_DEBUG 2328 if (lockf_debug & 8) { 2329 graph_check(g, TRUE); 2330 } 2331 #endif 2332 2333 e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK); 2334 2335 LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink); 2336 LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink); 2337 e->e_refs = 1; 2338 e->e_from = x; 2339 e->e_to = y; 2340 2341 return (0); 2342 } 2343 2344 /* 2345 * Remove an edge x->y from the graph. 2346 */ 2347 static void 2348 graph_remove_edge(struct owner_graph *g, struct owner_vertex *x, 2349 struct owner_vertex *y) 2350 { 2351 struct owner_edge *e; 2352 2353 sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2354 2355 LIST_FOREACH(e, &x->v_outedges, e_outlink) { 2356 if (e->e_to == y) 2357 break; 2358 } 2359 KASSERT(e, ("Removing non-existent edge from deadlock graph")); 2360 2361 e->e_refs--; 2362 if (e->e_refs == 0) { 2363 #ifdef LOCKF_DEBUG 2364 if (lockf_debug & 8) { 2365 printf("removing edge %d:", x->v_order); 2366 lf_print_owner(x->v_owner); 2367 printf(" -> %d:", y->v_order); 2368 lf_print_owner(y->v_owner); 2369 printf("\n"); 2370 } 2371 #endif 2372 LIST_REMOVE(e, e_outlink); 2373 LIST_REMOVE(e, e_inlink); 2374 free(e, M_LOCKF); 2375 } 2376 } 2377 2378 /* 2379 * Allocate a vertex from the free list. Return ENOMEM if there are 2380 * none. 2381 */ 2382 static struct owner_vertex * 2383 graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo) 2384 { 2385 struct owner_vertex *v; 2386 2387 sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2388 2389 v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK); 2390 if (g->g_size == g->g_space) { 2391 g->g_vertices = realloc(g->g_vertices, 2392 2 * g->g_space * sizeof(struct owner_vertex *), 2393 M_LOCKF, M_WAITOK); 2394 free(g->g_indexbuf, M_LOCKF); 2395 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int), 2396 M_LOCKF, M_WAITOK); 2397 g->g_space = 2 * g->g_space; 2398 } 2399 v->v_order = g->g_size; 2400 v->v_gen = g->g_gen; 2401 g->g_vertices[g->g_size] = v; 2402 g->g_size++; 2403 2404 LIST_INIT(&v->v_outedges); 2405 LIST_INIT(&v->v_inedges); 2406 v->v_owner = lo; 2407 2408 return (v); 2409 } 2410 2411 static void 2412 graph_free_vertex(struct owner_graph *g, struct owner_vertex *v) 2413 { 2414 struct owner_vertex *w; 2415 int i; 2416 2417 sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2418 2419 KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges")); 2420 KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges")); 2421 2422 /* 2423 * Remove from the graph's array and close up the gap, 2424 * renumbering the other vertices. 2425 */ 2426 for (i = v->v_order + 1; i < g->g_size; i++) { 2427 w = g->g_vertices[i]; 2428 w->v_order--; 2429 g->g_vertices[i - 1] = w; 2430 } 2431 g->g_size--; 2432 2433 free(v, M_LOCKF); 2434 } 2435 2436 static struct owner_graph * 2437 graph_init(struct owner_graph *g) 2438 { 2439 2440 g->g_vertices = malloc(10 * sizeof(struct owner_vertex *), 2441 M_LOCKF, M_WAITOK); 2442 g->g_size = 0; 2443 g->g_space = 10; 2444 g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK); 2445 g->g_gen = 0; 2446 2447 return (g); 2448 } 2449 2450 #ifdef LOCKF_DEBUG 2451 /* 2452 * Print description of a lock owner 2453 */ 2454 static void 2455 lf_print_owner(struct lock_owner *lo) 2456 { 2457 2458 if (lo->lo_flags & F_REMOTE) { 2459 printf("remote pid %d, system %d", 2460 lo->lo_pid, lo->lo_sysid); 2461 } else if (lo->lo_flags & F_FLOCK) { 2462 printf("file %p", lo->lo_id); 2463 } else { 2464 printf("local pid %d", lo->lo_pid); 2465 } 2466 } 2467 2468 /* 2469 * Print out a lock. 2470 */ 2471 static void 2472 lf_print(char *tag, struct lockf_entry *lock) 2473 { 2474 2475 printf("%s: lock %p for ", tag, (void *)lock); 2476 lf_print_owner(lock->lf_owner); 2477 if (lock->lf_inode != (struct inode *)0) 2478 printf(" in ino %ju on dev <%s>,", 2479 (uintmax_t)lock->lf_inode->i_number, 2480 devtoname(lock->lf_inode->i_dev)); 2481 printf(" %s, start %jd, end ", 2482 lock->lf_type == F_RDLCK ? "shared" : 2483 lock->lf_type == F_WRLCK ? "exclusive" : 2484 lock->lf_type == F_UNLCK ? "unlock" : "unknown", 2485 (intmax_t)lock->lf_start); 2486 if (lock->lf_end == OFF_MAX) 2487 printf("EOF"); 2488 else 2489 printf("%jd", (intmax_t)lock->lf_end); 2490 if (!LIST_EMPTY(&lock->lf_outedges)) 2491 printf(" block %p\n", 2492 (void *)LIST_FIRST(&lock->lf_outedges)->le_to); 2493 else 2494 printf("\n"); 2495 } 2496 2497 static void 2498 lf_printlist(char *tag, struct lockf_entry *lock) 2499 { 2500 struct lockf_entry *lf, *blk; 2501 struct lockf_edge *e; 2502 2503 if (lock->lf_inode == (struct inode *)0) 2504 return; 2505 2506 printf("%s: Lock list for ino %ju on dev <%s>:\n", 2507 tag, (uintmax_t)lock->lf_inode->i_number, 2508 devtoname(lock->lf_inode->i_dev)); 2509 LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) { 2510 printf("\tlock %p for ",(void *)lf); 2511 lf_print_owner(lock->lf_owner); 2512 printf(", %s, start %jd, end %jd", 2513 lf->lf_type == F_RDLCK ? "shared" : 2514 lf->lf_type == F_WRLCK ? "exclusive" : 2515 lf->lf_type == F_UNLCK ? "unlock" : 2516 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end); 2517 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) { 2518 blk = e->le_to; 2519 printf("\n\t\tlock request %p for ", (void *)blk); 2520 lf_print_owner(blk->lf_owner); 2521 printf(", %s, start %jd, end %jd", 2522 blk->lf_type == F_RDLCK ? "shared" : 2523 blk->lf_type == F_WRLCK ? "exclusive" : 2524 blk->lf_type == F_UNLCK ? "unlock" : 2525 "unknown", (intmax_t)blk->lf_start, 2526 (intmax_t)blk->lf_end); 2527 if (!LIST_EMPTY(&blk->lf_inedges)) 2528 panic("lf_printlist: bad list"); 2529 } 2530 printf("\n"); 2531 } 2532 } 2533 #endif /* LOCKF_DEBUG */ 2534