1 /*- 2 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ 3 * Authors: Doug Rabson <dfr@rabson.org> 4 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 /*- 28 * Copyright (c) 1982, 1986, 1989, 1993 29 * The Regents of the University of California. All rights reserved. 30 * 31 * This code is derived from software contributed to Berkeley by 32 * Scooter Morris at Genentech Inc. 33 * 34 * Redistribution and use in source and binary forms, with or without 35 * modification, are permitted provided that the following conditions 36 * are met: 37 * 1. Redistributions of source code must retain the above copyright 38 * notice, this list of conditions and the following disclaimer. 39 * 2. Redistributions in binary form must reproduce the above copyright 40 * notice, this list of conditions and the following disclaimer in the 41 * documentation and/or other materials provided with the distribution. 42 * 4. Neither the name of the University nor the names of its contributors 43 * may be used to endorse or promote products derived from this software 44 * without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 56 * SUCH DAMAGE. 
57 * 58 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94 59 */ 60 61 #include <sys/cdefs.h> 62 __FBSDID("$FreeBSD$"); 63 64 #include "opt_debug_lockf.h" 65 66 #include <sys/param.h> 67 #include <sys/systm.h> 68 #include <sys/hash.h> 69 #include <sys/kernel.h> 70 #include <sys/limits.h> 71 #include <sys/lock.h> 72 #include <sys/mount.h> 73 #include <sys/mutex.h> 74 #include <sys/proc.h> 75 #include <sys/sx.h> 76 #include <sys/unistd.h> 77 #include <sys/vnode.h> 78 #include <sys/malloc.h> 79 #include <sys/fcntl.h> 80 #include <sys/lockf.h> 81 #include <sys/taskqueue.h> 82 83 #ifdef LOCKF_DEBUG 84 #include <sys/sysctl.h> 85 86 #include <ufs/ufs/quota.h> 87 #include <ufs/ufs/inode.h> 88 89 static int lockf_debug = 0; /* control debug output */ 90 SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, ""); 91 #endif 92 93 MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures"); 94 95 struct owner_edge; 96 struct owner_vertex; 97 struct owner_vertex_list; 98 struct owner_graph; 99 100 #define NOLOCKF (struct lockf_entry *)0 101 #define SELF 0x1 102 #define OTHERS 0x2 103 static void lf_init(void *); 104 static int lf_hash_owner(caddr_t, struct flock *, int); 105 static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *, 106 int); 107 static struct lockf_entry * 108 lf_alloc_lock(struct lock_owner *); 109 static void lf_free_lock(struct lockf_entry *); 110 static int lf_clearlock(struct lockf *, struct lockf_entry *); 111 static int lf_overlaps(struct lockf_entry *, struct lockf_entry *); 112 static int lf_blocks(struct lockf_entry *, struct lockf_entry *); 113 static void lf_free_edge(struct lockf_edge *); 114 static struct lockf_edge * 115 lf_alloc_edge(void); 116 static void lf_alloc_vertex(struct lockf_entry *); 117 static int lf_add_edge(struct lockf_entry *, struct lockf_entry *); 118 static void lf_remove_edge(struct lockf_edge *); 119 static void lf_remove_outgoing(struct lockf_entry *); 120 static void lf_remove_incoming(struct lockf_entry *); 121 static int lf_add_outgoing(struct lockf *, struct lockf_entry *); 122 static int lf_add_incoming(struct lockf *, struct lockf_entry *); 123 static int lf_findoverlap(struct lockf_entry **, struct lockf_entry *, 124 int); 125 static struct lockf_entry * 126 lf_getblock(struct lockf *, struct lockf_entry *); 127 static int lf_getlock(struct lockf *, struct lockf_entry *, struct flock *); 128 static void lf_insert_lock(struct lockf *, struct lockf_entry *); 129 static void lf_wakeup_lock(struct lockf *, struct lockf_entry *); 130 static void lf_update_dependancies(struct lockf *, struct lockf_entry *, 131 int all, struct lockf_entry_list *); 132 static void lf_set_start(struct lockf *, struct lockf_entry *, off_t, 133 struct lockf_entry_list*); 134 static void lf_set_end(struct lockf *, struct lockf_entry *, off_t, 135 struct lockf_entry_list*); 136 static int lf_setlock(struct lockf *, struct lockf_entry *, 137 struct vnode *, void **cookiep); 138 static int lf_cancel(struct lockf *, struct lockf_entry *, void *); 139 static void lf_split(struct lockf *, struct lockf_entry *, 140 struct lockf_entry *, struct lockf_entry_list *); 141 #ifdef LOCKF_DEBUG 142 static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y, 143 struct owner_vertex_list *path); 144 static void graph_check(struct owner_graph *g, int checkorder); 145 static void graph_print_vertices(struct owner_vertex_list *set); 146 #endif 147 static int graph_delta_forward(struct owner_graph *g, 148 struct owner_vertex *x, struct owner_vertex *y, 
149 struct owner_vertex_list *delta); 150 static int graph_delta_backward(struct owner_graph *g, 151 struct owner_vertex *x, struct owner_vertex *y, 152 struct owner_vertex_list *delta); 153 static int graph_add_indices(int *indices, int n, 154 struct owner_vertex_list *set); 155 static int graph_assign_indices(struct owner_graph *g, int *indices, 156 int nextunused, struct owner_vertex_list *set); 157 static int graph_add_edge(struct owner_graph *g, 158 struct owner_vertex *x, struct owner_vertex *y); 159 static void graph_remove_edge(struct owner_graph *g, 160 struct owner_vertex *x, struct owner_vertex *y); 161 static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g, 162 struct lock_owner *lo); 163 static void graph_free_vertex(struct owner_graph *g, 164 struct owner_vertex *v); 165 static struct owner_graph * graph_init(struct owner_graph *g); 166 #ifdef LOCKF_DEBUG 167 static void lf_print(char *, struct lockf_entry *); 168 static void lf_printlist(char *, struct lockf_entry *); 169 static void lf_print_owner(struct lock_owner *); 170 #endif 171 172 /* 173 * This structure is used to keep track of both local and remote lock 174 * owners. The lf_owner field of the struct lockf_entry points back at 175 * the lock owner structure. Each possible lock owner (local proc for 176 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid> 177 * pair for remote locks) is represented by a unique instance of 178 * struct lock_owner. 179 * 180 * If a lock owner has a lock that blocks some other lock or a lock 181 * that is waiting for some other lock, it also has a vertex in the 182 * owner_graph below. 183 * 184 * Locks: 185 * (s) locked by state->ls_lock 186 * (S) locked by lf_lock_states_lock 187 * (l) locked by lf_lock_owners_lock 188 * (g) locked by lf_owner_graph_lock 189 * (c) const until freeing 190 */ 191 #define LOCK_OWNER_HASH_SIZE 256 192 193 struct lock_owner { 194 LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */ 195 int lo_refs; /* (l) Number of locks referring to this */ 196 int lo_flags; /* (c) Flags passed to lf_advlock */ 197 caddr_t lo_id; /* (c) Id value passed to lf_advlock */ 198 pid_t lo_pid; /* (c) Process Id of the lock owner */ 199 int lo_sysid; /* (c) System Id of the lock owner */ 200 struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */ 201 }; 202 203 LIST_HEAD(lock_owner_list, lock_owner); 204 205 static struct sx lf_lock_states_lock; 206 static struct lockf_list lf_lock_states; /* (S) */ 207 static struct sx lf_lock_owners_lock; 208 static struct lock_owner_list lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */ 209 210 /* 211 * Structures for deadlock detection. 212 * 213 * We have two types of directed graph: the first is the set of locks, 214 * both active and pending on a vnode. Within this graph, active locks 215 * are terminal nodes in the graph (i.e. have no out-going 216 * edges). Pending locks have out-going edges to each blocking active 217 * lock that prevents the lock from being granted and also to each 218 * older pending lock that would block them if it was active. The 219 * graph for each vnode is naturally acyclic; new edges are only ever 220 * added to or from new nodes (either new pending locks which only add 221 * out-going edges or new active locks which only add in-coming edges), 222 * so they cannot create loops in the lock graph. 223 * 224 * The second graph is a global graph of lock owners.
Each lock owner 225 * is a vertex in that graph and an edge is added to the graph 226 * whenever an edge is added to a vnode graph, with end points 227 * corresponding to owner of the new pending lock and the owner of the 228 * lock upon which it waits. In order to prevent deadlock, we only add 229 * an edge to this graph if the new edge would not create a cycle. 230 * 231 * The lock owner graph is topologically sorted, i.e. if a node has 232 * any outgoing edges, then it has an order strictly less than any 233 * node to which it has an outgoing edge. We preserve this ordering 234 * (and detect cycles) on edge insertion using Algorithm PK from the 235 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic 236 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article 237 * No. 1.7) 238 */ 239 struct owner_vertex; 240 241 struct owner_edge { 242 LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */ 243 LIST_ENTRY(owner_edge) e_inlink; /* (g) link to's in-edge list */ 244 int e_refs; /* (g) number of times added */ 245 struct owner_vertex *e_from; /* (c) out-going from here */ 246 struct owner_vertex *e_to; /* (c) in-coming to here */ 247 }; 248 LIST_HEAD(owner_edge_list, owner_edge); 249 250 struct owner_vertex { 251 TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */ 252 uint32_t v_gen; /* (g) workspace for edge insertion */ 253 int v_order; /* (g) order of vertex in graph */ 254 struct owner_edge_list v_outedges;/* (g) list of out-edges */ 255 struct owner_edge_list v_inedges; /* (g) list of in-edges */ 256 struct lock_owner *v_owner; /* (c) corresponding lock owner */ 257 }; 258 TAILQ_HEAD(owner_vertex_list, owner_vertex); 259 260 struct owner_graph { 261 struct owner_vertex** g_vertices; /* (g) pointers to vertices */ 262 int g_size; /* (g) number of vertices */ 263 int g_space; /* (g) space allocated for vertices */ 264 int *g_indexbuf; /* (g) workspace for loop detection */ 265 uint32_t g_gen; /* (g) increment when re-ordering */ 266 }; 267 268 static struct sx lf_owner_graph_lock; 269 static struct owner_graph lf_owner_graph; 270 271 /* 272 * Initialise various structures and locks. 273 */ 274 static void 275 lf_init(void *dummy) 276 { 277 int i; 278 279 sx_init(&lf_lock_states_lock, "lock states lock"); 280 LIST_INIT(&lf_lock_states); 281 282 sx_init(&lf_lock_owners_lock, "lock owners lock"); 283 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) 284 LIST_INIT(&lf_lock_owners[i]); 285 286 sx_init(&lf_owner_graph_lock, "owner graph lock"); 287 graph_init(&lf_owner_graph); 288 } 289 SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL); 290 291 /* 292 * Generate a hash value for a lock owner. 293 */ 294 static int 295 lf_hash_owner(caddr_t id, struct flock *fl, int flags) 296 { 297 uint32_t h; 298 299 if (flags & F_REMOTE) { 300 h = HASHSTEP(0, fl->l_pid); 301 h = HASHSTEP(h, fl->l_sysid); 302 } else if (flags & F_FLOCK) { 303 h = ((uintptr_t) id) >> 7; 304 } else { 305 struct proc *p = (struct proc *) id; 306 h = HASHSTEP(0, p->p_pid); 307 h = HASHSTEP(h, 0); 308 } 309 310 return (h % LOCK_OWNER_HASH_SIZE); 311 } 312 313 /* 314 * Return true if a lock owner matches the details passed to 315 * lf_advlock. 
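 *
 * For illustration: a request made with F_REMOTE matches on the
 * <pid, sysid> pair, so a remote request with l_pid 1234 and l_sysid 2
 * matches only the owner recorded for that same pair, while a local
 * request matches on the opaque 'id' pointer (the struct proc for
 * POSIX fcntl locks, the struct file for BSD flock locks).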
316 */ 317 static int 318 lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl, 319 int flags) 320 { 321 if (flags & F_REMOTE) { 322 return lo->lo_pid == fl->l_pid 323 && lo->lo_sysid == fl->l_sysid; 324 } else { 325 return lo->lo_id == id; 326 } 327 } 328 329 static struct lockf_entry * 330 lf_alloc_lock(struct lock_owner *lo) 331 { 332 struct lockf_entry *lf; 333 334 lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO); 335 336 #ifdef LOCKF_DEBUG 337 if (lockf_debug & 4) 338 printf("Allocated lock %p\n", lf); 339 #endif 340 if (lo) { 341 sx_xlock(&lf_lock_owners_lock); 342 lo->lo_refs++; 343 sx_xunlock(&lf_lock_owners_lock); 344 lf->lf_owner = lo; 345 } 346 347 return (lf); 348 } 349 350 static void 351 lf_free_lock(struct lockf_entry *lock) 352 { 353 /* 354 * Adjust the lock_owner reference count and 355 * reclaim the entry if this is the last lock 356 * for that owner. 357 */ 358 struct lock_owner *lo = lock->lf_owner; 359 if (lo) { 360 KASSERT(LIST_EMPTY(&lock->lf_outedges), 361 ("freeing lock with dependancies")); 362 KASSERT(LIST_EMPTY(&lock->lf_inedges), 363 ("freeing lock with dependants")); 364 sx_xlock(&lf_lock_owners_lock); 365 KASSERT(lo->lo_refs > 0, ("lock owner refcount")); 366 lo->lo_refs--; 367 if (lo->lo_refs == 0) { 368 #ifdef LOCKF_DEBUG 369 if (lockf_debug & 1) 370 printf("lf_free_lock: freeing lock owner %p\n", 371 lo); 372 #endif 373 if (lo->lo_vertex) { 374 sx_xlock(&lf_owner_graph_lock); 375 graph_free_vertex(&lf_owner_graph, 376 lo->lo_vertex); 377 sx_xunlock(&lf_owner_graph_lock); 378 } 379 LIST_REMOVE(lo, lo_link); 380 free(lo, M_LOCKF); 381 #ifdef LOCKF_DEBUG 382 if (lockf_debug & 4) 383 printf("Freed lock owner %p\n", lo); 384 #endif 385 } 386 sx_unlock(&lf_lock_owners_lock); 387 } 388 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) { 389 vrele(lock->lf_vnode); 390 lock->lf_vnode = NULL; 391 } 392 #ifdef LOCKF_DEBUG 393 if (lockf_debug & 4) 394 printf("Freed lock %p\n", lock); 395 #endif 396 free(lock, M_LOCKF); 397 } 398 399 /* 400 * Advisory record locking support 401 */ 402 int 403 lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep, 404 u_quad_t size) 405 { 406 struct lockf *state, *freestate = NULL; 407 struct flock *fl = ap->a_fl; 408 struct lockf_entry *lock; 409 struct vnode *vp = ap->a_vp; 410 caddr_t id = ap->a_id; 411 int flags = ap->a_flags; 412 int hash; 413 struct lock_owner *lo; 414 off_t start, end, oadd; 415 int error; 416 417 /* 418 * Handle the F_UNLKSYS case first - no need to mess about 419 * creating a lock owner for this one. 420 */ 421 if (ap->a_op == F_UNLCKSYS) { 422 lf_clearremotesys(fl->l_sysid); 423 return (0); 424 } 425 426 /* 427 * Convert the flock structure into a start and end. 428 */ 429 switch (fl->l_whence) { 430 431 case SEEK_SET: 432 case SEEK_CUR: 433 /* 434 * Caller is responsible for adding any necessary offset 435 * when SEEK_CUR is used. 
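 *
 * For illustration, the conversion below always produces an inclusive
 * byte range:
 *
 *	l_start = 100, l_len = 10   ->  [100..109]
 *	l_start = 100, l_len = 0    ->  [100..OFF_MAX]  (to end of file)
 *	l_start = 100, l_len = -10  ->  [90..99]        (ends just before l_start)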
436 */ 437 start = fl->l_start; 438 break; 439 440 case SEEK_END: 441 if (size > OFF_MAX || 442 (fl->l_start > 0 && size > OFF_MAX - fl->l_start)) 443 return (EOVERFLOW); 444 start = size + fl->l_start; 445 break; 446 447 default: 448 return (EINVAL); 449 } 450 if (start < 0) 451 return (EINVAL); 452 if (fl->l_len < 0) { 453 if (start == 0) 454 return (EINVAL); 455 end = start - 1; 456 start += fl->l_len; 457 if (start < 0) 458 return (EINVAL); 459 } else if (fl->l_len == 0) { 460 end = OFF_MAX; 461 } else { 462 oadd = fl->l_len - 1; 463 if (oadd > OFF_MAX - start) 464 return (EOVERFLOW); 465 end = start + oadd; 466 } 467 /* 468 * Avoid the common case of unlocking when inode has no locks. 469 */ 470 VI_LOCK(vp); 471 if ((*statep) == NULL) { 472 if (ap->a_op != F_SETLK) { 473 fl->l_type = F_UNLCK; 474 VI_UNLOCK(vp); 475 return (0); 476 } 477 } 478 VI_UNLOCK(vp); 479 480 /* 481 * Map our arguments to an existing lock owner or create one 482 * if this is the first time we have seen this owner. 483 */ 484 hash = lf_hash_owner(id, fl, flags); 485 sx_xlock(&lf_lock_owners_lock); 486 LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link) 487 if (lf_owner_matches(lo, id, fl, flags)) 488 break; 489 if (!lo) { 490 /* 491 * We initialise the lock with a reference 492 * count which matches the new lockf_entry 493 * structure created below. 494 */ 495 lo = malloc(sizeof(struct lock_owner), M_LOCKF, 496 M_WAITOK|M_ZERO); 497 #ifdef LOCKF_DEBUG 498 if (lockf_debug & 4) 499 printf("Allocated lock owner %p\n", lo); 500 #endif 501 502 lo->lo_refs = 1; 503 lo->lo_flags = flags; 504 lo->lo_id = id; 505 if (flags & F_REMOTE) { 506 lo->lo_pid = fl->l_pid; 507 lo->lo_sysid = fl->l_sysid; 508 } else if (flags & F_FLOCK) { 509 lo->lo_pid = -1; 510 lo->lo_sysid = 0; 511 } else { 512 struct proc *p = (struct proc *) id; 513 lo->lo_pid = p->p_pid; 514 lo->lo_sysid = 0; 515 } 516 lo->lo_vertex = NULL; 517 518 #ifdef LOCKF_DEBUG 519 if (lockf_debug & 1) { 520 printf("lf_advlockasync: new lock owner %p ", lo); 521 lf_print_owner(lo); 522 printf("\n"); 523 } 524 #endif 525 526 LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link); 527 } else { 528 /* 529 * We have seen this lock owner before, increase its 530 * reference count to account for the new lockf_entry 531 * structure we create below. 532 */ 533 lo->lo_refs++; 534 } 535 sx_xunlock(&lf_lock_owners_lock); 536 537 /* 538 * Create the lockf structure. We initialise the lf_owner 539 * field here instead of in lf_alloc_lock() to avoid paying 540 * the lf_lock_owners_lock tax twice. 541 */ 542 lock = lf_alloc_lock(NULL); 543 lock->lf_start = start; 544 lock->lf_end = end; 545 lock->lf_owner = lo; 546 lock->lf_vnode = vp; 547 if (flags & F_REMOTE) { 548 /* 549 * For remote locks, the caller may release its ref to 550 * the vnode at any time - we have to ref it here to 551 * prevent it from being recycled unexpectedly. 552 */ 553 vref(vp); 554 } 555 556 /* 557 * XXX The problem is that VTOI is ufs specific, so it will 558 * break LOCKF_DEBUG for all other FS's other than UFS because 559 * it casts the vnode->data ptr to struct inode *. 560 */ 561 /* lock->lf_inode = VTOI(ap->a_vp); */ 562 lock->lf_inode = (struct inode *)0; 563 lock->lf_type = fl->l_type; 564 LIST_INIT(&lock->lf_outedges); 565 LIST_INIT(&lock->lf_inedges); 566 lock->lf_async_task = ap->a_task; 567 lock->lf_flags = ap->a_flags; 568 569 /* 570 * Do the requested operation. 
First find our state structure 571 * and create a new one if necessary - the caller's *statep 572 * variable and the state's ls_threads count is protected by 573 * the vnode interlock. 574 */ 575 VI_LOCK(vp); 576 if (vp->v_iflag & VI_DOOMED) { 577 VI_UNLOCK(vp); 578 lf_free_lock(lock); 579 return (ENOENT); 580 } 581 582 /* 583 * Allocate a state structure if necessary. 584 */ 585 state = *statep; 586 if (state == NULL) { 587 struct lockf *ls; 588 589 VI_UNLOCK(vp); 590 591 ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO); 592 sx_init(&ls->ls_lock, "ls_lock"); 593 LIST_INIT(&ls->ls_active); 594 LIST_INIT(&ls->ls_pending); 595 ls->ls_threads = 1; 596 597 sx_xlock(&lf_lock_states_lock); 598 LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link); 599 sx_xunlock(&lf_lock_states_lock); 600 601 /* 602 * Cope if we lost a race with some other thread while 603 * trying to allocate memory. 604 */ 605 VI_LOCK(vp); 606 if (vp->v_iflag & VI_DOOMED) { 607 VI_UNLOCK(vp); 608 sx_xlock(&lf_lock_states_lock); 609 LIST_REMOVE(ls, ls_link); 610 sx_xunlock(&lf_lock_states_lock); 611 sx_destroy(&ls->ls_lock); 612 free(ls, M_LOCKF); 613 lf_free_lock(lock); 614 return (ENOENT); 615 } 616 if ((*statep) == NULL) { 617 state = *statep = ls; 618 VI_UNLOCK(vp); 619 } else { 620 state = *statep; 621 state->ls_threads++; 622 VI_UNLOCK(vp); 623 624 sx_xlock(&lf_lock_states_lock); 625 LIST_REMOVE(ls, ls_link); 626 sx_xunlock(&lf_lock_states_lock); 627 sx_destroy(&ls->ls_lock); 628 free(ls, M_LOCKF); 629 } 630 } else { 631 state->ls_threads++; 632 VI_UNLOCK(vp); 633 } 634 635 sx_xlock(&state->ls_lock); 636 switch(ap->a_op) { 637 case F_SETLK: 638 error = lf_setlock(state, lock, vp, ap->a_cookiep); 639 break; 640 641 case F_UNLCK: 642 error = lf_clearlock(state, lock); 643 lf_free_lock(lock); 644 break; 645 646 case F_GETLK: 647 error = lf_getlock(state, lock, fl); 648 lf_free_lock(lock); 649 break; 650 651 case F_CANCEL: 652 if (ap->a_cookiep) 653 error = lf_cancel(state, lock, *ap->a_cookiep); 654 else 655 error = EINVAL; 656 lf_free_lock(lock); 657 break; 658 659 default: 660 lf_free_lock(lock); 661 error = EINVAL; 662 break; 663 } 664 665 #ifdef INVARIANTS 666 /* 667 * Check for some can't happen stuff. In this case, the active 668 * lock list becoming disordered or containing mutually 669 * blocking locks. We also check the pending list for locks 670 * which should be active (i.e. have no out-going edges). 671 */ 672 LIST_FOREACH(lock, &state->ls_active, lf_link) { 673 struct lockf_entry *lf; 674 if (LIST_NEXT(lock, lf_link)) 675 KASSERT((lock->lf_start 676 <= LIST_NEXT(lock, lf_link)->lf_start), 677 ("locks disordered")); 678 LIST_FOREACH(lf, &state->ls_active, lf_link) { 679 if (lock == lf) 680 break; 681 KASSERT(!lf_blocks(lock, lf), 682 ("two conflicting active locks")); 683 if (lock->lf_owner == lf->lf_owner) 684 KASSERT(!lf_overlaps(lock, lf), 685 ("two overlapping locks from same owner")); 686 } 687 } 688 LIST_FOREACH(lock, &state->ls_pending, lf_link) { 689 KASSERT(!LIST_EMPTY(&lock->lf_outedges), 690 ("pending lock which should be active")); 691 } 692 #endif 693 sx_xunlock(&state->ls_lock); 694 695 /* 696 * If we have removed the last active lock on the vnode and 697 * this is the last thread that was in-progress, we can free 698 * the state structure. We update the caller's pointer inside 699 * the vnode interlock but call free outside. 700 * 701 * XXX alternatively, keep the state structure around until 702 * the filesystem recycles - requires a callback from the 703 * filesystem. 
704 */ 705 VI_LOCK(vp); 706 707 state->ls_threads--; 708 wakeup(state); 709 if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) { 710 KASSERT(LIST_EMPTY(&state->ls_pending), 711 ("freeing state with pending locks")); 712 freestate = state; 713 *statep = NULL; 714 } 715 716 VI_UNLOCK(vp); 717 718 if (freestate) { 719 sx_xlock(&lf_lock_states_lock); 720 LIST_REMOVE(freestate, ls_link); 721 sx_xunlock(&lf_lock_states_lock); 722 sx_destroy(&freestate->ls_lock); 723 free(freestate, M_LOCKF); 724 } 725 return (error); 726 } 727 728 int 729 lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size) 730 { 731 struct vop_advlockasync_args a; 732 733 a.a_vp = ap->a_vp; 734 a.a_id = ap->a_id; 735 a.a_op = ap->a_op; 736 a.a_fl = ap->a_fl; 737 a.a_flags = ap->a_flags; 738 a.a_task = NULL; 739 a.a_cookiep = NULL; 740 741 return (lf_advlockasync(&a, statep, size)); 742 } 743 744 void 745 lf_purgelocks(struct vnode *vp, struct lockf **statep) 746 { 747 struct lockf *state; 748 struct lockf_entry *lock, *nlock; 749 750 /* 751 * For this to work correctly, the caller must ensure that no 752 * other threads enter the locking system for this vnode, 753 * e.g. by checking VI_DOOMED. We wake up any threads that are 754 * sleeping waiting for locks on this vnode and then free all 755 * the remaining locks. 756 */ 757 VI_LOCK(vp); 758 state = *statep; 759 if (state) { 760 state->ls_threads++; 761 VI_UNLOCK(vp); 762 763 sx_xlock(&state->ls_lock); 764 sx_xlock(&lf_owner_graph_lock); 765 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) { 766 LIST_REMOVE(lock, lf_link); 767 lf_remove_outgoing(lock); 768 lf_remove_incoming(lock); 769 770 /* 771 * If its an async lock, we can just free it 772 * here, otherwise we let the sleeping thread 773 * free it. 774 */ 775 if (lock->lf_async_task) { 776 lf_free_lock(lock); 777 } else { 778 lock->lf_flags |= F_INTR; 779 wakeup(lock); 780 } 781 } 782 sx_xunlock(&lf_owner_graph_lock); 783 sx_xunlock(&state->ls_lock); 784 785 /* 786 * Wait for all other threads, sleeping and otherwise 787 * to leave. 788 */ 789 VI_LOCK(vp); 790 while (state->ls_threads > 1) 791 msleep(state, VI_MTX(vp), 0, "purgelocks", 0); 792 *statep = 0; 793 VI_UNLOCK(vp); 794 795 /* 796 * We can just free all the active locks since they 797 * will have no dependancies (we removed them all 798 * above). We don't need to bother locking since we 799 * are the last thread using this state structure. 800 */ 801 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) { 802 LIST_REMOVE(lock, lf_link); 803 lf_free_lock(lock); 804 } 805 sx_xlock(&lf_lock_states_lock); 806 LIST_REMOVE(state, ls_link); 807 sx_xunlock(&lf_lock_states_lock); 808 sx_destroy(&state->ls_lock); 809 free(state, M_LOCKF); 810 } else { 811 VI_UNLOCK(vp); 812 } 813 } 814 815 /* 816 * Return non-zero if locks 'x' and 'y' overlap. 817 */ 818 static int 819 lf_overlaps(struct lockf_entry *x, struct lockf_entry *y) 820 { 821 822 return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start); 823 } 824 825 /* 826 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa). 
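 *
 * A worked example: [0..9] and [5..15] overlap, so they block each
 * other if and only if they belong to different owners and at least
 * one of them is F_WRLCK; two F_RDLCK locks never block each other,
 * and locks from the same owner never block each other regardless of
 * type.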
827 */ 828 static int 829 lf_blocks(struct lockf_entry *x, struct lockf_entry *y) 830 { 831 832 return x->lf_owner != y->lf_owner 833 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK) 834 && lf_overlaps(x, y); 835 } 836 837 /* 838 * Allocate a lock edge from the free list 839 */ 840 static struct lockf_edge * 841 lf_alloc_edge(void) 842 { 843 844 return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO)); 845 } 846 847 /* 848 * Free a lock edge. 849 */ 850 static void 851 lf_free_edge(struct lockf_edge *e) 852 { 853 854 free(e, M_LOCKF); 855 } 856 857 858 /* 859 * Ensure that the lock's owner has a corresponding vertex in the 860 * owner graph. 861 */ 862 static void 863 lf_alloc_vertex(struct lockf_entry *lock) 864 { 865 struct owner_graph *g = &lf_owner_graph; 866 867 if (!lock->lf_owner->lo_vertex) 868 lock->lf_owner->lo_vertex = 869 graph_alloc_vertex(g, lock->lf_owner); 870 } 871 872 /* 873 * Attempt to record an edge from lock x to lock y. Return EDEADLK if 874 * the new edge would cause a cycle in the owner graph. 875 */ 876 static int 877 lf_add_edge(struct lockf_entry *x, struct lockf_entry *y) 878 { 879 struct owner_graph *g = &lf_owner_graph; 880 struct lockf_edge *e; 881 int error; 882 883 #ifdef INVARIANTS 884 LIST_FOREACH(e, &x->lf_outedges, le_outlink) 885 KASSERT(e->le_to != y, ("adding lock edge twice")); 886 #endif 887 888 /* 889 * Make sure the two owners have entries in the owner graph. 890 */ 891 lf_alloc_vertex(x); 892 lf_alloc_vertex(y); 893 894 error = graph_add_edge(g, x->lf_owner->lo_vertex, 895 y->lf_owner->lo_vertex); 896 if (error) 897 return (error); 898 899 e = lf_alloc_edge(); 900 LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink); 901 LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink); 902 e->le_from = x; 903 e->le_to = y; 904 905 return (0); 906 } 907 908 /* 909 * Remove an edge from the lock graph. 910 */ 911 static void 912 lf_remove_edge(struct lockf_edge *e) 913 { 914 struct owner_graph *g = &lf_owner_graph; 915 struct lockf_entry *x = e->le_from; 916 struct lockf_entry *y = e->le_to; 917 918 graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex); 919 LIST_REMOVE(e, le_outlink); 920 LIST_REMOVE(e, le_inlink); 921 e->le_from = NULL; 922 e->le_to = NULL; 923 lf_free_edge(e); 924 } 925 926 /* 927 * Remove all out-going edges from lock x. 928 */ 929 static void 930 lf_remove_outgoing(struct lockf_entry *x) 931 { 932 struct lockf_edge *e; 933 934 while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) { 935 lf_remove_edge(e); 936 } 937 } 938 939 /* 940 * Remove all in-coming edges from lock x. 941 */ 942 static void 943 lf_remove_incoming(struct lockf_entry *x) 944 { 945 struct lockf_edge *e; 946 947 while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) { 948 lf_remove_edge(e); 949 } 950 } 951 952 /* 953 * Walk the list of locks for the file and create an out-going edge 954 * from lock to each blocking lock. 955 */ 956 static int 957 lf_add_outgoing(struct lockf *state, struct lockf_entry *lock) 958 { 959 struct lockf_entry *overlap; 960 int error; 961 962 LIST_FOREACH(overlap, &state->ls_active, lf_link) { 963 /* 964 * We may assume that the active list is sorted by 965 * lf_start. 966 */ 967 if (overlap->lf_start > lock->lf_end) 968 break; 969 if (!lf_blocks(lock, overlap)) 970 continue; 971 972 /* 973 * We've found a blocking lock. Add the corresponding 974 * edge to the graphs and see if it would cause a 975 * deadlock. 976 */ 977 error = lf_add_edge(lock, overlap); 978 979 /* 980 * The only error that lf_add_edge returns is EDEADLK. 
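 * (In other words, waiting here would close a cycle of owners, e.g.
 * owner A waiting on one of B's locks while B is already waiting on
 * one of A's.)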
981 * Remove any edges we added and return the error. 982 */ 983 if (error) { 984 lf_remove_outgoing(lock); 985 return (error); 986 } 987 } 988 989 /* 990 * We also need to add edges to sleeping locks that block 991 * us. This ensures that lf_wakeup_lock cannot grant two 992 * mutually blocking locks simultaneously and also enforces a 993 * 'first come, first served' fairness model. Note that this 994 * only happens if we are blocked by at least one active lock 995 * due to the call to lf_getblock in lf_setlock below. 996 */ 997 LIST_FOREACH(overlap, &state->ls_pending, lf_link) { 998 if (!lf_blocks(lock, overlap)) 999 continue; 1000 /* 1001 * We've found a blocking lock. Add the corresponding 1002 * edge to the graphs and see if it would cause a 1003 * deadlock. 1004 */ 1005 error = lf_add_edge(lock, overlap); 1006 1007 /* 1008 * The only error that lf_add_edge returns is EDEADLK. 1009 * Remove any edges we added and return the error. 1010 */ 1011 if (error) { 1012 lf_remove_outgoing(lock); 1013 return (error); 1014 } 1015 } 1016 1017 return (0); 1018 } 1019 1020 /* 1021 * Walk the list of pending locks for the file and create an in-coming 1022 * edge from lock to each blocking lock. 1023 */ 1024 static int 1025 lf_add_incoming(struct lockf *state, struct lockf_entry *lock) 1026 { 1027 struct lockf_entry *overlap; 1028 int error; 1029 1030 LIST_FOREACH(overlap, &state->ls_pending, lf_link) { 1031 if (!lf_blocks(lock, overlap)) 1032 continue; 1033 1034 /* 1035 * We've found a blocking lock. Add the corresponding 1036 * edge to the graphs and see if it would cause a 1037 * deadlock. 1038 */ 1039 error = lf_add_edge(overlap, lock); 1040 1041 /* 1042 * The only error that lf_add_edge returns is EDEADLK. 1043 * Remove any edges we added and return the error. 1044 */ 1045 if (error) { 1046 lf_remove_incoming(lock); 1047 return (error); 1048 } 1049 } 1050 return (0); 1051 } 1052 1053 /* 1054 * Insert lock into the active list, keeping list entries ordered by 1055 * increasing values of lf_start. 1056 */ 1057 static void 1058 lf_insert_lock(struct lockf *state, struct lockf_entry *lock) 1059 { 1060 struct lockf_entry *lf, *lfprev; 1061 1062 if (LIST_EMPTY(&state->ls_active)) { 1063 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link); 1064 return; 1065 } 1066 1067 lfprev = NULL; 1068 LIST_FOREACH(lf, &state->ls_active, lf_link) { 1069 if (lf->lf_start > lock->lf_start) { 1070 LIST_INSERT_BEFORE(lf, lock, lf_link); 1071 return; 1072 } 1073 lfprev = lf; 1074 } 1075 LIST_INSERT_AFTER(lfprev, lock, lf_link); 1076 } 1077 1078 /* 1079 * Wake up a sleeping lock and remove it from the pending list now 1080 * that all its dependancies have been resolved. The caller should 1081 * arrange for the lock to be added to the active list, adjusting any 1082 * existing locks for the same owner as needed. 1083 */ 1084 static void 1085 lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock) 1086 { 1087 1088 /* 1089 * Remove from ls_pending list and wake up the caller 1090 * or start the async notification, as appropriate. 1091 */ 1092 LIST_REMOVE(wakelock, lf_link); 1093 #ifdef LOCKF_DEBUG 1094 if (lockf_debug & 1) 1095 lf_print("lf_wakeup_lock: awakening", wakelock); 1096 #endif /* LOCKF_DEBUG */ 1097 if (wakelock->lf_async_task) { 1098 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task); 1099 } else { 1100 wakeup(wakelock); 1101 } 1102 } 1103 1104 /* 1105 * Re-check all dependant locks and remove edges to locks that we no 1106 * longer block. 
If 'all' is non-zero, the lock has been removed and 1107 * we must remove all the dependancies, otherwise it has simply been 1108 * reduced but remains active. Any pending locks which have been 1109 * unblocked are added to 'granted'. 1110 */ 1111 static void 1112 lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all, 1113 struct lockf_entry_list *granted) 1114 { 1115 struct lockf_edge *e, *ne; 1116 struct lockf_entry *deplock; 1117 1118 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) { 1119 deplock = e->le_from; 1120 if (all || !lf_blocks(lock, deplock)) { 1121 sx_xlock(&lf_owner_graph_lock); 1122 lf_remove_edge(e); 1123 sx_xunlock(&lf_owner_graph_lock); 1124 if (LIST_EMPTY(&deplock->lf_outedges)) { 1125 lf_wakeup_lock(state, deplock); 1126 LIST_INSERT_HEAD(granted, deplock, lf_link); 1127 } 1128 } 1129 } 1130 } 1131 1132 /* 1133 * Set the start of an existing active lock, updating dependancies and 1134 * adding any newly woken locks to 'granted'. 1135 */ 1136 static void 1137 lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start, 1138 struct lockf_entry_list *granted) 1139 { 1140 1141 KASSERT(new_start >= lock->lf_start, ("can't increase lock")); 1142 lock->lf_start = new_start; 1143 LIST_REMOVE(lock, lf_link); 1144 lf_insert_lock(state, lock); 1145 lf_update_dependancies(state, lock, FALSE, granted); 1146 } 1147 1148 /* 1149 * Set the end of an existing active lock, updating dependancies and 1150 * adding any newly woken locks to 'granted'. 1151 */ 1152 static void 1153 lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end, 1154 struct lockf_entry_list *granted) 1155 { 1156 1157 KASSERT(new_end <= lock->lf_end, ("can't increase lock")); 1158 lock->lf_end = new_end; 1159 lf_update_dependancies(state, lock, FALSE, granted); 1160 } 1161 1162 /* 1163 * Add a lock to the active list, updating or removing any current 1164 * locks owned by the same owner and processing any pending locks that 1165 * become unblocked as a result. This code is also used for unlock 1166 * since the logic for updating existing locks is identical. 1167 * 1168 * As a result of processing the new lock, we may unblock existing 1169 * pending locks as a result of downgrading/unlocking. We simply 1170 * activate the newly granted locks by looping. 1171 * 1172 * Since the new lock already has its dependancies set up, we always 1173 * add it to the list (unless it's an unlock request). This may 1174 * fragment the lock list in some pathological cases but it's probably 1175 * not a real problem. 1176 */ 1177 static void 1178 lf_activate_lock(struct lockf *state, struct lockf_entry *lock) 1179 { 1180 struct lockf_entry *overlap, *lf; 1181 struct lockf_entry_list granted; 1182 int ovcase; 1183 1184 LIST_INIT(&granted); 1185 LIST_INSERT_HEAD(&granted, lock, lf_link); 1186 1187 while (!LIST_EMPTY(&granted)) { 1188 lock = LIST_FIRST(&granted); 1189 LIST_REMOVE(lock, lf_link); 1190 1191 /* 1192 * Skip over locks owned by other processes. Handle 1193 * any locks that overlap and are owned by ourselves.
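 *
 * For example, if this owner already holds a write lock over [0..99]
 * and the new lock is a read lock over [40..49], the scan below hits
 * case 2 and lf_split carves the old lock into [0..39] and [50..99];
 * the new [40..49] read lock is then inserted, and any pending reader
 * that was blocked only by bytes [40..49] ends up on 'granted'.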
1194 */ 1195 overlap = LIST_FIRST(&state->ls_active); 1196 for (;;) { 1197 ovcase = lf_findoverlap(&overlap, lock, SELF); 1198 1199 #ifdef LOCKF_DEBUG 1200 if (ovcase && (lockf_debug & 2)) { 1201 printf("lf_setlock: overlap %d", ovcase); 1202 lf_print("", overlap); 1203 } 1204 #endif 1205 /* 1206 * Six cases: 1207 * 0) no overlap 1208 * 1) overlap == lock 1209 * 2) overlap contains lock 1210 * 3) lock contains overlap 1211 * 4) overlap starts before lock 1212 * 5) overlap ends after lock 1213 */ 1214 switch (ovcase) { 1215 case 0: /* no overlap */ 1216 break; 1217 1218 case 1: /* overlap == lock */ 1219 /* 1220 * We have already setup the 1221 * dependants for the new lock, taking 1222 * into account a possible downgrade 1223 * or unlock. Remove the old lock. 1224 */ 1225 LIST_REMOVE(overlap, lf_link); 1226 lf_update_dependancies(state, overlap, TRUE, 1227 &granted); 1228 lf_free_lock(overlap); 1229 break; 1230 1231 case 2: /* overlap contains lock */ 1232 /* 1233 * Just split the existing lock. 1234 */ 1235 lf_split(state, overlap, lock, &granted); 1236 break; 1237 1238 case 3: /* lock contains overlap */ 1239 /* 1240 * Delete the overlap and advance to 1241 * the next entry in the list. 1242 */ 1243 lf = LIST_NEXT(overlap, lf_link); 1244 LIST_REMOVE(overlap, lf_link); 1245 lf_update_dependancies(state, overlap, TRUE, 1246 &granted); 1247 lf_free_lock(overlap); 1248 overlap = lf; 1249 continue; 1250 1251 case 4: /* overlap starts before lock */ 1252 /* 1253 * Just update the overlap end and 1254 * move on. 1255 */ 1256 lf_set_end(state, overlap, lock->lf_start - 1, 1257 &granted); 1258 overlap = LIST_NEXT(overlap, lf_link); 1259 continue; 1260 1261 case 5: /* overlap ends after lock */ 1262 /* 1263 * Change the start of overlap and 1264 * re-insert. 1265 */ 1266 lf_set_start(state, overlap, lock->lf_end + 1, 1267 &granted); 1268 break; 1269 } 1270 break; 1271 } 1272 #ifdef LOCKF_DEBUG 1273 if (lockf_debug & 1) { 1274 if (lock->lf_type != F_UNLCK) 1275 lf_print("lf_activate_lock: activated", lock); 1276 else 1277 lf_print("lf_activate_lock: unlocked", lock); 1278 lf_printlist("lf_activate_lock", lock); 1279 } 1280 #endif /* LOCKF_DEBUG */ 1281 if (lock->lf_type != F_UNLCK) 1282 lf_insert_lock(state, lock); 1283 } 1284 } 1285 1286 /* 1287 * Cancel a pending lock request, either as a result of a signal or a 1288 * cancel request for an async lock. 1289 */ 1290 static void 1291 lf_cancel_lock(struct lockf *state, struct lockf_entry *lock) 1292 { 1293 struct lockf_entry_list granted; 1294 1295 /* 1296 * Note it is theoretically possible that cancelling this lock 1297 * may allow some other pending lock to become 1298 * active. Consider this case: 1299 * 1300 * Owner Action Result Dependancies 1301 * 1302 * A: lock [0..0] succeeds 1303 * B: lock [2..2] succeeds 1304 * C: lock [1..2] blocked C->B 1305 * D: lock [0..1] blocked C->B,D->A,D->C 1306 * A: unlock [0..0] C->B,D->C 1307 * C: cancel [1..2] 1308 */ 1309 1310 LIST_REMOVE(lock, lf_link); 1311 1312 /* 1313 * Removing out-going edges is simple. 1314 */ 1315 sx_xlock(&lf_owner_graph_lock); 1316 lf_remove_outgoing(lock); 1317 sx_xunlock(&lf_owner_graph_lock); 1318 1319 /* 1320 * Removing in-coming edges may allow some other lock to 1321 * become active - we use lf_update_dependancies to figure 1322 * this out. 1323 */ 1324 LIST_INIT(&granted); 1325 lf_update_dependancies(state, lock, TRUE, &granted); 1326 lf_free_lock(lock); 1327 1328 /* 1329 * Feed any newly active locks to lf_activate_lock. 
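 *
 * Continuing that example: cancelling C removes its out-going edge
 * C->B and its in-coming edge D->C, leaving D with no out-going
 * edges; lf_update_dependancies therefore wakes D and puts it on
 * 'granted', and the loop below hands it to lf_activate_lock.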
1330 */ 1331 while (!LIST_EMPTY(&granted)) { 1332 lock = LIST_FIRST(&granted); 1333 LIST_REMOVE(lock, lf_link); 1334 lf_activate_lock(state, lock); 1335 } 1336 } 1337 1338 /* 1339 * Set a byte-range lock. 1340 */ 1341 static int 1342 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp, 1343 void **cookiep) 1344 { 1345 static char lockstr[] = "lockf"; 1346 int priority, error; 1347 1348 #ifdef LOCKF_DEBUG 1349 if (lockf_debug & 1) 1350 lf_print("lf_setlock", lock); 1351 #endif /* LOCKF_DEBUG */ 1352 1353 /* 1354 * Set the priority 1355 */ 1356 priority = PLOCK; 1357 if (lock->lf_type == F_WRLCK) 1358 priority += 4; 1359 if (!(lock->lf_flags & F_NOINTR)) 1360 priority |= PCATCH; 1361 /* 1362 * Scan lock list for this file looking for locks that would block us. 1363 */ 1364 while (lf_getblock(state, lock)) { 1365 /* 1366 * Free the structure and return if nonblocking. 1367 */ 1368 if ((lock->lf_flags & F_WAIT) == 0 1369 && lock->lf_async_task == NULL) { 1370 lf_free_lock(lock); 1371 error = EAGAIN; 1372 goto out; 1373 } 1374 1375 /* 1376 * For flock type locks, we must first remove 1377 * any shared locks that we hold before we sleep 1378 * waiting for an exclusive lock. 1379 */ 1380 if ((lock->lf_flags & F_FLOCK) && 1381 lock->lf_type == F_WRLCK) { 1382 lock->lf_type = F_UNLCK; 1383 lf_activate_lock(state, lock); 1384 lock->lf_type = F_WRLCK; 1385 } 1386 1387 /* 1388 * We are blocked. Create edges to each blocking lock, 1389 * checking for deadlock using the owner graph. For 1390 * simplicity, we run deadlock detection for all 1391 * locks, posix and otherwise. 1392 */ 1393 sx_xlock(&lf_owner_graph_lock); 1394 error = lf_add_outgoing(state, lock); 1395 sx_xunlock(&lf_owner_graph_lock); 1396 1397 if (error) { 1398 #ifdef LOCKF_DEBUG 1399 if (lockf_debug & 1) 1400 lf_print("lf_setlock: deadlock", lock); 1401 #endif 1402 lf_free_lock(lock); 1403 goto out; 1404 } 1405 1406 /* 1407 * We have added edges to everything that blocks 1408 * us. Sleep until they all go away. 1409 */ 1410 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link); 1411 #ifdef LOCKF_DEBUG 1412 if (lockf_debug & 1) { 1413 struct lockf_edge *e; 1414 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) { 1415 lf_print("lf_setlock: blocking on", e->le_to); 1416 lf_printlist("lf_setlock", e->le_to); 1417 } 1418 } 1419 #endif /* LOCKF_DEBUG */ 1420 1421 if ((lock->lf_flags & F_WAIT) == 0) { 1422 /* 1423 * The caller requested async notification - 1424 * this callback happens when the blocking 1425 * lock is released, allowing the caller to 1426 * make another attempt to take the lock. 1427 */ 1428 *cookiep = (void *) lock; 1429 error = EINPROGRESS; 1430 goto out; 1431 } 1432 1433 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0); 1434 /* 1435 * We may have been awakened by a signal and/or by a 1436 * debugger continuing us (in which cases we must 1437 * remove our lock graph edges) and/or by another 1438 * process releasing a lock (in which case our edges 1439 * have already been removed and we have been moved to 1440 * the active list). We may also have been woken by 1441 * lf_purgelocks which we report to the caller as 1442 * EINTR. In that case, lf_purgelocks will have 1443 * removed our lock graph edges. 1444 * 1445 * Note that it is possible to receive a signal after 1446 * we were successfully woken (and moved to the active 1447 * list) but before we resumed execution. In this 1448 * case, our lf_outedges list will be clear. We 1449 * pretend there was no error. 
1450 * 1451 * Note also, if we have been sleeping long enough, we 1452 * may now have incoming edges from some newer lock 1453 * which is waiting behind us in the queue. 1454 */ 1455 if (lock->lf_flags & F_INTR) { 1456 error = EINTR; 1457 lf_free_lock(lock); 1458 goto out; 1459 } 1460 if (LIST_EMPTY(&lock->lf_outedges)) { 1461 error = 0; 1462 } else { 1463 lf_cancel_lock(state, lock); 1464 goto out; 1465 } 1466 #ifdef LOCKF_DEBUG 1467 if (lockf_debug & 1) { 1468 lf_print("lf_setlock: granted", lock); 1469 } 1470 #endif 1471 goto out; 1472 } 1473 /* 1474 * It looks like we are going to grant the lock. First add 1475 * edges from any currently pending lock that the new lock 1476 * would block. 1477 */ 1478 sx_xlock(&lf_owner_graph_lock); 1479 error = lf_add_incoming(state, lock); 1480 sx_xunlock(&lf_owner_graph_lock); 1481 if (error) { 1482 #ifdef LOCKF_DEBUG 1483 if (lockf_debug & 1) 1484 lf_print("lf_setlock: deadlock", lock); 1485 #endif 1486 lf_free_lock(lock); 1487 goto out; 1488 } 1489 1490 /* 1491 * No blocks!! Add the lock. Note that we will 1492 * downgrade or upgrade any overlapping locks this 1493 * process already owns. 1494 */ 1495 lf_activate_lock(state, lock); 1496 error = 0; 1497 out: 1498 return (error); 1499 } 1500 1501 /* 1502 * Remove a byte-range lock on an inode. 1503 * 1504 * Generally, find the lock (or an overlap to that lock) 1505 * and remove it (or shrink it), then wakeup anyone we can. 1506 */ 1507 static int 1508 lf_clearlock(struct lockf *state, struct lockf_entry *unlock) 1509 { 1510 struct lockf_entry *overlap; 1511 1512 overlap = LIST_FIRST(&state->ls_active); 1513 1514 if (overlap == NOLOCKF) 1515 return (0); 1516 #ifdef LOCKF_DEBUG 1517 if (unlock->lf_type != F_UNLCK) 1518 panic("lf_clearlock: bad type"); 1519 if (lockf_debug & 1) 1520 lf_print("lf_clearlock", unlock); 1521 #endif /* LOCKF_DEBUG */ 1522 1523 lf_activate_lock(state, unlock); 1524 1525 return (0); 1526 } 1527 1528 /* 1529 * Check whether there is a blocking lock, and if so return its 1530 * details in '*fl'. 1531 */ 1532 static int 1533 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl) 1534 { 1535 struct lockf_entry *block; 1536 1537 #ifdef LOCKF_DEBUG 1538 if (lockf_debug & 1) 1539 lf_print("lf_getlock", lock); 1540 #endif /* LOCKF_DEBUG */ 1541 1542 if ((block = lf_getblock(state, lock))) { 1543 fl->l_type = block->lf_type; 1544 fl->l_whence = SEEK_SET; 1545 fl->l_start = block->lf_start; 1546 if (block->lf_end == OFF_MAX) 1547 fl->l_len = 0; 1548 else 1549 fl->l_len = block->lf_end - block->lf_start + 1; 1550 fl->l_pid = block->lf_owner->lo_pid; 1551 fl->l_sysid = block->lf_owner->lo_sysid; 1552 } else { 1553 fl->l_type = F_UNLCK; 1554 } 1555 return (0); 1556 } 1557 1558 /* 1559 * Cancel an async lock request. 1560 */ 1561 static int 1562 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie) 1563 { 1564 struct lockf_entry *reallock; 1565 1566 /* 1567 * We need to match this request with an existing lock 1568 * request. 1569 */ 1570 LIST_FOREACH(reallock, &state->ls_pending, lf_link) { 1571 if ((void *) reallock == cookie) { 1572 /* 1573 * Double-check that this lock looks right 1574 * (maybe use a rolling ID for the cancel 1575 * cookie instead?) 1576 */ 1577 if (!(reallock->lf_vnode == lock->lf_vnode 1578 && reallock->lf_start == lock->lf_start 1579 && reallock->lf_end == lock->lf_end)) { 1580 return (ENOENT); 1581 } 1582 1583 /* 1584 * Make sure this lock was async and then just 1585 * remove it from its wait lists. 
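 *
 * To sketch the expected calling pattern: an async caller issues
 * F_SETLK with a_task set; when the range is busy, lf_setlock queues
 * the request and returns EINPROGRESS after storing the lockf_entry
 * pointer through *cookiep. The caller may later hand that cookie
 * back with F_CANCEL, which is how control reaches this function.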
1586 */ 1587 if (!reallock->lf_async_task) { 1588 return (ENOENT); 1589 } 1590 1591 /* 1592 * Note that since any other thread must take 1593 * state->ls_lock before it can possibly 1594 * trigger the async callback, we are safe 1595 * from a race with lf_wakeup_lock, i.e. we 1596 * can free the lock (actually our caller does 1597 * this). 1598 */ 1599 lf_cancel_lock(state, reallock); 1600 return (0); 1601 } 1602 } 1603 1604 /* 1605 * We didn't find a matching lock - not much we can do here. 1606 */ 1607 return (ENOENT); 1608 } 1609 1610 /* 1611 * Walk the list of locks for an inode and 1612 * return the first blocking lock. 1613 */ 1614 static struct lockf_entry * 1615 lf_getblock(struct lockf *state, struct lockf_entry *lock) 1616 { 1617 struct lockf_entry *overlap; 1618 1619 LIST_FOREACH(overlap, &state->ls_active, lf_link) { 1620 /* 1621 * We may assume that the active list is sorted by 1622 * lf_start. 1623 */ 1624 if (overlap->lf_start > lock->lf_end) 1625 break; 1626 if (!lf_blocks(lock, overlap)) 1627 continue; 1628 return (overlap); 1629 } 1630 return (NOLOCKF); 1631 } 1632 1633 /* 1634 * Walk the list of locks for an inode to find an overlapping lock (if 1635 * any) and return a classification of that overlap. 1636 * 1637 * Arguments: 1638 * *overlap The place in the lock list to start looking 1639 * lock The lock which is being tested 1640 * type Pass 'SELF' to test only locks with the same 1641 * owner as lock, or 'OTHER' to test only locks 1642 * with a different owner 1643 * 1644 * Returns one of six values: 1645 * 0) no overlap 1646 * 1) overlap == lock 1647 * 2) overlap contains lock 1648 * 3) lock contains overlap 1649 * 4) overlap starts before lock 1650 * 5) overlap ends after lock 1651 * 1652 * If there is an overlapping lock, '*overlap' is set to point at the 1653 * overlapping lock. 1654 * 1655 * NOTE: this returns only the FIRST overlapping lock. There 1656 * may be more than one. 
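 *
 * A concrete illustration: with lock = [10..20], an entry covering
 * [1..5] is case 0 and the scan continues, [10..20] is case 1,
 * [5..25] is case 2, [12..18] is case 3, [5..15] is case 4 and
 * [15..30] is case 5; an entry starting at 21 or later stops the scan
 * since the list is ordered by lf_start.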
1657 */ 1658 static int 1659 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type) 1660 { 1661 struct lockf_entry *lf; 1662 off_t start, end; 1663 int res; 1664 1665 if ((*overlap) == NOLOCKF) { 1666 return (0); 1667 } 1668 #ifdef LOCKF_DEBUG 1669 if (lockf_debug & 2) 1670 lf_print("lf_findoverlap: looking for overlap in", lock); 1671 #endif /* LOCKF_DEBUG */ 1672 start = lock->lf_start; 1673 end = lock->lf_end; 1674 res = 0; 1675 while (*overlap) { 1676 lf = *overlap; 1677 if (lf->lf_start > end) 1678 break; 1679 if (((type & SELF) && lf->lf_owner != lock->lf_owner) || 1680 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) { 1681 *overlap = LIST_NEXT(lf, lf_link); 1682 continue; 1683 } 1684 #ifdef LOCKF_DEBUG 1685 if (lockf_debug & 2) 1686 lf_print("\tchecking", lf); 1687 #endif /* LOCKF_DEBUG */ 1688 /* 1689 * OK, check for overlap 1690 * 1691 * Six cases: 1692 * 0) no overlap 1693 * 1) overlap == lock 1694 * 2) overlap contains lock 1695 * 3) lock contains overlap 1696 * 4) overlap starts before lock 1697 * 5) overlap ends after lock 1698 */ 1699 if (start > lf->lf_end) { 1700 /* Case 0 */ 1701 #ifdef LOCKF_DEBUG 1702 if (lockf_debug & 2) 1703 printf("no overlap\n"); 1704 #endif /* LOCKF_DEBUG */ 1705 *overlap = LIST_NEXT(lf, lf_link); 1706 continue; 1707 } 1708 if (lf->lf_start == start && lf->lf_end == end) { 1709 /* Case 1 */ 1710 #ifdef LOCKF_DEBUG 1711 if (lockf_debug & 2) 1712 printf("overlap == lock\n"); 1713 #endif /* LOCKF_DEBUG */ 1714 res = 1; 1715 break; 1716 } 1717 if (lf->lf_start <= start && lf->lf_end >= end) { 1718 /* Case 2 */ 1719 #ifdef LOCKF_DEBUG 1720 if (lockf_debug & 2) 1721 printf("overlap contains lock\n"); 1722 #endif /* LOCKF_DEBUG */ 1723 res = 2; 1724 break; 1725 } 1726 if (start <= lf->lf_start && end >= lf->lf_end) { 1727 /* Case 3 */ 1728 #ifdef LOCKF_DEBUG 1729 if (lockf_debug & 2) 1730 printf("lock contains overlap\n"); 1731 #endif /* LOCKF_DEBUG */ 1732 res = 3; 1733 break; 1734 } 1735 if (lf->lf_start < start && lf->lf_end >= start) { 1736 /* Case 4 */ 1737 #ifdef LOCKF_DEBUG 1738 if (lockf_debug & 2) 1739 printf("overlap starts before lock\n"); 1740 #endif /* LOCKF_DEBUG */ 1741 res = 4; 1742 break; 1743 } 1744 if (lf->lf_start > start && lf->lf_end > end) { 1745 /* Case 5 */ 1746 #ifdef LOCKF_DEBUG 1747 if (lockf_debug & 2) 1748 printf("overlap ends after lock\n"); 1749 #endif /* LOCKF_DEBUG */ 1750 res = 5; 1751 break; 1752 } 1753 panic("lf_findoverlap: default"); 1754 } 1755 return (res); 1756 } 1757 1758 /* 1759 * Split an the existing 'lock1', based on the extent of the lock 1760 * described by 'lock2'. The existing lock should cover 'lock2' 1761 * entirely. 1762 * 1763 * Any pending locks which have been been unblocked are added to 1764 * 'granted' 1765 */ 1766 static void 1767 lf_split(struct lockf *state, struct lockf_entry *lock1, 1768 struct lockf_entry *lock2, struct lockf_entry_list *granted) 1769 { 1770 struct lockf_entry *splitlock; 1771 1772 #ifdef LOCKF_DEBUG 1773 if (lockf_debug & 2) { 1774 lf_print("lf_split", lock1); 1775 lf_print("splitting from", lock2); 1776 } 1777 #endif /* LOCKF_DEBUG */ 1778 /* 1779 * Check to see if we don't need to split at all. 1780 */ 1781 if (lock1->lf_start == lock2->lf_start) { 1782 lf_set_start(state, lock1, lock2->lf_end + 1, granted); 1783 return; 1784 } 1785 if (lock1->lf_end == lock2->lf_end) { 1786 lf_set_end(state, lock1, lock2->lf_start - 1, granted); 1787 return; 1788 } 1789 /* 1790 * Make a new lock consisting of the last part of 1791 * the encompassing lock. 
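 *
 * For example, if lock1 covers [0..99] and lock2 covers [40..49], the
 * code below creates splitlock for [50..99], recomputes its in-coming
 * edges from any pending locks that still conflict with that range,
 * shrinks lock1 to [0..39] with lf_set_end and finally inserts
 * splitlock into the active list.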
1792 */ 1793 splitlock = lf_alloc_lock(lock1->lf_owner); 1794 memcpy(splitlock, lock1, sizeof *splitlock); 1795 if (splitlock->lf_flags & F_REMOTE) 1796 vref(splitlock->lf_vnode); 1797 1798 /* 1799 * This cannot cause a deadlock since any edges we would add 1800 * to splitlock already exist in lock1. We must be sure to add 1801 * necessary dependancies to splitlock before we reduce lock1 1802 * otherwise we may accidentally grant a pending lock that 1803 * was blocked by the tail end of lock1. 1804 */ 1805 splitlock->lf_start = lock2->lf_end + 1; 1806 LIST_INIT(&splitlock->lf_outedges); 1807 LIST_INIT(&splitlock->lf_inedges); 1808 sx_xlock(&lf_owner_graph_lock); 1809 lf_add_incoming(state, splitlock); 1810 sx_xunlock(&lf_owner_graph_lock); 1811 1812 lf_set_end(state, lock1, lock2->lf_start - 1, granted); 1813 1814 /* 1815 * OK, now link it in 1816 */ 1817 lf_insert_lock(state, splitlock); 1818 } 1819 1820 struct lockdesc { 1821 STAILQ_ENTRY(lockdesc) link; 1822 struct vnode *vp; 1823 struct flock fl; 1824 }; 1825 STAILQ_HEAD(lockdesclist, lockdesc); 1826 1827 int 1828 lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg) 1829 { 1830 struct lockf *ls; 1831 struct lockf_entry *lf; 1832 struct lockdesc *ldesc; 1833 struct lockdesclist locks; 1834 int error; 1835 1836 /* 1837 * In order to keep the locking simple, we iterate over the 1838 * active lock lists to build a list of locks that need 1839 * releasing. We then call the iterator for each one in turn. 1840 * 1841 * We take an extra reference to the vnode for the duration to 1842 * make sure it doesn't go away before we are finished. 1843 */ 1844 STAILQ_INIT(&locks); 1845 sx_xlock(&lf_lock_states_lock); 1846 LIST_FOREACH(ls, &lf_lock_states, ls_link) { 1847 sx_xlock(&ls->ls_lock); 1848 LIST_FOREACH(lf, &ls->ls_active, lf_link) { 1849 if (lf->lf_owner->lo_sysid != sysid) 1850 continue; 1851 1852 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF, 1853 M_WAITOK); 1854 ldesc->vp = lf->lf_vnode; 1855 vref(ldesc->vp); 1856 ldesc->fl.l_start = lf->lf_start; 1857 if (lf->lf_end == OFF_MAX) 1858 ldesc->fl.l_len = 0; 1859 else 1860 ldesc->fl.l_len = 1861 lf->lf_end - lf->lf_start + 1; 1862 ldesc->fl.l_whence = SEEK_SET; 1863 ldesc->fl.l_type = F_UNLCK; 1864 ldesc->fl.l_pid = lf->lf_owner->lo_pid; 1865 ldesc->fl.l_sysid = sysid; 1866 STAILQ_INSERT_TAIL(&locks, ldesc, link); 1867 } 1868 sx_xunlock(&ls->ls_lock); 1869 } 1870 sx_xunlock(&lf_lock_states_lock); 1871 1872 /* 1873 * Call the iterator function for each lock in turn. If the 1874 * iterator returns an error code, just free the rest of the 1875 * lockdesc structures. 1876 */ 1877 error = 0; 1878 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) { 1879 STAILQ_REMOVE_HEAD(&locks, link); 1880 if (!error) 1881 error = fn(ldesc->vp, &ldesc->fl, arg); 1882 vrele(ldesc->vp); 1883 free(ldesc, M_LOCKF); 1884 } 1885 1886 return (error); 1887 } 1888 1889 int 1890 lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg) 1891 { 1892 struct lockf *ls; 1893 struct lockf_entry *lf; 1894 struct lockdesc *ldesc; 1895 struct lockdesclist locks; 1896 int error; 1897 1898 /* 1899 * In order to keep the locking simple, we iterate over the 1900 * active lock lists to build a list of locks that need 1901 * releasing. We then call the iterator for each one in turn. 1902 * 1903 * We take an extra reference to the vnode for the duration to 1904 * make sure it doesn't go away before we are finished. 
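 *
 * A minimal, hypothetical iterator (shown only as a sketch; compare
 * lf_clearremotesys_iterator below, which simply issues an unlocking
 * VOP_ADVLOCK for each record it is handed):
 *
 *	static int
 *	count_iter(struct vnode *vp, struct flock *fl, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);
 *	}
 *
 * Returning non-zero from the callback causes the remaining lockdesc
 * entries to be freed without any further calls.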
1905 */ 1906 STAILQ_INIT(&locks); 1907 ls = vp->v_lockf; 1908 if (!ls) 1909 return (0); 1910 1911 sx_xlock(&ls->ls_lock); 1912 LIST_FOREACH(lf, &ls->ls_active, lf_link) { 1913 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF, 1914 M_WAITOK); 1915 ldesc->vp = lf->lf_vnode; 1916 vref(ldesc->vp); 1917 ldesc->fl.l_start = lf->lf_start; 1918 if (lf->lf_end == OFF_MAX) 1919 ldesc->fl.l_len = 0; 1920 else 1921 ldesc->fl.l_len = 1922 lf->lf_end - lf->lf_start + 1; 1923 ldesc->fl.l_whence = SEEK_SET; 1924 ldesc->fl.l_type = F_UNLCK; 1925 ldesc->fl.l_pid = lf->lf_owner->lo_pid; 1926 ldesc->fl.l_sysid = lf->lf_owner->lo_sysid; 1927 STAILQ_INSERT_TAIL(&locks, ldesc, link); 1928 } 1929 sx_xunlock(&ls->ls_lock); 1930 1931 /* 1932 * Call the iterator function for each lock in turn. If the 1933 * iterator returns an error code, just free the rest of the 1934 * lockdesc structures. 1935 */ 1936 error = 0; 1937 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) { 1938 STAILQ_REMOVE_HEAD(&locks, link); 1939 if (!error) 1940 error = fn(ldesc->vp, &ldesc->fl, arg); 1941 vrele(ldesc->vp); 1942 free(ldesc, M_LOCKF); 1943 } 1944 1945 return (error); 1946 } 1947 1948 static int 1949 lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg) 1950 { 1951 1952 VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE); 1953 return (0); 1954 } 1955 1956 void 1957 lf_clearremotesys(int sysid) 1958 { 1959 1960 KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS")); 1961 lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL); 1962 } 1963 1964 int 1965 lf_countlocks(int sysid) 1966 { 1967 int i; 1968 struct lock_owner *lo; 1969 int count; 1970 1971 count = 0; 1972 sx_xlock(&lf_lock_owners_lock); 1973 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) 1974 LIST_FOREACH(lo, &lf_lock_owners[i], lo_link) 1975 if (lo->lo_sysid == sysid) 1976 count += lo->lo_refs; 1977 sx_xunlock(&lf_lock_owners_lock); 1978 1979 return (count); 1980 } 1981 1982 #ifdef LOCKF_DEBUG 1983 1984 /* 1985 * Return non-zero if y is reachable from x using a brute force 1986 * search. If reachable and path is non-null, return the route taken 1987 * in path. 1988 */ 1989 static int 1990 graph_reaches(struct owner_vertex *x, struct owner_vertex *y, 1991 struct owner_vertex_list *path) 1992 { 1993 struct owner_edge *e; 1994 1995 if (x == y) { 1996 if (path) 1997 TAILQ_INSERT_HEAD(path, x, v_link); 1998 return 1; 1999 } 2000 2001 LIST_FOREACH(e, &x->v_outedges, e_outlink) { 2002 if (graph_reaches(e->e_to, y, path)) { 2003 if (path) 2004 TAILQ_INSERT_HEAD(path, x, v_link); 2005 return 1; 2006 } 2007 } 2008 return 0; 2009 } 2010 2011 /* 2012 * Perform consistency checks on the graph. Make sure the values of 2013 * v_order are correct. If checkorder is non-zero, check no vertex can 2014 * reach any other vertex with a smaller order. 
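 *
 * In other words, with edges from the vertex ordered 1 to the vertex
 * ordered 3 and from 2 to 3 the invariant holds, but an edge from 3
 * back to 1 would contradict the ordering; that is exactly the
 * situation graph_add_edge must repair by re-ordering the affected
 * region, or reject as a cycle.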
2015 */
2016 static void
2017 graph_check(struct owner_graph *g, int checkorder)
2018 {
2019 int i, j;
2020
2021 for (i = 0; i < g->g_size; i++) {
2022 if (!g->g_vertices[i]->v_owner)
2023 continue;
2024 KASSERT(g->g_vertices[i]->v_order == i,
2025 ("lock graph vertices disordered"));
2026 if (checkorder) {
2027 for (j = 0; j < i; j++) {
2028 if (!g->g_vertices[j]->v_owner)
2029 continue;
2030 KASSERT(!graph_reaches(g->g_vertices[i],
2031 g->g_vertices[j], NULL),
2032 ("lock graph vertices disordered"));
2033 }
2034 }
2035 }
2036 }
2037
2038 static void
2039 graph_print_vertices(struct owner_vertex_list *set)
2040 {
2041 struct owner_vertex *v;
2042
2043 printf("{ ");
2044 TAILQ_FOREACH(v, set, v_link) {
2045 printf("%d:", v->v_order);
2046 lf_print_owner(v->v_owner);
2047 if (TAILQ_NEXT(v, v_link))
2048 printf(", ");
2049 }
2050 printf(" }\n");
2051 }
2052
2053 #endif
2054
2055 /*
2056 * Calculate the sub-set of vertices v from the affected region [y..x]
2057 * where v is reachable from y. Return -1 if a loop was detected
2058 * (i.e. x is reachable from y); otherwise return the number of
2059 * vertices in this subset.
2060 */
2061 static int
2062 graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
2063 struct owner_vertex *y, struct owner_vertex_list *delta)
2064 {
2065 uint32_t gen;
2066 struct owner_vertex *v;
2067 struct owner_edge *e;
2068 int n;
2069
2070 /*
2071 * We start with a set containing just y. Then for each vertex
2072 * v in the set so far unprocessed, we add each vertex that v
2073 * has an out-edge to and that is within the affected region
2074 * [y..x]. If we see the vertex x on our travels, stop
2075 * immediately.
2076 */
2077 TAILQ_INIT(delta);
2078 TAILQ_INSERT_TAIL(delta, y, v_link);
2079 v = y;
2080 n = 1;
2081 gen = g->g_gen;
2082 while (v) {
2083 LIST_FOREACH(e, &v->v_outedges, e_outlink) {
2084 if (e->e_to == x)
2085 return -1;
2086 if (e->e_to->v_order < x->v_order
2087 && e->e_to->v_gen != gen) {
2088 e->e_to->v_gen = gen;
2089 TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
2090 n++;
2091 }
2092 }
2093 v = TAILQ_NEXT(v, v_link);
2094 }
2095
2096 return (n);
2097 }
2098
2099 /*
2100 * Calculate the sub-set of vertices v from the affected region [y..x]
2101 * where v reaches x. Return the number of vertices in this subset.
2102 */
2103 static int
2104 graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
2105 struct owner_vertex *y, struct owner_vertex_list *delta)
2106 {
2107 uint32_t gen;
2108 struct owner_vertex *v;
2109 struct owner_edge *e;
2110 int n;
2111
2112 /*
2113 * We start with a set containing just x. Then for each vertex
2114 * v in the set so far unprocessed, we add each vertex that v
2115 * has an in-edge from and that is within the affected region
2116 * [y..x].
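 *
 * As a made-up miniature example: with orders y=2, a=3, b=4 and
 * x=5 and edges y->a, a->x and b->x, the walk from x visits a and
 * b (both have an order greater than y's) but excludes y itself,
 * so deltaB = { a, b, x } and the function returns 3.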
2117 */
2118 TAILQ_INIT(delta);
2119 TAILQ_INSERT_TAIL(delta, x, v_link);
2120 v = x;
2121 n = 1;
2122 gen = g->g_gen;
2123 while (v) {
2124 LIST_FOREACH(e, &v->v_inedges, e_inlink) {
2125 if (e->e_from->v_order > y->v_order
2126 && e->e_from->v_gen != gen) {
2127 e->e_from->v_gen = gen;
2128 TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
2129 n++;
2130 }
2131 }
2132 v = TAILQ_PREV(v, owner_vertex_list, v_link);
2133 }
2134
2135 return (n);
2136 }
2137
2138 static int
2139 graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
2140 {
2141 struct owner_vertex *v;
2142 int i, j;
2143
2144 TAILQ_FOREACH(v, set, v_link) {
2145 for (i = n;
2146 i > 0 && indices[i - 1] > v->v_order; i--)
2147 ;
2148 for (j = n - 1; j >= i; j--)
2149 indices[j + 1] = indices[j];
2150 indices[i] = v->v_order;
2151 n++;
2152 }
2153
2154 return (n);
2155 }
2156
2157 static int
2158 graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
2159 struct owner_vertex_list *set)
2160 {
2161 struct owner_vertex *v, *vlowest;
2162
2163 while (!TAILQ_EMPTY(set)) {
2164 vlowest = NULL;
2165 TAILQ_FOREACH(v, set, v_link) {
2166 if (!vlowest || v->v_order < vlowest->v_order)
2167 vlowest = v;
2168 }
2169 TAILQ_REMOVE(set, vlowest, v_link);
2170 vlowest->v_order = indices[nextunused];
2171 g->g_vertices[vlowest->v_order] = vlowest;
2172 nextunused++;
2173 }
2174
2175 return (nextunused);
2176 }
2177
2178 static int
2179 graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
2180 struct owner_vertex *y)
2181 {
2182 struct owner_edge *e;
2183 struct owner_vertex_list deltaF, deltaB;
2184 int nF, nB, n, vi, i;
2185 int *indices;
2186
2187 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2188
2189 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2190 if (e->e_to == y) {
2191 e->e_refs++;
2192 return (0);
2193 }
2194 }
2195
2196 #ifdef LOCKF_DEBUG
2197 if (lockf_debug & 8) {
2198 printf("adding edge %d:", x->v_order);
2199 lf_print_owner(x->v_owner);
2200 printf(" -> %d:", y->v_order);
2201 lf_print_owner(y->v_owner);
2202 printf("\n");
2203 }
2204 #endif
2205 if (y->v_order < x->v_order) {
2206 /*
2207 * The new edge violates the order. First find the set
2208 * of affected vertices reachable from y (deltaF) and
2209 * the set of affected vertices that reach x
2210 * (deltaB), using the graph generation number to
2211 * detect whether we have visited a given vertex
2212 * already. We re-order the graph so that each vertex
2213 * in deltaB appears before each vertex in deltaF.
2214 *
2215 * If x is a member of deltaF, then the new edge would
2216 * create a cycle. Otherwise, we may assume that
2217 * deltaF and deltaB are disjoint.
2218 */
2219 g->g_gen++;
2220 if (g->g_gen == 0) {
2221 /*
2222 * Generation wrap.
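 *
 * The generation number doubles as a per-traversal 'visited'
 * mark: a vertex has been seen by the current traversal when
 * its v_gen equals g_gen. When the counter wraps back to zero,
 * every vertex's stamp must be cleared (and the counter bumped
 * past zero again) so that stale stamps from earlier traversals
 * are not mistaken for fresh ones.
 *
 * To sketch the re-ordering performed below with made-up
 * orders: suppose the vertices are ordered y=1, a=2, x=3 with
 * an existing edge y->a, and we are adding x->y. Then deltaF
 * (forward from y) is { y, a }, deltaB (backward from x) is
 * { x }, and the free index set is { 1, 2, 3 }. deltaB is
 * re-numbered first, so x becomes 1, then y becomes 2 and a
 * becomes 3, after which the new edge x->y no longer violates
 * the topological order.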
2223 */
2224 for (vi = 0; vi < g->g_size; vi++) {
2225 g->g_vertices[vi]->v_gen = 0;
2226 }
2227 g->g_gen++;
2228 }
2229 nF = graph_delta_forward(g, x, y, &deltaF);
2230 if (nF < 0) {
2231 #ifdef LOCKF_DEBUG
2232 if (lockf_debug & 8) {
2233 struct owner_vertex_list path;
2234 printf("deadlock: ");
2235 TAILQ_INIT(&path);
2236 graph_reaches(y, x, &path);
2237 graph_print_vertices(&path);
2238 }
2239 #endif
2240 return (EDEADLK);
2241 }
2242
2243 #ifdef LOCKF_DEBUG
2244 if (lockf_debug & 8) {
2245 printf("re-ordering graph vertices\n");
2246 printf("deltaF = ");
2247 graph_print_vertices(&deltaF);
2248 }
2249 #endif
2250
2251 nB = graph_delta_backward(g, x, y, &deltaB);
2252
2253 #ifdef LOCKF_DEBUG
2254 if (lockf_debug & 8) {
2255 printf("deltaB = ");
2256 graph_print_vertices(&deltaB);
2257 }
2258 #endif
2259
2260 /*
2261 * We first build a set of vertex indices (vertex
2262 * order values) that we may use, then we re-assign
2263 * orders first to those vertices in deltaB, then to
2264 * deltaF. Note that the contents of deltaF and deltaB
2265 * may be partially disordered - we perform an
2266 * insertion sort while building our index set.
2267 */
2268 indices = g->g_indexbuf;
2269 n = graph_add_indices(indices, 0, &deltaF);
2270 graph_add_indices(indices, n, &deltaB);
2271
2272 /*
2273 * We must also be sure to maintain the relative
2274 * ordering of deltaF and deltaB when re-assigning
2275 * vertices. We do this by iteratively removing the
2276 * lowest ordered element from the set and assigning
2277 * it the next value from our new ordering.
2278 */
2279 i = graph_assign_indices(g, indices, 0, &deltaB);
2280 graph_assign_indices(g, indices, i, &deltaF);
2281
2282 #ifdef LOCKF_DEBUG
2283 if (lockf_debug & 8) {
2284 struct owner_vertex_list set;
2285 TAILQ_INIT(&set);
2286 for (i = 0; i < nB + nF; i++)
2287 TAILQ_INSERT_TAIL(&set,
2288 g->g_vertices[indices[i]], v_link);
2289 printf("new ordering = ");
2290 graph_print_vertices(&set);
2291 }
2292 #endif
2293 }
2294
2295 KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
2296
2297 #ifdef LOCKF_DEBUG
2298 if (lockf_debug & 8) {
2299 graph_check(g, TRUE);
2300 }
2301 #endif
2302
2303 e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
2304
2305 LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
2306 LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
2307 e->e_refs = 1;
2308 e->e_from = x;
2309 e->e_to = y;
2310
2311 return (0);
2312 }
2313
2314 /*
2315 * Remove an edge x->y from the graph.
2316 */
2317 static void
2318 graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
2319 struct owner_vertex *y)
2320 {
2321 struct owner_edge *e;
2322
2323 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2324
2325 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2326 if (e->e_to == y)
2327 break;
2328 }
2329 KASSERT(e, ("Removing non-existent edge from deadlock graph"));
2330
2331 e->e_refs--;
2332 if (e->e_refs == 0) {
2333 #ifdef LOCKF_DEBUG
2334 if (lockf_debug & 8) {
2335 printf("removing edge %d:", x->v_order);
2336 lf_print_owner(x->v_owner);
2337 printf(" -> %d:", y->v_order);
2338 lf_print_owner(y->v_owner);
2339 printf("\n");
2340 }
2341 #endif
2342 LIST_REMOVE(e, e_outlink);
2343 LIST_REMOVE(e, e_inlink);
2344 free(e, M_LOCKF);
2345 }
2346 }
2347
2348 /*
2349 * Allocate a vertex for a lock owner, growing the graph's vertex
2350 * array and index buffer if necessary.
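 * The caller must hold lf_owner_graph_lock exclusively. The
 * allocation itself uses M_WAITOK and so cannot fail, which is why
 * there is no error return; when the vertex array is full, both it
 * and the scratch index buffer are doubled in size.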
2351 */ 2352 static struct owner_vertex * 2353 graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo) 2354 { 2355 struct owner_vertex *v; 2356 2357 sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2358 2359 v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK); 2360 if (g->g_size == g->g_space) { 2361 g->g_vertices = realloc(g->g_vertices, 2362 2 * g->g_space * sizeof(struct owner_vertex *), 2363 M_LOCKF, M_WAITOK); 2364 free(g->g_indexbuf, M_LOCKF); 2365 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int), 2366 M_LOCKF, M_WAITOK); 2367 g->g_space = 2 * g->g_space; 2368 } 2369 v->v_order = g->g_size; 2370 v->v_gen = g->g_gen; 2371 g->g_vertices[g->g_size] = v; 2372 g->g_size++; 2373 2374 LIST_INIT(&v->v_outedges); 2375 LIST_INIT(&v->v_inedges); 2376 v->v_owner = lo; 2377 2378 return (v); 2379 } 2380 2381 static void 2382 graph_free_vertex(struct owner_graph *g, struct owner_vertex *v) 2383 { 2384 struct owner_vertex *w; 2385 int i; 2386 2387 sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2388 2389 KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges")); 2390 KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges")); 2391 2392 /* 2393 * Remove from the graph's array and close up the gap, 2394 * renumbering the other vertices. 2395 */ 2396 for (i = v->v_order + 1; i < g->g_size; i++) { 2397 w = g->g_vertices[i]; 2398 w->v_order--; 2399 g->g_vertices[i - 1] = w; 2400 } 2401 g->g_size--; 2402 2403 free(v, M_LOCKF); 2404 } 2405 2406 static struct owner_graph * 2407 graph_init(struct owner_graph *g) 2408 { 2409 2410 g->g_vertices = malloc(10 * sizeof(struct owner_vertex *), 2411 M_LOCKF, M_WAITOK); 2412 g->g_size = 0; 2413 g->g_space = 10; 2414 g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK); 2415 g->g_gen = 0; 2416 2417 return (g); 2418 } 2419 2420 #ifdef LOCKF_DEBUG 2421 /* 2422 * Print description of a lock owner 2423 */ 2424 static void 2425 lf_print_owner(struct lock_owner *lo) 2426 { 2427 2428 if (lo->lo_flags & F_REMOTE) { 2429 printf("remote pid %d, system %d", 2430 lo->lo_pid, lo->lo_sysid); 2431 } else if (lo->lo_flags & F_FLOCK) { 2432 printf("file %p", lo->lo_id); 2433 } else { 2434 printf("local pid %d", lo->lo_pid); 2435 } 2436 } 2437 2438 /* 2439 * Print out a lock. 2440 */ 2441 static void 2442 lf_print(char *tag, struct lockf_entry *lock) 2443 { 2444 2445 printf("%s: lock %p for ", tag, (void *)lock); 2446 lf_print_owner(lock->lf_owner); 2447 if (lock->lf_inode != (struct inode *)0) 2448 printf(" in ino %ju on dev <%s>,", 2449 (uintmax_t)lock->lf_inode->i_number, 2450 devtoname(lock->lf_inode->i_dev)); 2451 printf(" %s, start %jd, end ", 2452 lock->lf_type == F_RDLCK ? "shared" : 2453 lock->lf_type == F_WRLCK ? "exclusive" : 2454 lock->lf_type == F_UNLCK ? 
"unlock" : "unknown", 2455 (intmax_t)lock->lf_start); 2456 if (lock->lf_end == OFF_MAX) 2457 printf("EOF"); 2458 else 2459 printf("%jd", (intmax_t)lock->lf_end); 2460 if (!LIST_EMPTY(&lock->lf_outedges)) 2461 printf(" block %p\n", 2462 (void *)LIST_FIRST(&lock->lf_outedges)->le_to); 2463 else 2464 printf("\n"); 2465 } 2466 2467 static void 2468 lf_printlist(char *tag, struct lockf_entry *lock) 2469 { 2470 struct lockf_entry *lf, *blk; 2471 struct lockf_edge *e; 2472 2473 if (lock->lf_inode == (struct inode *)0) 2474 return; 2475 2476 printf("%s: Lock list for ino %ju on dev <%s>:\n", 2477 tag, (uintmax_t)lock->lf_inode->i_number, 2478 devtoname(lock->lf_inode->i_dev)); 2479 LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) { 2480 printf("\tlock %p for ",(void *)lf); 2481 lf_print_owner(lock->lf_owner); 2482 printf(", %s, start %jd, end %jd", 2483 lf->lf_type == F_RDLCK ? "shared" : 2484 lf->lf_type == F_WRLCK ? "exclusive" : 2485 lf->lf_type == F_UNLCK ? "unlock" : 2486 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end); 2487 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) { 2488 blk = e->le_to; 2489 printf("\n\t\tlock request %p for ", (void *)blk); 2490 lf_print_owner(blk->lf_owner); 2491 printf(", %s, start %jd, end %jd", 2492 blk->lf_type == F_RDLCK ? "shared" : 2493 blk->lf_type == F_WRLCK ? "exclusive" : 2494 blk->lf_type == F_UNLCK ? "unlock" : 2495 "unknown", (intmax_t)blk->lf_start, 2496 (intmax_t)blk->lf_end); 2497 if (!LIST_EMPTY(&blk->lf_inedges)) 2498 panic("lf_printlist: bad list"); 2499 } 2500 printf("\n"); 2501 } 2502 } 2503 #endif /* LOCKF_DEBUG */ 2504